5ee6e2f5aae87b313431b6b9498ab41e21c414c030803aaa3e7e08b503f4e425
from itertools import combinations
from sympy.combinatorics.graycode import GrayCode
class Subset():
"""
Represents a basic subset object.
Explanation
===========
We generate subsets using essentially two techniques,
binary enumeration and lexicographic enumeration.
The Subset class takes two arguments, the first one
describes the initial subset to consider and the second
describes the superset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
['b']
>>> a.prev_binary().subset
['c']
"""
_rank_binary = None
_rank_lex = None
_rank_graycode = None
_subset = None
_superset = None
def __new__(cls, subset, superset):
"""
Default constructor.
It takes the ``subset`` and its ``superset`` as its parameters.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.subset
['c', 'd']
>>> a.superset
['a', 'b', 'c', 'd']
>>> a.size
2
"""
if len(subset) > len(superset):
raise ValueError('Invalid arguments have been provided. The '
'superset must be at least as large as the subset.')
for elem in subset:
if elem not in superset:
raise ValueError('The superset provided is invalid as it does '
'not contain the element {}'.format(elem))
obj = object.__new__(cls)
obj._subset = subset
obj._superset = superset
return obj
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
whether both objects are of the class Subset and if the values
of the subset and superset attributes are the same.
"""
if not isinstance(other, Subset):
return NotImplemented
return self.subset == other.subset and self.superset == other.superset
def iterate_binary(self, k):
"""
This is a helper function. It iterates over the
binary-ordered subsets by ``k`` steps; ``k`` can be
either positive or negative.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.iterate_binary(-2).subset
['d']
>>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd'])
>>> a.iterate_binary(2).subset
[]
See Also
========
next_binary, prev_binary
"""
bin_list = Subset.bitlist_from_subset(self.subset, self.superset)
n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size
bits = bin(n)[2:].rjust(self.superset_size, '0')
return Subset.subset_from_bitlist(self.superset, bits)
def next_binary(self):
"""
Generates the next binary ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
['b']
>>> a = Subset(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
[]
See Also
========
prev_binary, iterate_binary
"""
return self.iterate_binary(1)
def prev_binary(self):
"""
Generates the previous binary ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a', 'b', 'c', 'd'])
>>> a.prev_binary().subset
['a', 'b', 'c', 'd']
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.prev_binary().subset
['c']
See Also
========
next_binary, iterate_binary
"""
return self.iterate_binary(-1)
def next_lexicographic(self):
"""
Generates the next lexicographically ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_lexicographic().subset
['d']
>>> a = Subset(['d'], ['a', 'b', 'c', 'd'])
>>> a.next_lexicographic().subset
[]
See Also
========
prev_lexicographic
"""
i = self.superset_size - 1
indices = Subset.subset_indices(self.subset, self.superset)
if i in indices:
if i - 1 in indices:
indices.remove(i - 1)
else:
indices.remove(i)
i = i - 1
while i not in indices and i >= 0:
i = i - 1
if i >= 0:
indices.remove(i)
indices.append(i+1)
else:
while i not in indices and i >= 0:
i = i - 1
indices.append(i + 1)
ret_set = []
super_set = self.superset
for i in indices:
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
def prev_lexicographic(self):
"""
Generates the previous lexicographically ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a', 'b', 'c', 'd'])
>>> a.prev_lexicographic().subset
['d']
>>> a = Subset(['c','d'], ['a', 'b', 'c', 'd'])
>>> a.prev_lexicographic().subset
['c']
See Also
========
next_lexicographic
"""
i = self.superset_size - 1
indices = Subset.subset_indices(self.subset, self.superset)
while i >= 0 and i not in indices:
i = i - 1
if i == 0 or i - 1 in indices:
indices.remove(i)
else:
if i >= 0:
indices.remove(i)
indices.append(i - 1)
indices.append(self.superset_size - 1)
ret_set = []
super_set = self.superset
for i in indices:
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
def iterate_graycode(self, k):
"""
Helper function used for prev_gray and next_gray.
It advances ``k`` steps (``k`` may be negative) in the Gray code ordering.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([1, 2, 3], [1, 2, 3, 4])
>>> a.iterate_graycode(3).subset
[1, 4]
>>> a.iterate_graycode(-2).subset
[1, 2, 4]
See Also
========
next_gray, prev_gray
"""
unranked_code = GrayCode.unrank(self.superset_size,
(self.rank_gray + k) % self.cardinality)
return Subset.subset_from_bitlist(self.superset,
unranked_code)
def next_gray(self):
"""
Generates the next Gray code ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([1, 2, 3], [1, 2, 3, 4])
>>> a.next_gray().subset
[1, 3]
See Also
========
iterate_graycode, prev_gray
"""
return self.iterate_graycode(1)
def prev_gray(self):
"""
Generates the previous Gray code ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([2, 3, 4], [1, 2, 3, 4, 5])
>>> a.prev_gray().subset
[2, 3, 4, 5]
See Also
========
iterate_graycode, next_gray
"""
return self.iterate_graycode(-1)
@property
def rank_binary(self):
"""
Computes the binary ordered rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a','b','c','d'])
>>> a.rank_binary
0
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.rank_binary
3
See Also
========
iterate_binary, unrank_binary
"""
if self._rank_binary is None:
self._rank_binary = int("".join(
Subset.bitlist_from_subset(self.subset,
self.superset)), 2)
return self._rank_binary
@property
def rank_lexicographic(self):
"""
Computes the lexicographic ranking of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.rank_lexicographic
14
>>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6])
>>> a.rank_lexicographic
43
"""
if self._rank_lex is None:
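# Sketch of the recursion below: scanning superset positions in order, a
# position present in the subset costs one step, while skipping a position
# jumps over the 2**(n - i - 1) subsets that would list it next. For
# ['c', 'd'] in a 4-element superset this gives 8 + 4 + 1 + 1 == 14.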
def _ranklex(self, subset_index, i, n):
if subset_index == [] or i > n:
return 0
if i in subset_index:
subset_index.remove(i)
return 1 + _ranklex(self, subset_index, i + 1, n)
return 2**(n - i - 1) + _ranklex(self, subset_index, i + 1, n)
indices = Subset.subset_indices(self.subset, self.superset)
self._rank_lex = _ranklex(self, indices, 0, self.superset_size)
return self._rank_lex
@property
def rank_gray(self):
"""
Computes the Gray code ranking of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c','d'], ['a','b','c','d'])
>>> a.rank_gray
2
>>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6])
>>> a.rank_gray
27
See Also
========
iterate_graycode, unrank_gray
"""
if self._rank_graycode is None:
bits = Subset.bitlist_from_subset(self.subset, self.superset)
self._rank_graycode = GrayCode(len(bits), start=bits).rank
return self._rank_graycode
@property
def subset(self):
"""
Gets the subset represented by the current instance.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.subset
['c', 'd']
See Also
========
superset, size, superset_size, cardinality
"""
return self._subset
@property
def size(self):
"""
Gets the size of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.size
2
See Also
========
subset, superset, superset_size, cardinality
"""
return len(self.subset)
@property
def superset(self):
"""
Gets the superset of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.superset
['a', 'b', 'c', 'd']
See Also
========
subset, size, superset_size, cardinality
"""
return self._superset
@property
def superset_size(self):
"""
Returns the size of the superset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.superset_size
4
See Also
========
subset, superset, size, cardinality
"""
return len(self.superset)
@property
def cardinality(self):
"""
Returns the number of all possible subsets.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.cardinality
16
See Also
========
subset, superset, size, superset_size
"""
return 2**(self.superset_size)
@classmethod
def subset_from_bitlist(cls, super_set, bitlist):
"""
Gets the subset defined by the bitlist.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.subset_from_bitlist(['a', 'b', 'c', 'd'], '0011').subset
['c', 'd']
See Also
========
bitlist_from_subset
"""
if len(super_set) != len(bitlist):
raise ValueError("The sizes of the lists are not equal")
ret_set = []
for i in range(len(bitlist)):
if bitlist[i] == '1':
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
@classmethod
def bitlist_from_subset(cls, subset, superset):
"""
Gets the bitlist corresponding to a subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.bitlist_from_subset(['c', 'd'], ['a', 'b', 'c', 'd'])
'0011'
See Also
========
subset_from_bitlist
"""
bitlist = ['0'] * len(superset)
if isinstance(subset, Subset):
subset = subset.subset
for i in Subset.subset_indices(subset, superset):
bitlist[i] = '1'
return ''.join(bitlist)
@classmethod
def unrank_binary(cls, rank, superset):
"""
Gets the binary ordered subset of the specified rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset
['b']
See Also
========
iterate_binary, rank_binary
"""
bits = bin(rank)[2:].rjust(len(superset), '0')
return Subset.subset_from_bitlist(superset, bits)
@classmethod
def unrank_gray(cls, rank, superset):
"""
Gets the Gray code ordered subset of the specified rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset
['a', 'b']
>>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset
[]
See Also
========
iterate_graycode, rank_gray
"""
graycode_bitlist = GrayCode.unrank(len(superset), rank)
return Subset.subset_from_bitlist(superset, graycode_bitlist)
@classmethod
def subset_indices(cls, subset, superset):
"""Return indices of subset in superset in a list; the list is empty
if any element of ``subset`` is not in ``superset``.
Examples
========
>>> from sympy.combinatorics import Subset
>>> superset = [1, 3, 2, 5, 4]
>>> Subset.subset_indices([3, 2, 1], superset)
[1, 2, 0]
>>> Subset.subset_indices([1, 6], superset)
[]
>>> Subset.subset_indices([], superset)
[]
"""
a, b = superset, subset
sb = set(b)
d = {}
for i, ai in enumerate(a):
if ai in sb:
d[ai] = i
sb.remove(ai)
if not sb:
break
else:
return list()
return [d[bi] for bi in b]
def ksubsets(superset, k):
"""
Finds the subsets of size ``k`` in lexicographic order.
This uses ``itertools.combinations``.
Examples
========
>>> from sympy.combinatorics.subsets import ksubsets
>>> list(ksubsets([1, 2, 3], 2))
[(1, 2), (1, 3), (2, 3)]
>>> list(ksubsets([1, 2, 3, 4, 5], 2))
[(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), \
(2, 5), (3, 4), (3, 5), (4, 5)]
See Also
========
Subset
"""
return combinations(superset, k)
c3dd7e80b1efcea87fdaa94ce021520069145e389a71c99baa423f263d54efa7
"""
The Schur number S(k) is the largest integer n for which the interval [1,n]
can be partitioned into k sum-free sets. (http://mathworld.wolfram.com/SchurNumber.html)
"""
import math
from sympy.core import S
from sympy.core.basic import Basic
from sympy.core.function import Function
from sympy.core.numbers import Integer
class SchurNumber(Function):
r"""
This function creates a SchurNumber object
which is evaluated for `k \le 5`; otherwise only
the lower bound information can be retrieved.
Examples
========
>>> from sympy.combinatorics.schur_number import SchurNumber
Since S(3) = 13, the output is a number
>>> SchurNumber(3)
13
We do not know the Schur number for values greater than 5, hence
only the object is returned
>>> SchurNumber(6)
SchurNumber(6)
Now the lower bound information can be retrieved using the
lower_bound() method
>>> SchurNumber(6).lower_bound()
536
"""
@classmethod
def eval(cls, k):
if k.is_Number:
if k is S.Infinity:
return S.Infinity
if k.is_zero:
return S.Zero
if not k.is_integer or k.is_negative:
raise ValueError("k should be a positive integer")
first_known_schur_numbers = {1: 1, 2: 4, 3: 13, 4: 44, 5: 160}
if k <= 5:
return Integer(first_known_schur_numbers[k])
def lower_bound(self):
f_ = self.args[0]
# Improved lower bounds known for S(6) and S(7)
if f_ == 6:
return Integer(536)
if f_ == 7:
return Integer(1680)
# For other cases, use general expression
if f_.is_Integer:
return 3*self.func(f_ - 1).lower_bound() - 1
return (3**f_ - 1)/2
def _schur_subsets_number(n):
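# Smallest number of sum-free subsets allowed by the lower bound
# S(k) >= (3**k - 1)/2: solving n <= (3**k - 1)/2 for k gives
# k >= log_3(2*n + 1), hence the ceiling below.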
if n is S.Infinity:
raise ValueError("Input must be finite")
if n <= 0:
raise ValueError("n must be a non-zero positive integer.")
elif n <= 3:
min_k = 1
else:
min_k = math.ceil(math.log(2*n + 1, 3))
return Integer(min_k)
def schur_partition(n):
"""
This function returns the partition in the minimum number of sum-free subsets
according to the lower bound given by the Schur Number.
Parameters
==========
n: a number
n is the upper limit of the range [1, n] for which we need to find and
return the minimum number of sum-free subsets according to the lower bound
of the Schur number
Returns
=======
List of lists
List of the minimum number of sum-free subsets
Notes
=====
It is possible for some n to make the partition into fewer
subsets since the only known Schur numbers are:
S(1) = 1, S(2) = 4, S(3) = 13, S(4) = 44.
e.g. for n = 44 the lower bound from the function above is 5 subsets, but it has been proven
that it can be done with 4 subsets.
Examples
========
For n = 1, 2, 3 the answer is the set itself
>>> from sympy.combinatorics.schur_number import schur_partition
>>> schur_partition(2)
[[1, 2]]
For n > 3, the answer is the minimum number of sum-free subsets:
>>> schur_partition(5)
[[3, 2], [5], [1, 4]]
>>> schur_partition(8)
[[3, 2], [6, 5, 8], [1, 4, 7]]
"""
if isinstance(n, Basic) and not n.is_Number:
raise ValueError("Input value must be a number")
number_of_subsets = _schur_subsets_number(n)
if n == 1:
sum_free_subsets = [[1]]
elif n == 2:
sum_free_subsets = [[1, 2]]
elif n == 3:
sum_free_subsets = [[1, 2, 3]]
else:
sum_free_subsets = [[1, 4], [2, 3]]
while len(sum_free_subsets) < number_of_subsets:
sum_free_subsets = _generate_next_list(sum_free_subsets, n)
missed_elements = [3*k + 1 for k in range(len(sum_free_subsets), (n-1)//3 + 1)]
sum_free_subsets[-1] += missed_elements
return sum_free_subsets
def _generate_next_list(current_list, n):
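# Construction sketch: each sum-free set A is mapped to
# {3*a, 3*a - 1 : a in A}, which is again sum-free, and the numbers
# congruent to 1 modulo 3 (up to n) are collected into one additional set.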
new_list = []
for item in current_list:
temp_1 = [number*3 for number in item if number*3 <= n]
temp_2 = [number*3 - 1 for number in item if number*3 - 1 <= n]
new_item = temp_1 + temp_2
new_list.append(new_item)
last_list = [3*k + 1 for k in range(0, len(current_list)+1) if 3*k + 1 <= n]
new_list.append(last_list)
current_list = new_list
return current_list
68380b559b8e58c5e0dc1017392a56b15022fec044a4fd95869b1af821993258
from typing import Dict as tDict, List
from sympy.core import S
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol, symbols as _symbols
from sympy.core.sympify import CantSympify
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.iterables import flatten, is_sequence
from sympy.utilities.magic import pollute
from sympy.utilities.misc import as_int
@public
def free_group(symbols):
"""Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1))``.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> F
<free group on the generators (x, y, z)>
>>> x**2*y**-1
x**2*y**-1
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
return (_free_group,) + tuple(_free_group.generators)
@public
def xfree_group(symbols):
"""Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1)))``.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import xfree_group
>>> F, (x, y, z) = xfree_group("x, y, z")
>>> F
<free group on the generators (x, y, z)>
>>> y**2*x**-2*z**-1
y**2*x**-2*z**-1
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
return (_free_group, _free_group.generators)
@public
def vfree_group(symbols):
"""Construct a free group and inject ``f_0, f_1, ..., f_(n-1)`` as symbols
into the global namespace.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)
Examples
========
>>> from sympy.combinatorics.free_groups import vfree_group
>>> vfree_group("x, y, z")
<free group on the generators (x, y, z)>
>>> x**2*y**-2*z # noqa: F821
x**2*y**-2*z
>>> type(_)
<class 'sympy.combinatorics.free_groups.FreeGroupElement'>
"""
_free_group = FreeGroup(symbols)
pollute([sym.name for sym in _free_group.symbols], _free_group.generators)
return _free_group
def _parse_symbols(symbols):
if not symbols:
return tuple()
if isinstance(symbols, str):
return _symbols(symbols, seq=True)
elif isinstance(symbols, (Expr, FreeGroupElement)):
return (symbols,)
elif is_sequence(symbols):
if all(isinstance(s, str) for s in symbols):
return _symbols(symbols)
elif all(isinstance(s, Expr) for s in symbols):
return symbols
raise ValueError("The type of `symbols` must be one of the following: "
"a str, Symbol/Expr or a sequence of "
"one of these types")
##############################################################################
# FREE GROUP #
##############################################################################
_free_group_cache = {} # type: tDict[int, FreeGroup]
class FreeGroup(DefaultPrinting):
"""
Free group with finite or infinite number of generators. Its input API
is that of a str, Symbol/Expr or a sequence of one of
these types (which may be empty)
See Also
========
sympy.polys.rings.PolyRing
References
==========
.. [1] http://www.gap-system.org/Manuals/doc/ref/chap37.html
.. [2] https://en.wikipedia.org/wiki/Free_group
"""
is_associative = True
is_group = True
is_FreeGroup = True
is_PermutationGroup = False
relators = [] # type: List[Expr]
def __new__(cls, symbols):
symbols = tuple(_parse_symbols(symbols))
rank = len(symbols)
_hash = hash((cls.__name__, symbols, rank))
obj = _free_group_cache.get(_hash)
if obj is None:
obj = object.__new__(cls)
obj._hash = _hash
obj._rank = rank
# dtype method is used to create new instances of FreeGroupElement
obj.dtype = type("FreeGroupElement", (FreeGroupElement,), {"group": obj})
obj.symbols = symbols
obj.generators = obj._generators()
obj._gens_set = set(obj.generators)
for symbol, generator in zip(obj.symbols, obj.generators):
if isinstance(symbol, Symbol):
name = symbol.name
if hasattr(obj, name):
setattr(obj, name, generator)
_free_group_cache[_hash] = obj
return obj
def _generators(group):
"""Returns the generators of the FreeGroup.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> F.generators
(x, y, z)
"""
gens = []
for sym in group.symbols:
elm = ((sym, 1),)
gens.append(group.dtype(elm))
return tuple(gens)
def clone(self, symbols=None):
return self.__class__(symbols or self.symbols)
def __contains__(self, i):
"""Return True if ``i`` is contained in FreeGroup."""
if not isinstance(i, FreeGroupElement):
return False
group = i.group
return self == group
def __hash__(self):
return self._hash
def __len__(self):
return self.rank
def __str__(self):
if self.rank > 30:
str_form = "<free group with %s generators>" % self.rank
else:
str_form = "<free group on the generators "
gens = self.generators
str_form += str(gens) + ">"
return str_form
__repr__ = __str__
def __getitem__(self, index):
symbols = self.symbols[index]
return self.clone(symbols=symbols)
def __eq__(self, other):
"""No ``FreeGroup`` is equal to any "other" ``FreeGroup``.
"""
return self is other
def index(self, gen):
"""Return the index of the generator `gen` from ``(f_0, ..., f_(n-1))``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> F.index(y)
1
>>> F.index(x)
0
"""
if isinstance(gen, self.dtype):
return self.generators.index(gen)
else:
raise ValueError("expected a generator of Free Group %s, got %s" % (self, gen))
def order(self):
"""Return the order of the free group.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> F.order()
oo
>>> free_group("")[0].order()
1
"""
if self.rank == 0:
return S.One
else:
return S.Infinity
@property
def elements(self):
"""
Return the elements of the free group.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> (z,) = free_group("")
>>> z.elements
{<identity>}
"""
if self.rank == 0:
# A set containing Identity element of `FreeGroup` self is returned
return {self.identity}
else:
raise ValueError("Group contains infinitely many elements"
", hence cannot be represented")
@property
def rank(self):
r"""
In group theory, the `rank` of a group `G`, denoted `G.rank`,
can refer to the smallest cardinality of a generating set
for `G`, that is

.. math:: \operatorname{rank}(G) = \min\{\, |X| : X \subseteq G,\ \langle X \rangle = G \,\}.
"""
return self._rank
@property
def is_abelian(self):
"""Returns if the group is Abelian.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> f.is_abelian
False
"""
return self.rank in (0, 1)
@property
def identity(self):
"""Returns the identity element of free group."""
return self.dtype()
def contains(self, g):
"""Tests if Free Group element ``g`` belong to self, ``G``.
In mathematical terms any linear combination of generators
of a Free Group is contained in it.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> f.contains(x**3*y**2)
True
"""
if not isinstance(g, FreeGroupElement):
return False
elif self != g.group:
return False
else:
return True
def center(self):
"""Returns the center of the free group `self`."""
return {self.identity}
############################################################################
# FreeGroupElement #
############################################################################
class FreeGroupElement(CantSympify, DefaultPrinting, tuple):
"""Used to create elements of FreeGroup. It cannot be used directly to
create a free group element; instead, elements are instantiated through the
``dtype`` attribute of a ``FreeGroup`` instance.
"""
is_assoc_word = True
def new(self, init):
return self.__class__(init)
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.group, frozenset(tuple(self))))
return _hash
def copy(self):
return self.new(self)
@property
def is_identity(self):
if self.array_form == tuple():
return True
else:
return False
@property
def array_form(self):
"""
SymPy provides two different internal representations
of associative words. The first one, called the ``array_form``,
is a tuple whose elements are two-element tuples: the first
entry of each pair is the generator symbol and the second
entry is the exponent of that generator at that position.
Since elements (i.e. words) do not commute, the order of
the pairs is significant.
The structure in ``array_form`` of ``FreeGroupElement`` is of form:
``( ( symbol_of_gen, exponent ), ( , ), ... ( , ) )``
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> (x*z).array_form
((x, 1), (z, 1))
>>> (x**2*z*y*x**2).array_form
((x, 2), (z, 1), (y, 1), (x, 2))
See Also
========
letter_repr
"""
return tuple(self)
@property
def letter_form(self):
"""
The letter representation of a ``FreeGroupElement`` is a tuple
of generator symbols, with each entry corresponding to a group
generator. Inverses of the generators are represented by
negative generator symbols.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b, c, d = free_group("a b c d")
>>> (a**3).letter_form
(a, a, a)
>>> (a**2*d**-2*a*b**-4).letter_form
(a, a, -d, -d, a, -b, -b, -b, -b)
>>> (a**-2*b**3*d).letter_form
(-a, -a, b, b, b, d)
See Also
========
array_form
"""
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in self.array_form]))
def __getitem__(self, i):
group = self.group
r = self.letter_form[i]
if r.is_Symbol:
return group.dtype(((r, 1),))
else:
return group.dtype(((-r, -1),))
def index(self, gen):
if len(gen) != 1:
raise ValueError()
return (self.letter_form).index(gen.letter_form[0])
@property
def letter_form_elm(self):
"""
"""
group = self.group
r = self.letter_form
return [group.dtype(((elm,1),)) if elm.is_Symbol \
else group.dtype(((-elm,-1),)) for elm in r]
@property
def ext_rep(self):
"""This is called the External Representation of ``FreeGroupElement``
"""
return tuple(flatten(self.array_form))
def __contains__(self, gen):
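# Note: ``gen in word`` only checks whether the generator's symbol occurs
# in some syllable of ``word``; exponents and inverses are ignored.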
return gen.array_form[0][0] in tuple([r[0] for r in self.array_form])
def __str__(self):
if self.is_identity:
return "<identity>"
str_form = ""
array_form = self.array_form
for i in range(len(array_form)):
if i == len(array_form) - 1:
if array_form[i][1] == 1:
str_form += str(array_form[i][0])
else:
str_form += str(array_form[i][0]) + \
"**" + str(array_form[i][1])
else:
if array_form[i][1] == 1:
str_form += str(array_form[i][0]) + "*"
else:
str_form += str(array_form[i][0]) + \
"**" + str(array_form[i][1]) + "*"
return str_form
__repr__ = __str__
def __pow__(self, n):
n = as_int(n)
group = self.group
if n == 0:
return group.identity
if n < 0:
n = -n
return (self.inverse())**n
result = self
for i in range(n - 1):
result = result*self
# this method can be improved instead of just returning the
# multiplication of elements
return result
def __mul__(self, other):
"""Returns the product of elements belonging to the same ``FreeGroup``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> x*y**2*y**-4
x*y**-2
>>> z*y**-2
z*y**-2
>>> x**2*y*y**-1*x**-2
<identity>
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
if self.is_identity:
return other
if other.is_identity:
return self
r = list(self.array_form + other.array_form)
zero_mul_simp(r, len(self.array_form) - 1)
return group.dtype(tuple(r))
def __truediv__(self, other):
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
return self*(other.inverse())
def __rtruediv__(self, other):
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be multiplied")
return other*(self.inverse())
def __add__(self, other):
return NotImplemented
def inverse(self):
"""
Returns the inverse of a ``FreeGroupElement`` element
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> x.inverse()
x**-1
>>> (x*y).inverse()
y**-1*x**-1
"""
group = self.group
r = tuple([(i, -j) for i, j in self.array_form[::-1]])
return group.dtype(r)
def order(self):
"""Find the order of a ``FreeGroupElement``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y = free_group("x y")
>>> (x**2*y*y**-1*x**-2).order()
1
"""
if self.is_identity:
return S.One
else:
return S.Infinity
def commutator(self, other):
"""
Return the commutator of ``self`` and ``other``: ``self**-1*other**-1*self*other``
"""
group = self.group
if not isinstance(other, group.dtype):
raise ValueError("commutator of only FreeGroupElement of the same "
"FreeGroup exists")
else:
return self.inverse()*other.inverse()*self*other
def eliminate_words(self, words, _all=False, inverse=True):
'''
Replace each subword from the dictionary `words` by words[subword].
If words is a list, replace the words by the identity.
'''
again = True
new = self
if isinstance(words, dict):
while again:
again = False
for sub in words:
prev = new
new = new.eliminate_word(sub, words[sub], _all=_all, inverse=inverse)
if new != prev:
again = True
else:
while again:
again = False
for sub in words:
prev = new
new = new.eliminate_word(sub, _all=_all, inverse=inverse)
if new != prev:
again = True
return new
def eliminate_word(self, gen, by=None, _all=False, inverse=True):
"""
For an associative word `self`, a subword `gen`, and an associative
word `by` (identity by default), return the associative word obtained by
replacing each occurrence of `gen` in `self` by `by`. If `_all = True`,
the occurrences of `gen` that may appear after the first substitution will
also be replaced and so on until no occurrences are found. This might not
always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`).
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y = free_group("x y")
>>> w = x**5*y*x**2*y**-4*x
>>> w.eliminate_word( x, x**2 )
x**10*y*x**4*y**-4*x**2
>>> w.eliminate_word( x, y**-1 )
y**-11
>>> w.eliminate_word(x**5)
y*x**2*y**-4*x
>>> w.eliminate_word(x*y, y)
x**4*y*x**2*y**-4*x
See Also
========
substituted_word
"""
if by is None:
by = self.group.identity
if self.is_independent(gen) or gen == by:
return self
if gen == self:
return by
if gen**-1 == by:
_all = False
word = self
l = len(gen)
try:
i = word.subword_index(gen)
k = 1
except ValueError:
if not inverse:
return word
try:
i = word.subword_index(gen**-1)
k = -1
except ValueError:
return word
word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by)
if _all:
return word.eliminate_word(gen, by, _all=True, inverse=inverse)
else:
return word
def __len__(self):
"""
For an associative word `self`, returns the number of letters in it.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> len(w)
13
>>> len(a**17)
17
>>> len(w**0)
0
"""
return sum(abs(j) for (i, j) in self)
def __eq__(self, other):
"""
Two associative words are equal if they are words over the
same alphabet and if they are sequences of the same letters.
This is equivalent to saying that the external representations
of the words are equal.
There is no "universal" empty word, every alphabet has its own
empty word.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
>>> f
<free group on the generators (swapnil0, swapnil1)>
>>> g, swap0, swap1 = free_group("swap0 swap1")
>>> g
<free group on the generators (swap0, swap1)>
>>> swapnil0 == swapnil1
False
>>> swapnil0*swapnil1 == swapnil1/swapnil1*swapnil0*swapnil1
True
>>> swapnil0*swapnil1 == swapnil1*swapnil0
False
>>> swapnil1**0 == swap0**0
False
"""
group = self.group
if not isinstance(other, group.dtype):
return False
return tuple.__eq__(self, other)
def __lt__(self, other):
"""
The ordering of associative words is defined by length and
lexicography (this ordering is called short-lex ordering), that
is, shorter words are smaller than longer words, and words of the
same length are compared w.r.t. the lexicographical ordering induced
by the ordering of generators. Generators are sorted according
to the order in which they were created. If the generators are
invertible then each generator `g` is larger than its inverse `g^{-1}`,
and `g^{-1}` is larger than every generator that is smaller than `g`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> b < a
False
>>> a < a.inverse()
False
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be compared")
l = len(self)
m = len(other)
# implement lenlex order
if l < m:
return True
elif l > m:
return False
for i in range(l):
a = self[i].array_form[0]
b = other[i].array_form[0]
p = group.symbols.index(a[0])
q = group.symbols.index(b[0])
if p < q:
return True
elif p > q:
return False
elif a[1] < b[1]:
return True
elif a[1] > b[1]:
return False
return False
def __le__(self, other):
return (self == other or self < other)
def __gt__(self, other):
"""
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, x, y, z = free_group("x y z")
>>> y**2 > x**2
True
>>> y*z > z*y
False
>>> x > x.inverse()
True
"""
group = self.group
if not isinstance(other, group.dtype):
raise TypeError("only FreeGroup elements of same FreeGroup can "
"be compared")
return not self <= other
def __ge__(self, other):
return not self < other
def exponent_sum(self, gen):
"""
For an associative word `self` and a generator or inverse of generator
`gen`, ``exponent_sum`` returns the number of times `gen` appears in
`self` minus the number of times its inverse appears in `self`. If
neither `gen` nor its inverse occur in `self` then 0 is returned.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w = x**2*y**3
>>> w.exponent_sum(x)
2
>>> w.exponent_sum(x**-1)
-2
>>> w = x**2*y**4*x**-3
>>> w.exponent_sum(x)
-1
See Also
========
generator_count
"""
if len(gen) != 1:
raise ValueError("gen must be a generator or inverse of a generator")
s = gen.array_form[0]
return s[1]*sum([i[1] for i in self.array_form if i[0] == s[0]])
def generator_count(self, gen):
"""
For an associative word `self` and a generator `gen`,
``generator_count`` returns the multiplicity of generator
`gen` in `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w = x**2*y**3
>>> w.generator_count(x)
2
>>> w = x**2*y**4*x**-3
>>> w.generator_count(x)
5
See Also
========
exponent_sum
"""
if len(gen) != 1 or gen.array_form[0][1] < 0:
raise ValueError("gen must be a generator")
s = gen.array_form[0]
return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])
def subword(self, from_i, to_j, strict=True):
"""
For an associative word `self` and two non-negative integers `from_i` and
`to_j`, `subword` returns the subword of `self` that begins at position
`from_i` and ends at `to_j - 1`, indexing is done with origin 0.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.subword(2, 6)
a**3*b
"""
group = self.group
if not strict:
from_i = max(from_i, 0)
to_j = min(len(self), to_j)
if from_i < 0 or to_j > len(self):
raise ValueError("`from_i`, `to_j` must be positive and no greater than "
"the length of associative word")
if to_j <= from_i:
return group.identity
else:
letter_form = self.letter_form[from_i: to_j]
array_form = letter_form_to_array_form(letter_form, group)
return group.dtype(array_form)
def subword_index(self, word, start = 0):
'''
Find the index of `word` in `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**2*b*a*b**3
>>> w.subword_index(a*b*a*b)
1
'''
l = len(word)
self_lf = self.letter_form
word_lf = word.letter_form
index = None
for i in range(start,len(self_lf)-l+1):
if self_lf[i:i+l] == word_lf:
index = i
break
if index is not None:
return index
else:
raise ValueError("The given word is not a subword of self")
def is_dependent(self, word):
"""
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**4*y**-3).is_dependent(x**4*y**-2)
True
>>> (x**2*y**-1).is_dependent(x*y)
False
>>> (x*y**2*x*y**2).is_dependent(x*y**2)
True
>>> (x**12).is_dependent(x**-4)
True
See Also
========
is_independent
"""
try:
return self.subword_index(word) is not None
except ValueError:
pass
try:
return self.subword_index(word**-1) is not None
except ValueError:
return False
def is_independent(self, word):
"""
See Also
========
is_dependent
"""
return not self.is_dependent(word)
def contains_generators(self):
"""
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y, z = free_group("x, y, z")
>>> (x**2*y**-1).contains_generators()
{x, y}
>>> (x**3*z).contains_generators()
{x, z}
"""
group = self.group
gens = set()
for syllable in self.array_form:
gens.add(group.dtype(((syllable[0], 1),)))
return set(gens)
def cyclic_subword(self, from_i, to_j):
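# Like ``subword``, but positions wrap around the word cyclically.
# Illustrative sketch (assuming F, x, y = free_group("x, y")):
# (x*y*x).cyclic_subword(2, 4) returns x**2, i.e. the letters at cyclic
# positions 2 and 3 (the last letter followed by the first).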
group = self.group
l = len(self)
letter_form = self.letter_form
period1 = int(from_i/l)
if from_i >= l:
from_i -= l*period1
to_j -= l*period1
diff = to_j - from_i
word = letter_form[from_i: to_j]
period2 = int(to_j/l) - 1
word += letter_form*period2 + letter_form[:diff-l+from_i-l*period2]
word = letter_form_to_array_form(word, group)
return group.dtype(word)
def cyclic_conjugates(self):
"""Returns a words which are cyclic to the word `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w = x*y*x*y*x
>>> w.cyclic_conjugates()
{x*y*x**2*y, x**2*y*x*y, y*x*y*x**2, y*x**2*y*x, x*y*x*y*x}
>>> s = x*y*x**2*y*x
>>> s.cyclic_conjugates()
{x**2*y*x**2*y, y*x**2*y*x**2, x*y*x**2*y*x}
References
==========
.. [1] http://planetmath.org/cyclicpermutation
"""
return {self.cyclic_subword(i, i+len(self)) for i in range(len(self))}
def is_cyclic_conjugate(self, w):
"""
Checks whether words ``self``, ``w`` are cyclic conjugates.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> w1 = x**2*y**5
>>> w2 = x*y**5*x
>>> w1.is_cyclic_conjugate(w2)
True
>>> w3 = x**-1*y**5*x**-1
>>> w3.is_cyclic_conjugate(w2)
False
"""
l1 = len(self)
l2 = len(w)
if l1 != l2:
return False
w1 = self.identity_cyclic_reduction()
w2 = w.identity_cyclic_reduction()
letter1 = w1.letter_form
letter2 = w2.letter_form
str1 = ' '.join(map(str, letter1))
str2 = ' '.join(map(str, letter2))
if len(str1) != len(str2):
return False
return str1 in str2 + ' ' + str2
def number_syllables(self):
"""Returns the number of syllables of the associative word `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
>>> (swapnil1**3*swapnil0*swapnil1**-1).number_syllables()
3
"""
return len(self.array_form)
def exponent_syllable(self, i):
"""
Returns the exponent of the `i`-th syllable of the associative word
`self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.exponent_syllable( 2 )
2
"""
return self.array_form[i][1]
def generator_syllable(self, i):
"""
Returns the symbol of the generator that is involved in the
i-th syllable of the associative word `self`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.generator_syllable( 3 )
b
"""
return self.array_form[i][0]
def sub_syllables(self, from_i, to_j):
"""
`sub_syllables` returns the subword of the associative word `self` that
consists of the syllables from position `from_i` up to (but excluding) `to_j`, where
`from_i` and `to_j` must be non-negative integers and indexing is done
with origin 0.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a, b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.sub_syllables(1, 2)
b
>>> w.sub_syllables(3, 3)
<identity>
"""
if not isinstance(from_i, int) or not isinstance(to_j, int):
raise ValueError("both arguments should be integers")
group = self.group
if to_j <= from_i:
return group.identity
else:
r = tuple(self.array_form[from_i: to_j])
return group.dtype(r)
def substituted_word(self, from_i, to_j, by):
"""
Returns the associative word obtained by replacing the subword of
`self` that begins at position `from_i` and ends at position `to_j - 1`
by the associative word `by`. `from_i` and `to_j` must be positive
integers, indexing is done with origin 0. In other words,
`w.substituted_word(from_i, to_j, by)` is the product of the three
words: `w.subword(0, from_i)`, `by`, and
`w.subword(to_j, len(w))`.
See Also
========
eliminate_word
"""
lw = len(self)
if from_i >= to_j or from_i > lw or to_j > lw:
raise ValueError("values should be within bounds")
# otherwise there are four possibilities
# first if from=1 and to=lw then
if from_i == 0 and to_j == lw:
return by
elif from_i == 0: # second if from_i=1 (and to_j < lw) then
return by*self.subword(to_j, lw)
elif to_j == lw: # third if to_j=1 (and from_i > 1) then
return self.subword(0, from_i)*by
else: # finally
return self.subword(0, from_i)*by*self.subword(to_j, lw)
def is_cyclically_reduced(self):
r"""Returns whether the word is cyclically reduced or not.
A word is cyclically reduced if its first letter is not the
inverse of its last letter, i.e. a word w = `a_1 ... a_n`
is called cyclically reduced if `a_1 \ne a_n^{-1}`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**-1*x**-1).is_cyclically_reduced()
False
>>> (y*x**2*y**2).is_cyclically_reduced()
True
"""
if not self:
return True
return self[0] != self[-1]**-1
def identity_cyclic_reduction(self):
"""Return a unique cyclically reduced version of the word.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**2*x**-1).identity_cyclic_reduction()
x*y**2
>>> (x**-3*y**-1*x**5).identity_cyclic_reduction()
x**2*y**-1
References
==========
.. [1] http://planetmath.org/cyclicallyreduced
"""
word = self.copy()
group = self.group
while not word.is_cyclically_reduced():
exp1 = word.exponent_syllable(0)
exp2 = word.exponent_syllable(-1)
r = exp1 + exp2
if r == 0:
rep = word.array_form[1: word.number_syllables() - 1]
else:
rep = ((word.generator_syllable(0), exp1 + exp2),) + \
word.array_form[1: word.number_syllables() - 1]
word = group.dtype(rep)
return word
def cyclic_reduction(self, removed=False):
"""Return a cyclically reduced version of the word. Unlike
`identity_cyclic_reduction`, this will not cyclically permute
the reduced word - just remove the "unreduced" bits on either
side of it. Compare the examples with those of
`identity_cyclic_reduction`.
When `removed` is `True`, return a tuple `(word, r)` where
`r` is such that before the reduction
`self` was `r*word*r**-1`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> (x**2*y**2*x**-1).cyclic_reduction()
x*y**2
>>> (x**-3*y**-1*x**5).cyclic_reduction()
y**-1*x**2
>>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)
(y**-1*x**2, x**-3)
"""
word = self.copy()
g = self.group.identity
while not word.is_cyclically_reduced():
exp1 = abs(word.exponent_syllable(0))
exp2 = abs(word.exponent_syllable(-1))
exp = min(exp1, exp2)
start = word[0]**abs(exp)
end = word[-1]**abs(exp)
word = start**-1*word*end**-1
g = g*start
if removed:
return word, g
return word
def power_of(self, other):
'''
Check if `self == other**n` for some integer n.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> ((x*y)**2).power_of(x*y)
True
>>> (x**-3*y**-2*x**3).power_of(x**-3*y*x**3)
True
'''
if self.is_identity:
return True
l = len(other)
if l == 1:
# self has to be a power of one generator
gens = self.contains_generators()
s = other in gens or other**-1 in gens
return len(gens) == 1 and s
# if self is not cyclically reduced and it is a power of other,
# other isn't cyclically reduced and the parts removed during
# their reduction must be equal
reduced, r1 = self.cyclic_reduction(removed=True)
if not r1.is_identity:
other, r2 = other.cyclic_reduction(removed=True)
if r1 == r2:
return reduced.power_of(other)
return False
if len(self) < l or len(self) % l:
return False
prefix = self.subword(0, l)
if prefix == other or prefix**-1 == other:
rest = self.subword(l, len(self))
return rest.power_of(other)
return False
def letter_form_to_array_form(array_form, group):
"""
This function converts a list that may contain consecutive repetitions of
elements. It returns a new list in which each run of consecutive repetitions
is replaced by a two-element tuple whose first entry is the value and whose
second entry is the number of consecutive repetitions of that value
(negated when the value is the inverse of a generator symbol).
"""
a = list(array_form[:])
new_array = []
n = 1
symbols = group.symbols
for i in range(len(a)):
if i == len(a) - 1:
if a[i] == a[i - 1]:
if (-a[i]) in symbols:
new_array.append((-a[i], -n))
else:
new_array.append((a[i], n))
else:
if (-a[i]) in symbols:
new_array.append((-a[i], -1))
else:
new_array.append((a[i], 1))
return new_array
elif a[i] == a[i + 1]:
n += 1
else:
if (-a[i]) in symbols:
new_array.append((-a[i], -n))
else:
new_array.append((a[i], n))
n = 1
def zero_mul_simp(l, index):
"""Used to combine two reduced words."""
while index >=0 and index < len(l) - 1 and l[index][0] == l[index + 1][0]:
exp = l[index][1] + l[index + 1][1]
base = l[index][0]
l[index] = (base, exp)
del l[index + 1]
if l[index][1] == 0:
del l[index]
index -= 1
134a2d9fbd903d19ea22366c37074c2f058f4e1b7760962f86df11876c1f602e
from sympy.combinatorics.permutations import Permutation, _af_rmul, \
_af_invert, _af_new
from sympy.combinatorics.perm_groups import PermutationGroup, _orbit, \
_orbit_transversal
from sympy.combinatorics.util import _distribute_gens_by_base, \
_orbits_transversals_from_bsgs
"""
References for tensor canonicalization:
[1] R. Portugal "Algorithmic simplification of tensor expressions",
J. Phys. A 32 (1999) 7779-7789
[2] R. Portugal, B.F. Svaiter "Group-theoretic Approach for Symbolic
Tensor Manipulation: I. Free Indices"
arXiv:math-ph/0107031v1
[3] L.R.U. Manssur, R. Portugal "Group-theoretic Approach for Symbolic
Tensor Manipulation: II. Dummy Indices"
arXiv:math-ph/0107032v1
[4] xperm.c part of XPerm written by J. M. Martin-Garcia
http://www.xact.es/index.html
"""
def dummy_sgs(dummies, sym, n):
"""
Return the strong generators for dummy indices.
Parameters
==========
dummies : List of dummy indices.
`dummies[2k], dummies[2k+1]` are paired indices.
In base form, the dummy indices are always in
consecutive positions.
sym : symmetry under interchange of contracted dummies::
* None no symmetry
* 0 commuting
* 1 anticommuting
n : number of indices
Examples
========
>>> from sympy.combinatorics.tensor_can import dummy_sgs
>>> dummy_sgs(list(range(2, 8)), 0, 8)
[[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9],
[0, 1, 2, 3, 6, 7, 4, 5, 8, 9]]
"""
if len(dummies) > n:
raise ValueError("List too large")
res = []
# exchange of contravariant and covariant indices
if sym is not None:
for j in dummies[::2]:
a = list(range(n + 2))
if sym == 1:
a[n] = n + 1
a[n + 1] = n
a[j], a[j + 1] = a[j + 1], a[j]
res.append(a)
# rename dummy indices
for j in dummies[:-3:2]:
a = list(range(n + 2))
a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1]
res.append(a)
return res
def _min_dummies(dummies, sym, indices):
"""
Return list of minima of the orbits of indices in group of dummies.
See ``double_coset_can_rep`` for the description of ``dummies`` and ``sym``.
``indices`` is the initial list of dummy indices.
Examples
========
>>> from sympy.combinatorics.tensor_can import _min_dummies
>>> _min_dummies([list(range(2, 8))], [0], list(range(10)))
[0, 1, 2, 2, 2, 2, 2, 2, 8, 9]
"""
num_types = len(sym)
m = []
for dx in dummies:
if dx:
m.append(min(dx))
else:
m.append(None)
res = indices[:]
for i in range(num_types):
for c, i in enumerate(indices):
for j in range(num_types):
if i in dummies[j]:
res[c] = m[j]
break
return res
def _trace_S(s, j, b, S_cosets):
"""
Return the representative h satisfying s[h[b]] == j.
If there is no such representative, return None.
"""
for h in S_cosets[b]:
if s[h[b]] == j:
return h
return None
def _trace_D(gj, p_i, Dxtrav):
"""
Return the representative h satisfying h[gj] == p_i.
If there is no such representative, return None.
"""
for h in Dxtrav:
if h[gj] == p_i:
return h
return None
def _dumx_remove(dumx, dumx_flat, p0):
"""
Remove ``p0`` and its paired dummy index from ``dumx`` and ``dumx_flat`` in place.
"""
res = []
for dx in dumx:
if p0 not in dx:
res.append(dx)
continue
k = dx.index(p0)
if k % 2 == 0:
p0_paired = dx[k + 1]
else:
p0_paired = dx[k - 1]
dx.remove(p0)
dx.remove(p0_paired)
dumx_flat.remove(p0)
dumx_flat.remove(p0_paired)
res.append(dx)
def transversal2coset(size, base, transversal):
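# Expand the transversals, given per base point, into a list indexed by all
# positions: non-base positions get the trivial coset [identity], and
# trailing trivial entries are trimmed from the result.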
a = []
j = 0
for i in range(size):
if i in base:
a.append(sorted(transversal[j].values()))
j += 1
else:
a.append([list(range(size))])
j = len(a) - 1
while a[j] == [list(range(size))]:
j -= 1
return a[:j + 1]
def double_coset_can_rep(dummies, sym, b_S, sgens, S_transversals, g):
r"""
Butler-Portugal algorithm for tensor canonicalization with dummy indices.
Parameters
==========
dummies
list of lists of dummy indices,
one list for each type of index;
the dummy indices are put in order contravariant, covariant
[d0, -d0, d1, -d1, ...].
sym
list of the symmetries of the index metric for each type.
possible symmetries of the metrics
* 0 symmetric
* 1 antisymmetric
* None no symmetry
b_S
base of a minimal slot symmetry BSGS.
sgens
generators of the slot symmetry BSGS.
S_transversals
transversals for the slot BSGS.
g
permutation representing the tensor.
Returns
=======
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Notes
=====
A tensor with dummy indices can be represented in a number
of equivalent ways which typically grows exponentially with
the number of indices. To be able to establish if two tensors
with many indices are equal becomes computationally very slow
in absence of an efficient algorithm.
The Butler-Portugal algorithm [3] is an efficient algorithm to
put tensors in canonical form, solving the above problem.
Portugal observed that a tensor can be represented by a permutation,
and that the class of tensors equivalent to it under slot and dummy
symmetries is equivalent to the double coset `D*g*S`
(Note: in this documentation we use the conventions for multiplication
of permutations p, q with (p*q)(i) = p[q[i]] which is opposite
to the one used in the Permutation class)
Using the algorithm by Butler to find a representative of the
double coset one can find a canonical form for the tensor.
To see this correspondence,
let `g` be a permutation in array form; a tensor with indices `ind`
(the indices including both the contravariant and the covariant ones)
can be written as
`t = T(ind[g[0]], \dots, ind[g[n-1]])`,
where `n = len(ind)`;
`g` has size `n + 2`, the last two indices for the sign of the tensor
(trick introduced in [4]).
A slot symmetry transformation `s` is a permutation acting on the slots
`t \rightarrow T(ind[(g*s)[0]], \dots, ind[(g*s)[n-1]])`
A dummy symmetry transformation acts on `ind`
`t \rightarrow T(ind[(d*g)[0]], \dots, ind[(d*g)[n-1]])`
Being interested only in the transformations of the tensor under
these symmetries, one can represent the tensor by `g`, which transforms
as
`g -> d*g*s`, so it belongs to the coset `D*g*S`, or in other words
to the set of all permutations allowed by the slot and dummy symmetries.
Let us explain the conventions by an example.
Given a tensor `T^{d3 d2 d1}{}_{d1 d2 d3}` with the slot symmetries
`T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}`
and symmetric metric, find the tensor equivalent to it which
is the lowest under the ordering of indices:
lexicographic ordering `d1, d2, d3` and then contravariant
before covariant index; that is the canonical form of the tensor.
The canonical form is `-T^{d1 d2 d3}{}_{d1 d2 d3}`
obtained using `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`.
To convert this problem in the input for this function,
use the following ordering of the index names
(- for covariant for short) `d1, -d1, d2, -d2, d3, -d3`
`T^{d3 d2 d1}{}_{d1 d2 d3}` corresponds to `g = [4, 2, 0, 1, 3, 5, 6, 7]`
where the last two indices are for the sign
`sgens = [Permutation(0, 2)(6, 7), Permutation(0, 4)(6, 7)]`
sgens[0] is the slot symmetry `-(0, 2)`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`
sgens[1] is the slot symmetry `-(0, 4)`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}`
The dummy symmetry group D is generated by the strong base generators
`[(0, 1), (2, 3), (4, 5), (0, 2)(1, 3), (0, 4)(1, 5)]`
where the first three interchange covariant and contravariant
positions of the same index (d1 <-> -d1) and the last two interchange
the dummy indices themselves (d1 <-> d2).
The dummy symmetry acts from the left
`d = [1, 0, 2, 3, 4, 5, 6, 7]` exchange `d1 \leftrightarrow -d1`
`T^{d3 d2 d1}{}_{d1 d2 d3} == T^{d3 d2}{}_{d1}{}^{d1}{}_{d2 d3}`
`g=[4, 2, 0, 1, 3, 5, 6, 7] -> [4, 2, 1, 0, 3, 5, 6, 7] = _af_rmul(d, g)`
which differs from `_af_rmul(g, d)`.
The slot symmetry acts from the right
`s = [2, 1, 0, 3, 4, 5, 7, 6]` exchanges slots 0 and 2 and changes sign
`T^{d3 d2 d1}{}_{d1 d2 d3} == -T^{d1 d2 d3}{}_{d1 d2 d3}`
`g=[4,2,0,1,3,5,6,7] -> [0, 2, 4, 1, 3, 5, 7, 6] = _af_rmul(g, s)`
Example in which the tensor is zero, same slot symmetries as above:
`T^{d2}{}_{d1 d3}{}^{d1 d3}{}_{d2}`
`= -T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,4)`;
`= T_{d3 d1}{}^{d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,2)`;
`= T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` symmetric metric;
`= 0` since two of these lines give tensors that differ only by sign.
The double coset D*g*S consists of permutations `h = d*g*s` corresponding
to equivalent tensors; if there are two `h` which are the same apart
from the sign, return zero; otherwise
choose as representative the tensor with indices
ordered lexicographically according to `[d1, -d1, d2, -d2, d3, -d3]`
that is ``rep = min(D*g*S) = min([d*g*s for d in D for s in S])``
The indices are fixed one by one; first choose the lowest index
for slot 0, then the lowest remaining index for slot 1, etc.
Doing this one obtains a chain of stabilizers
`S \rightarrow S_{b0} \rightarrow S_{b0,b1} \rightarrow \dots` and
`D \rightarrow D_{p0} \rightarrow D_{p0,p1} \rightarrow \dots`
where ``[b0, b1, ...] = range(b)`` is a base of the symmetric group;
the strong base `b_S` of S is an ordered sublist of it;
therefore it is sufficient to compute once the
strong base generators of S using the Schreier-Sims algorithm;
the stabilizers of the strong base generators are the
strong base generators of the stabilizer subgroup.
``dbase = [p0, p1, ...]`` is not in general in lexicographic order,
so that one must recompute the strong base generators each time;
however this is trivial, there is no need to use the Schreier-Sims
algorithm for D.
The algorithm keeps a TAB of elements `(s_i, d_i, h_i)`
where `h_i = d_i \times g \times s_i` satisfying `h_i[j] = p_j` for `0 \le j < i`
starting from `s_0 = id, d_0 = id, h_0 = g`.
The equations `h_0[0] = p_0, h_1[1] = p_1, \dots` are solved in this order,
choosing each time the lowest possible value of p_i
For `j < i`
`d_i*g*s_i*S_{b_0, \dots, b_{i-1}}*b_j = D_{p_0, \dots, p_{i-1}}*p_j`
so that for dx in `D_{p_0,\dots,p_{i-1}}` and sx in
`S_{base[0], \dots, base[i-1]}` one has `dx*d_i*g*s_i*sx*b_j = p_j`
Search for dx, sx such that this equation holds for `j = i`;
it can be written as `s_i*sx*b_j = J, dx*d_i*g*J = p_j`
`sx*b_j = s_i**-1*J; sx = trace(s_i**-1, S_{b_0,...,b_{i-1}})`
`dx**-1*p_j = d_i*g*J; dx = trace(d_i*g*J, D_{p_0,...,p_{i-1}})`
`s_{i+1} = s_i*trace(s_i**-1*J, S_{b_0,...,b_{i-1}})`
`d_{i+1} = trace(d_i*g*J, D_{p_0,...,p_{i-1}})**-1*d_i`
`h_{i+1}*b_i = d_{i+1}*g*s_{i+1}*b_i = p_i`
`h_n*b_j = p_j` for all j, so that `h_n` is the solution.
Add the found `(s, d, h)` to TAB1.
At the end of the iteration sort TAB1 with respect to the `h`;
    if there are two consecutive `h` in TAB1 which differ only by the
sign, the tensor is zero, so return 0;
if there are two consecutive `h` which are equal, keep only one.
Then stabilize the slot generators under `i` and the dummy generators
under `p_i`.
Assign `TAB = TAB1` at the end of the iteration step.
At the end `TAB` contains a unique `(s, d, h)`, since all the slots
of the tensor `h` have been fixed to have the minimum value according
to the symmetries. The algorithm returns `h`.
It is important that the slot BSGS has lexicographic minimal base,
otherwise there is an `i` which does not belong to the slot base
    for which `p_i` is fixed by the dummy symmetry alone, while `i`
    is not fixed by the slot stabilizer, so `p_i` is not in
    general the minimal value.
This algorithm differs slightly from the original algorithm [3]:
the canonical form is minimal lexicographically, and
the BSGS has minimal base under lexicographic order.
Equal tensors `h` are eliminated from TAB.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.tensor_can import double_coset_can_rep, get_transversals
>>> gens = [Permutation(x) for x in [[2, 1, 0, 3, 4, 5, 7, 6], [4, 1, 2, 3, 0, 5, 7, 6]]]
>>> base = [0, 2]
>>> g = Permutation([4, 2, 0, 1, 3, 5, 6, 7])
>>> transversals = get_transversals(base, gens)
>>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g)
[0, 1, 2, 3, 4, 5, 7, 6]
>>> g = Permutation([4, 1, 3, 0, 5, 2, 6, 7])
>>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g)
0
"""
size = g.size
g = g.array_form
num_dummies = size - 2
indices = list(range(num_dummies))
all_metrics_with_sym = not any(_ is None for _ in sym)
num_types = len(sym)
dumx = dummies[:]
dumx_flat = []
for dx in dumx:
dumx_flat.extend(dx)
b_S = b_S[:]
sgensx = [h._array_form for h in sgens]
if b_S:
S_transversals = transversal2coset(size, b_S, S_transversals)
# strong generating set for D
dsgsx = []
for i in range(num_types):
dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies))
idn = list(range(size))
# TAB = list of entries (s, d, h) where h = _af_rmuln(d,g,s)
# for short, in the following d*g*s means _af_rmuln(d,g,s)
TAB = [(idn, idn, g)]
for i in range(size - 2):
b = i
testb = b in b_S and sgensx
if testb:
sgensx1 = [_af_new(_) for _ in sgensx]
deltab = _orbit(size, sgensx1, b)
else:
deltab = {b}
# p1 = min(IMAGES) = min(Union D_p*h*deltab for h in TAB)
if all_metrics_with_sym:
md = _min_dummies(dumx, sym, indices)
else:
md = [min(_orbit(size, [_af_new(
ddx) for ddx in dsgsx], ii)) for ii in range(size - 2)]
p_i = min([min([md[h[x]] for x in deltab]) for s, d, h in TAB])
dsgsx1 = [_af_new(_) for _ in dsgsx]
Dxtrav = _orbit_transversal(size, dsgsx1, p_i, False, af=True) \
if dsgsx else None
if Dxtrav:
Dxtrav = [_af_invert(x) for x in Dxtrav]
# compute the orbit of p_i
for ii in range(num_types):
if p_i in dumx[ii]:
# the orbit is made by all the indices in dum[ii]
if sym[ii] is not None:
deltap = dumx[ii]
else:
# the orbit is made by all the even indices if p_i
# is even, by all the odd indices if p_i is odd
p_i_index = dumx[ii].index(p_i) % 2
deltap = dumx[ii][p_i_index::2]
break
else:
deltap = [p_i]
TAB1 = []
while TAB:
s, d, h = TAB.pop()
if min([md[h[x]] for x in deltab]) != p_i:
continue
deltab1 = [x for x in deltab if md[h[x]] == p_i]
# NEXT = s*deltab1 intersection (d*g)**-1*deltap
dg = _af_rmul(d, g)
dginv = _af_invert(dg)
sdeltab = [s[x] for x in deltab1]
gdeltap = [dginv[x] for x in deltap]
NEXT = [x for x in sdeltab if x in gdeltap]
# d, s satisfy
# d*g*s*base[i-1] = p_{i-1}; using the stabilizers
# d*g*s*S_{base[0],...,base[i-1]}*base[i-1] =
# D_{p_0,...,p_{i-1}}*p_{i-1}
# so that to find d1, s1 satisfying d1*g*s1*b = p_i
# one can look for dx in D_{p_0,...,p_{i-1}} and
# sx in S_{base[0],...,base[i-1]}
# d1 = dx*d; s1 = s*sx
# d1*g*s1*b = dx*d*g*s*sx*b = p_i
for j in NEXT:
if testb:
# solve s1*b = j with s1 = s*sx for some element sx
# of the stabilizer of ..., base[i-1]
# sx*b = s**-1*j; sx = _trace_S(s, j,...)
# s1 = s*trace_S(s**-1*j,...)
s1 = _trace_S(s, j, b, S_transversals)
if not s1:
continue
else:
s1 = [s[ix] for ix in s1]
else:
s1 = s
# assert s1[b] == j # invariant
                    # solve d1*g*j = p_i with d1 = dx*d for some element dx
# of the stabilizer of ..., p_{i-1}
# dx**-1*p_i = d*g*j; dx**-1 = trace_D(d*g*j,...)
# d1 = trace_D(d*g*j,...)**-1*d
# to save an inversion in the inner loop; notice we did
# Dxtrav = [perm_af_invert(x) for x in Dxtrav] out of the loop
if Dxtrav:
d1 = _trace_D(dg[j], p_i, Dxtrav)
if not d1:
continue
else:
if p_i != dg[j]:
continue
d1 = idn
assert d1[dg[j]] == p_i # invariant
d1 = [d1[ix] for ix in d]
h1 = [d1[g[ix]] for ix in s1]
# assert h1[b] == p_i # invariant
TAB1.append((s1, d1, h1))
# if TAB contains equal permutations, keep only one of them;
# if TAB contains equal permutations up to the sign, return 0
TAB1.sort(key=lambda x: x[-1])
prev = [0] * size
while TAB1:
s, d, h = TAB1.pop()
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
else:
TAB.append((s, d, h))
prev = h
# stabilize the SGS
sgensx = [h for h in sgensx if h[b] == b]
if b in b_S:
b_S.remove(b)
_dumx_remove(dumx, dumx_flat, p_i)
dsgsx = []
for i in range(num_types):
dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies))
return TAB[0][-1]
def canonical_free(base, gens, g, num_free):
"""
Canonicalization of a tensor with respect to free indices
choosing the minimum with respect to lexicographical ordering
in the free indices.
Explanation
===========
``base``, ``gens`` BSGS for slot permutation group
``g`` permutation representing the tensor
``num_free`` number of free indices
The indices must be ordered with first the free indices
See explanation in double_coset_can_rep
The algorithm is a variation of the one given in [2].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import canonical_free
>>> gens = [[1, 0, 2, 3, 5, 4], [2, 3, 0, 1, 4, 5],[0, 1, 3, 2, 5, 4]]
>>> gens = [Permutation(h) for h in gens]
>>> base = [0, 2]
>>> g = Permutation([2, 1, 0, 3, 4, 5])
>>> canonical_free(base, gens, g, 4)
[0, 3, 1, 2, 5, 4]
Consider the product of Riemann tensors
``T = R^{a}_{d0}^{d1,d2}*R_{d2,d1}^{d0,b}``
The order of the indices is ``[a, b, d0, -d0, d1, -d1, d2, -d2]``
The permutation corresponding to the tensor is
``g = [0, 3, 4, 6, 7, 5, 2, 1, 8, 9]``
    In particular ``a`` is in position ``0``, ``b`` is in position ``7``.
    Use the slot symmetries to put `T` in a form which is minimal
    in lexicographic order in the free indices ``a`` and ``b``, e.g.
``-R^{a}_{d0}^{d1,d2}*R^{b,d0}_{d2,d1}`` corresponding to
``[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]``
>>> from sympy.combinatorics.tensor_can import riemann_bsgs, tensor_gens
>>> base, gens = riemann_bsgs
>>> size, sbase, sgens = tensor_gens(base, gens, [[], []], 0)
>>> g = Permutation([0, 3, 4, 6, 7, 5, 2, 1, 8, 9])
>>> canonical_free(sbase, [Permutation(h) for h in sgens], g, 2)
[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]
"""
g = g.array_form
size = len(g)
if not base:
return g[:]
transversals = get_transversals(base, gens)
for x in sorted(g[:-2]):
if x not in base:
base.append(x)
h = g
for i, transv in enumerate(transversals):
h_i = [size]*num_free
# find the element s in transversals[i] such that
# _af_rmul(h, s) has its free elements with the lowest position in h
s = None
for sk in transv.values():
h1 = _af_rmul(h, sk)
hi = [h1.index(ix) for ix in range(num_free)]
if hi < h_i:
h_i = hi
s = sk
if s:
h = _af_rmul(h, s)
return h
def _get_map_slots(size, fixed_slots):
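    # Map each slot that is not in ``fixed_slots`` to its position among the
    # non-fixed slots; entries at fixed slots are not used by the caller.
    # Illustration (not from the original source): _get_map_slots(6, {1, 3})
    # gives [0, 1, 1, 3, 2, 3], where only positions 0, 2, 4, 5 are meaningful.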
res = list(range(size))
pos = 0
for i in range(size):
if i in fixed_slots:
continue
res[i] = pos
pos += 1
return res
def _lift_sgens(size, fixed_slots, free, s):
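    # Lift the reduced array form ``s`` (defined on the dummy slots only) back
    # to the full size: the free indices are reinserted at ``fixed_slots``
    # (matched to slots in sorted order) and every entry of ``s`` is shifted
    # up by the number of free indices.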
a = []
j = k = 0
fd = list(zip(fixed_slots, free))
fd = [y for x, y in sorted(fd)]
num_free = len(free)
for i in range(size):
if i in fixed_slots:
a.append(fd[k])
k += 1
else:
a.append(s[j] + num_free)
j += 1
return a
def canonicalize(g, dummies, msym, *v):
"""
canonicalize tensor formed by tensors
Parameters
==========
g : permutation representing the tensor
dummies : list representing the dummy indices
it can be a list of dummy indices of the same type
or a list of lists of dummy indices, one list for each
type of index;
the dummy indices must come after the free indices,
and put in order contravariant, covariant
[d0, -d0, d1,-d1,...]
msym : symmetry of the metric(s)
it can be an integer or a list;
in the first case it is the symmetry of the dummy index metric;
in the second case it is the list of the symmetries of the
index metric for each type
v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i : BSGS for tensors of this type.
The BSGS should have minimal base under lexicographic ordering;
        if not, an attempt is made to get the minimal BSGS;
in case of failure,
canonicalize_naive is used, which is much slower.
n_i : number of tensors of type `i`.
sym_i : symmetry under exchange of component tensors of type `i`.
Both for msym and sym_i the cases are
* None no symmetry
* 0 commuting
* 1 anticommuting
Returns
=======
0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Algorithm
=========
First one uses canonical_free to get the minimum tensor under
lexicographic order, using only the slot symmetries.
If the component tensors have not minimal BSGS, it is attempted
to find it; if the attempt fails canonicalize_naive
is used instead.
Compute the residual slot symmetry keeping fixed the free indices
using tensor_gens(base, gens, list_free_indices, sym).
Reduce the problem eliminating the free indices.
Then use double_coset_can_rep and lift back the result reintroducing
the free indices.
Examples
========
one type of index with commuting metric;
`A_{a b}` and `B_{a b}` antisymmetric and commuting
`T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`
`ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices
g = [1, 3, 0, 5, 4, 2, 6, 7]
`T_c = 0`
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product
>>> from sympy.combinatorics import Permutation
>>> base2a, gens2a = get_symmetric_group_sgs(2, 1)
>>> t0 = (base2a, gens2a, 1, 0)
>>> t1 = (base2a, gens2a, 2, 0)
>>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
>>> canonicalize(g, range(6), 0, t0, t1)
0
same as above, but with `B_{a b}` anticommuting
`T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`
can = [0,2,1,4,3,5,7,6]
>>> t1 = (base2a, gens2a, 2, 1)
>>> canonicalize(g, range(6), 0, t0, t1)
[0, 2, 1, 4, 3, 5, 7, 6]
two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,
both with commuting metric
`f^{a b c}` antisymmetric, commuting
`A_{m a}` no symmetry, commuting
`T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`
ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]
g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]
The canonical tensor is
`T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`
can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]
>>> base_f, gens_f = get_symmetric_group_sgs(3, 1)
>>> base1, gens1 = get_symmetric_group_sgs(1)
>>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
>>> t0 = (base_f, gens_f, 2, 0)
>>> t1 = (base_A, gens_A, 4, 0)
>>> dummies = [range(2, 10), range(10, 14)]
>>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15])
>>> canonicalize(g, dummies, [0, 0], t0, t1)
[0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]
"""
from sympy.combinatorics.testutil import canonicalize_naive
if not isinstance(msym, list):
        if msym not in (0, 1, None):
raise ValueError('msym must be 0, 1 or None')
num_types = 1
else:
num_types = len(msym)
if not all(msymx in (0, 1, None) for msymx in msym):
raise ValueError('msym entries must be 0, 1 or None')
if len(dummies) != num_types:
raise ValueError(
'dummies and msym must have the same number of elements')
size = g.size
num_tensors = 0
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
# check that the BSGS is minimal;
# this property is used in double_coset_can_rep;
# if it is not minimal use canonicalize_naive
if not _is_minimal_bsgs(base_i, gens_i):
mbsgs = get_minimal_bsgs(base_i, gens_i)
if not mbsgs:
can = canonicalize_naive(g, dummies, msym, *v)
return can
base_i, gens_i = mbsgs
v1.append((base_i, gens_i, [[]] * n_i, sym_i))
num_tensors += n_i
if num_types == 1 and not isinstance(msym, list):
dummies = [dummies]
msym = [msym]
flat_dummies = []
for dumx in dummies:
flat_dummies.extend(dumx)
if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):
raise ValueError('dummies is not valid')
# slot symmetry of the tensor
size1, sbase, sgens = gens_products(*v1)
if size != size1:
raise ValueError(
'g has size %d, generators have size %d' % (size, size1))
free = [i for i in range(size - 2) if i not in flat_dummies]
num_free = len(free)
# g1 minimal tensor under slot symmetry
g1 = canonical_free(sbase, sgens, g, num_free)
if not flat_dummies:
return g1
# save the sign of g1
sign = 0 if g1[-1] == size - 1 else 1
# the free indices are kept fixed.
    # Determine free_i, the list of slots of tensors which are fixed
    # since they are occupied by free indices.
start = 0
for i in range(len(v)):
free_i = []
base_i, gens_i, n_i, sym_i = v[i]
len_tens = gens_i[0].size - 2
        # for each component tensor get a list of fixed slots
for j in range(n_i):
# get the elements corresponding to the component tensor
h = g1[start:(start + len_tens)]
fr = []
# get the positions of the fixed elements in h
for k in free:
if k in h:
fr.append(h.index(k))
free_i.append(fr)
start += len_tens
v1[i] = (base_i, gens_i, free_i, sym_i)
# BSGS of the tensor with fixed free indices
    # if tensor_gens fails in gens_products, use canonicalize_naive
size, sbase, sgens = gens_products(*v1)
# reduce the permutations getting rid of the free indices
pos_free = [g1.index(x) for x in range(num_free)]
size_red = size - num_free
g1_red = [x - num_free for x in g1 if x in flat_dummies]
if sign:
g1_red.extend([size_red - 1, size_red - 2])
else:
g1_red.extend([size_red - 2, size_red - 1])
map_slots = _get_map_slots(size, pos_free)
sbase_red = [map_slots[i] for i in sbase if i not in pos_free]
sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens]
dummies_red = [[x - num_free for x in y] for y in dummies]
transv_red = get_transversals(sbase_red, sgens_red)
g1_red = _af_new(g1_red)
g2 = double_coset_can_rep(
dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)
if g2 == 0:
return 0
# lift to the case with the free indices
g3 = _lift_sgens(size, pos_free, free, g2)
return g3
def perm_af_direct_product(gens1, gens2, signed=True):
"""
Direct products of the generators gens1 and gens2.
Examples
========
>>> from sympy.combinatorics.tensor_can import perm_af_direct_product
>>> gens1 = [[1, 0, 2, 3], [0, 1, 3, 2]]
>>> gens2 = [[1, 0]]
>>> perm_af_direct_product(gens1, gens2, False)
[[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]]
>>> gens1 = [[1, 0, 2, 3, 5, 4], [0, 1, 3, 2, 4, 5]]
>>> gens2 = [[1, 0, 2, 3]]
>>> perm_af_direct_product(gens1, gens2, True)
[[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]]
"""
gens1 = [list(x) for x in gens1]
gens2 = [list(x) for x in gens2]
s = 2 if signed else 0
n1 = len(gens1[0]) - s
n2 = len(gens2[0]) - s
start = list(range(n1))
end = list(range(n1, n1 + n2))
if signed:
gens1 = [gen[:-2] + end + [gen[-2] + n2, gen[-1] + n2]
for gen in gens1]
gens2 = [start + [x + n1 for x in gen] for gen in gens2]
else:
gens1 = [gen + end for gen in gens1]
gens2 = [start + [x + n1 for x in gen] for gen in gens2]
res = gens1 + gens2
return res
def bsgs_direct_product(base1, gens1, base2, gens2, signed=True):
"""
Direct product of two BSGS.
Parameters
==========
base1 : base of the first BSGS.
gens1 : strong generating sequence of the first BSGS.
base2, gens2 : similarly for the second BSGS.
signed : flag for signed permutations.
Examples
========
>>> from sympy.combinatorics.tensor_can import (get_symmetric_group_sgs, bsgs_direct_product)
>>> base1, gens1 = get_symmetric_group_sgs(1)
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> bsgs_direct_product(base1, gens1, base2, gens2)
([1], [(4)(1 2)])
"""
s = 2 if signed else 0
n1 = gens1[0].size - s
base = list(base1)
base += [x + n1 for x in base2]
gens1 = [h._array_form for h in gens1]
gens2 = [h._array_form for h in gens2]
gens = perm_af_direct_product(gens1, gens2, signed)
size = len(gens[0])
id_af = list(range(size))
gens = [h for h in gens if h != id_af]
if not gens:
gens = [id_af]
return base, [_af_new(h) for h in gens]
def get_symmetric_group_sgs(n, antisym=False):
"""
Return base, gens of the minimal BSGS for (anti)symmetric tensor
Parameters
==========
``n``: rank of the tensor
``antisym`` : bool
``antisym = False`` symmetric tensor
``antisym = True`` antisymmetric tensor
Examples
========
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> get_symmetric_group_sgs(3)
([0, 1], [(4)(0 1), (4)(1 2)])
"""
if n == 1:
return [], [_af_new(list(range(3)))]
gens = [Permutation(n - 1)(i, i + 1)._array_form for i in range(n - 1)]
if antisym == 0:
gens = [x + [n, n + 1] for x in gens]
else:
gens = [x + [n + 1, n] for x in gens]
base = list(range(n - 1))
return base, [_af_new(h) for h in gens]
riemann_bsgs = [0, 2], [Permutation(0, 1)(4, 5), Permutation(2, 3)(4, 5),
Permutation(5)(0, 2)(1, 3)]
def get_transversals(base, gens):
"""
Return transversals for the group with BSGS base, gens
"""
if not base:
return []
stabs = _distribute_gens_by_base(base, gens)
orbits, transversals = _orbits_transversals_from_bsgs(base, stabs)
transversals = [{x: h._array_form for x, h in y.items()} for y in
transversals]
return transversals
def _is_minimal_bsgs(base, gens):
"""
    Check if the BSGS has minimal base under lexicographic order.
base, gens BSGS
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import riemann_bsgs, _is_minimal_bsgs
>>> _is_minimal_bsgs(*riemann_bsgs)
True
>>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)]))
>>> _is_minimal_bsgs(*riemann_bsgs1)
False
"""
base1 = []
sgs1 = gens[:]
size = gens[0].size
for i in range(size):
if not all(h._array_form[i] == i for h in sgs1):
base1.append(i)
sgs1 = [h for h in sgs1 if h._array_form[i] == i]
return base1 == base
def get_minimal_bsgs(base, gens):
"""
    Compute a minimal BSGS.
    base, gens BSGS
    If base, gens is a minimal BSGS return it; else return a minimal BSGS;
    if it fails to find one, it returns None.
    TODO: use baseswap in the case in which it fails to find a
    minimal BSGS
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import get_minimal_bsgs
>>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)]))
>>> get_minimal_bsgs(*riemann_bsgs1)
([0, 2], [(0 1)(4 5), (5)(0 2)(1 3), (2 3)(4 5)])
"""
G = PermutationGroup(gens)
base, gens = G.schreier_sims_incremental()
if not _is_minimal_bsgs(base, gens):
return None
return base, gens
def tensor_gens(base, gens, list_free_indices, sym=0):
"""
Returns size, res_base, res_gens BSGS for n tensors of the
same type.
Explanation
===========
base, gens BSGS for tensors of this type
list_free_indices list of the slots occupied by fixed indices
for each of the tensors
sym symmetry under commutation of two tensors
sym None no symmetry
sym 0 commuting
sym 1 anticommuting
Examples
========
>>> from sympy.combinatorics.tensor_can import tensor_gens, get_symmetric_group_sgs
two symmetric tensors with 3 indices without free indices
>>> base, gens = get_symmetric_group_sgs(3)
>>> tensor_gens(base, gens, [[], []])
(8, [0, 1, 3, 4], [(7)(0 1), (7)(1 2), (7)(3 4), (7)(4 5), (7)(0 3)(1 4)(2 5)])
two symmetric tensors with 3 indices with free indices in slot 1 and 0
>>> tensor_gens(base, gens, [[1], [0]])
(8, [0, 4], [(7)(0 2), (7)(4 5)])
    four symmetric tensors with 3 indices, two of which have free indices
"""
def _get_bsgs(G, base, gens, free_indices):
"""
return the BSGS for G.pointwise_stabilizer(free_indices)
"""
if not free_indices:
return base[:], gens[:]
else:
H = G.pointwise_stabilizer(free_indices)
base, sgs = H.schreier_sims_incremental()
return base, sgs
# if not base there is no slot symmetry for the component tensors
# if list_free_indices.count([]) < 2 there is no commutation symmetry
# so there is no resulting slot symmetry
if not base and list_free_indices.count([]) < 2:
n = len(list_free_indices)
size = gens[0].size
size = n * (size - 2) + 2
return size, [], [_af_new(list(range(size)))]
# if any(list_free_indices) one needs to compute the pointwise
# stabilizer, so G is needed
if any(list_free_indices):
G = PermutationGroup(gens)
else:
G = None
# no_free list of lists of indices for component tensors without fixed
# indices
no_free = []
size = gens[0].size
id_af = list(range(size))
num_indices = size - 2
if not list_free_indices[0]:
no_free.append(list(range(num_indices)))
res_base, res_gens = _get_bsgs(G, base, gens, list_free_indices[0])
for i in range(1, len(list_free_indices)):
base1, gens1 = _get_bsgs(G, base, gens, list_free_indices[i])
res_base, res_gens = bsgs_direct_product(res_base, res_gens,
base1, gens1, 1)
if not list_free_indices[i]:
no_free.append(list(range(size - 2, size - 2 + num_indices)))
size += num_indices
nr = size - 2
res_gens = [h for h in res_gens if h._array_form != id_af]
    # if sym is None or there are no component tensors without free indices,
    # there is no commutation symmetry to add; stop here
if sym is None or not no_free:
if not res_gens:
res_gens = [_af_new(id_af)]
return size, res_base, res_gens
    # if the component tensors have minimal BSGS, so does their direct
# product P; the slot symmetry group is S = P*C, where C is the group
# to (anti)commute the component tensors with no free indices
# a stabilizer has the property S_i = P_i*C_i;
# the BSGS of P*C has SGS_P + SGS_C and the base is
# the ordered union of the bases of P and C.
# If P has minimal BSGS, so has S with this base.
base_comm = []
for i in range(len(no_free) - 1):
ind1 = no_free[i]
ind2 = no_free[i + 1]
a = list(range(ind1[0]))
a.extend(ind2)
a.extend(ind1)
base_comm.append(ind1[0])
a.extend(list(range(ind2[-1] + 1, nr)))
if sym == 0:
a.extend([nr, nr + 1])
else:
a.extend([nr + 1, nr])
res_gens.append(_af_new(a))
res_base = list(res_base)
# each base is ordered; order the union of the two bases
for i in base_comm:
if i not in res_base:
res_base.append(i)
res_base.sort()
if not res_gens:
res_gens = [_af_new(id_af)]
return size, res_base, res_gens
def gens_products(*v):
"""
Returns size, res_base, res_gens BSGS for n tensors of different types.
Explanation
===========
v is a sequence of (base_i, gens_i, free_i, sym_i)
where
base_i, gens_i BSGS of tensor of type `i`
free_i list of the fixed slots for each of the tensors
of type `i`; if there are `n_i` tensors of type `i`
and none of them have fixed slots, `free = [[]]*n_i`
sym 0 (1) if the tensors of type `i` (anti)commute among themselves
Examples
========
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, gens_products
>>> base, gens = get_symmetric_group_sgs(2)
>>> gens_products((base, gens, [[], []], 0))
(6, [0, 2], [(5)(0 1), (5)(2 3), (5)(0 2)(1 3)])
>>> gens_products((base, gens, [[1], []], 0))
(6, [2], [(5)(2 3)])
"""
res_size, res_base, res_gens = tensor_gens(*v[0])
for i in range(1, len(v)):
size, base, gens = tensor_gens(*v[i])
res_base, res_gens = bsgs_direct_product(res_base, res_gens, base,
gens, 1)
res_size = res_gens[0].size
id_af = list(range(res_size))
res_gens = [h for h in res_gens if h != id_af]
if not res_gens:
res_gens = [id_af]
return res_size, res_base, res_gens
|
983124cb193dc49745cc6023ce1966ab0e832a4f201a963a266ca914e1bd55f5 | from sympy.ntheory.primetest import isprime
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.printing.defaults import DefaultPrinting
from sympy.combinatorics.free_groups import free_group
class PolycyclicGroup(DefaultPrinting):
is_group = True
is_solvable = True
def __init__(self, pc_sequence, pc_series, relative_order, collector=None):
"""
Parameters
==========
pc_sequence : list
A sequence of elements whose classes generate the cyclic factor
groups of pc_series.
pc_series : list
A subnormal sequence of subgroups where each factor group is cyclic.
relative_order : list
The orders of factor groups of pc_series.
collector : Collector
By default, it is None. Collector class provides the
polycyclic presentation with various other functionalities.
"""
self.pcgs = pc_sequence
self.pc_series = pc_series
self.relative_order = relative_order
self.collector = Collector(self.pcgs, pc_series, relative_order) if not collector else collector
def is_prime_order(self):
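        """Return True if the order of every cyclic factor group in pc_series is prime."""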
return all(isprime(order) for order in self.relative_order)
def length(self):
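        """Return the length of the polycyclic generating sequence."""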
return len(self.pcgs)
class Collector(DefaultPrinting):
"""
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 8.1.3
"""
def __init__(self, pcgs, pc_series, relative_order, free_group_=None, pc_presentation=None):
"""
Most of the parameters for the Collector class are the same as for PolycyclicGroup.
Others are described below.
Parameters
==========
free_group_ : tuple
free_group_ provides the mapping of polycyclic generating
sequence with the free group elements.
pc_presentation : dict
Provides the presentation of polycyclic groups with the
help of power and conjugate relators.
See Also
========
PolycyclicGroup
"""
self.pcgs = pcgs
self.pc_series = pc_series
self.relative_order = relative_order
self.free_group = free_group('x:{}'.format(len(pcgs)))[0] if not free_group_ else free_group_
self.index = {s: i for i, s in enumerate(self.free_group.symbols)}
self.pc_presentation = self.pc_relators()
def minimal_uncollected_subword(self, word):
r"""
        Returns the minimal uncollected subword.
Explanation
===========
A word ``v`` defined on generators in ``X`` is a minimal
uncollected subword of the word ``w`` if ``v`` is a subword
        of ``w`` and it has one of the following forms
* `v = {x_{i+1}}^{a_j}x_i`
* `v = {x_{i+1}}^{a_j}{x_i}^{-1}`
* `v = {x_i}^{a_j}`
        for `a_j` not in `\{1, \ldots, s-1\}`, where ``s`` is the power
exponent of the corresponding generator.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x1, x2 = free_group("x1, x2")
>>> word = x2**2*x1**7
>>> collector.minimal_uncollected_subword(word)
((x2, 2),)
"""
# To handle the case word = <identity>
if not word:
return None
array = word.array_form
re = self.relative_order
index = self.index
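        # Look first for a single generator raised to an exponent outside the
        # allowed range, then for a pair of adjacent generators appearing in
        # descending order of their index.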
for i in range(len(array)):
s1, e1 = array[i]
if re[index[s1]] and (e1 < 0 or e1 > re[index[s1]]-1):
return ((s1, e1), )
for i in range(len(array)-1):
s1, e1 = array[i]
s2, e2 = array[i+1]
if index[s1] > index[s2]:
e = 1 if e2 > 0 else -1
return ((s1, e1), (s2, e))
return None
def relations(self):
"""
        Separates the given relators of pc presentation into power and
conjugate relations.
Returns
=======
(power_rel, conj_rel)
Separates pc presentation into power and conjugate relations.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> power_rel, conj_rel = collector.relations()
>>> power_rel
{x0**2: (), x1**3: ()}
>>> conj_rel
{x0**-1*x1*x0: x1**2}
See Also
========
pc_relators
"""
power_relators = {}
conjugate_relators = {}
for key, value in self.pc_presentation.items():
if len(key.array_form) == 1:
power_relators[key] = value
else:
conjugate_relators[key] = value
return power_relators, conjugate_relators
def subword_index(self, word, w):
"""
Returns the start and ending index of a given
subword in a word.
Parameters
==========
word : FreeGroupElement
word defined on free group elements for a
polycyclic group.
w : FreeGroupElement
subword of a given word, whose starting and
ending index to be computed.
Returns
=======
(i, j)
A tuple containing starting and ending index of ``w``
in the given word.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x1, x2 = free_group("x1, x2")
>>> word = x2**2*x1**7
>>> w = x2**2*x1
>>> collector.subword_index(word, w)
(0, 3)
>>> w = x1**7
>>> collector.subword_index(word, w)
(2, 9)
"""
low = -1
high = -1
for i in range(len(word)-len(w)+1):
if word.subword(i, i+len(w)) == w:
low = i
high = i+len(w)
break
if low == high == -1:
return -1, -1
return low, high
def map_relation(self, w):
"""
Return a conjugate relation.
Explanation
===========
Given a word formed by two free group elements, the
corresponding conjugate relation with those free
group elements is formed and mapped with the collected
word in the polycyclic presentation.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x0, x1 = free_group("x0, x1")
>>> w = x1*x0
>>> collector.map_relation(w)
x1**2
See Also
========
pc_presentation
"""
array = w.array_form
s1 = array[0][0]
s2 = array[1][0]
key = ((s2, -1), (s1, 1), (s2, 1))
key = self.free_group.dtype(key)
return self.pc_presentation[key]
def collected_word(self, word):
r"""
Return the collected form of a word.
Explanation
===========
A word ``w`` is called collected, if `w = {x_{i_1}}^{a_1} * \ldots *
{x_{i_r}}^{a_r}` with `i_1 < i_2< \ldots < i_r` and `a_j` is in
`\{1, \ldots, {s_j}-1\}`.
Otherwise w is uncollected.
Parameters
==========
word : FreeGroupElement
An uncollected word.
Returns
=======
word
            A collected word of form `w = {x_{i_1}}^{a_1} * \ldots *
            {x_{i_r}}^{a_r}` with `i_1 < i_2 < \ldots < i_r` and `a_j` in
            `\{1, \ldots, {s_j}-1\}`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x0, x1, x2, x3 = free_group("x0, x1, x2, x3")
>>> word = x3*x2*x1*x0
>>> collected_word = collector.collected_word(word)
>>> free_to_perm = {}
>>> free_group = collector.free_group
>>> for sym, gen in zip(free_group.symbols, collector.pcgs):
... free_to_perm[sym] = gen
>>> G1 = PermutationGroup()
>>> for w in word:
... sym = w[0]
... perm = free_to_perm[sym]
... G1 = PermutationGroup([perm] + G1.generators)
>>> G2 = PermutationGroup()
>>> for w in collected_word:
... sym = w[0]
... perm = free_to_perm[sym]
... G2 = PermutationGroup([perm] + G2.generators)
>>> G1 == G2
True
See Also
========
minimal_uncollected_subword
"""
free_group = self.free_group
while True:
w = self.minimal_uncollected_subword(word)
if not w:
break
low, high = self.subword_index(word, free_group.dtype(w))
if low == -1:
continue
s1, e1 = w[0]
if len(w) == 1:
re = self.relative_order[self.index[s1]]
q = e1 // re
r = e1-q*re
key = ((w[0][0], re), )
key = free_group.dtype(key)
if self.pc_presentation[key]:
presentation = self.pc_presentation[key].array_form
sym, exp = presentation[0]
word_ = ((w[0][0], r), (sym, q*exp))
word_ = free_group.dtype(word_)
else:
if r != 0:
word_ = ((w[0][0], r), )
word_ = free_group.dtype(word_)
else:
word_ = None
word = word.eliminate_word(free_group.dtype(w), word_)
if len(w) == 2 and w[1][1] > 0:
s2, e2 = w[1]
s2 = ((s2, 1), )
s2 = free_group.dtype(s2)
word_ = self.map_relation(free_group.dtype(w))
word_ = s2*word_**e1
word_ = free_group.dtype(word_)
word = word.substituted_word(low, high, word_)
elif len(w) == 2 and w[1][1] < 0:
s2, e2 = w[1]
s2 = ((s2, 1), )
s2 = free_group.dtype(s2)
word_ = self.map_relation(free_group.dtype(w))
word_ = s2**-1*word_**e1
word_ = free_group.dtype(word_)
word = word.substituted_word(low, high, word_)
return word
def pc_relators(self):
r"""
Return the polycyclic presentation.
Explanation
===========
There are two types of relations used in polycyclic
presentation.
* ``Power relations`` : Power relators are of the form `x_i^{re_i}`,
where `i \in \{0, \ldots, \mathrm{len(pcgs)}\}`, ``x`` represents polycyclic
generator and ``re`` is the corresponding relative order.
        * ``Conjugate relations`` : Conjugate relators are of the form `x_j^{-1}x_ix_j`,
where `j < i \in \{0, \ldots, \mathrm{len(pcgs)}\}`.
Returns
=======
A dictionary with power and conjugate relations as key and
their collected form as corresponding values.
Notes
=====
        The identity permutation is mapped to the empty tuple ``()``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> S = SymmetricGroup(49).sylow_subgroup(7)
>>> der = S.derived_series()
>>> G = der[len(der)-2]
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> pcgs = PcGroup.pcgs
>>> len(pcgs)
6
>>> free_group = collector.free_group
        >>> pc_presentation = collector.pc_presentation
>>> free_to_perm = {}
>>> for s, g in zip(free_group.symbols, pcgs):
... free_to_perm[s] = g
        >>> for k, v in pc_presentation.items():
... k_array = k.array_form
... if v != ():
... v_array = v.array_form
... lhs = Permutation()
... for gen in k_array:
... s = gen[0]
... e = gen[1]
... lhs = lhs*free_to_perm[s]**e
... if v == ():
... assert lhs.is_identity
... continue
... rhs = Permutation()
... for gen in v_array:
... s = gen[0]
... e = gen[1]
... rhs = rhs*free_to_perm[s]**e
... assert lhs == rhs
"""
free_group = self.free_group
rel_order = self.relative_order
pc_relators = {}
perm_to_free = {}
pcgs = self.pcgs
for gen, s in zip(pcgs, free_group.generators):
perm_to_free[gen**-1] = s**-1
perm_to_free[gen] = s
pcgs = pcgs[::-1]
series = self.pc_series[::-1]
rel_order = rel_order[::-1]
collected_gens = []
for i, gen in enumerate(pcgs):
re = rel_order[i]
relation = perm_to_free[gen]**re
G = series[i]
l = G.generator_product(gen**re, original = True)
l.reverse()
word = free_group.identity
for g in l:
word = word*perm_to_free[g]
word = self.collected_word(word)
pc_relators[relation] = word if word else ()
self.pc_presentation = pc_relators
collected_gens.append(gen)
if len(collected_gens) > 1:
conj = collected_gens[len(collected_gens)-1]
conjugator = perm_to_free[conj]
for j in range(len(collected_gens)-1):
conjugated = perm_to_free[collected_gens[j]]
relation = conjugator**-1*conjugated*conjugator
gens = conj**-1*collected_gens[j]*conj
l = G.generator_product(gens, original = True)
l.reverse()
word = free_group.identity
for g in l:
word = word*perm_to_free[g]
word = self.collected_word(word)
pc_relators[relation] = word if word else ()
self.pc_presentation = pc_relators
return pc_relators
def exponent_vector(self, element):
r"""
Return the exponent vector of length equal to the
length of polycyclic generating sequence.
Explanation
===========
For a given generator/element ``g`` of the polycyclic group,
        it can be represented as `g = {x_1}^{e_1} \cdots {x_n}^{e_n}`,
where `x_i` represents polycyclic generators and ``n`` is
the number of generators in the free_group equal to the length
of pcgs.
Parameters
==========
element : Permutation
Generator of a polycyclic group.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> pcgs = PcGroup.pcgs
>>> collector.exponent_vector(G[0])
[1, 0, 0, 0]
>>> exp = collector.exponent_vector(G[1])
>>> g = Permutation()
>>> for i in range(len(exp)):
... g = g*pcgs[i]**exp[i] if exp[i] else g
>>> assert g == G[1]
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 8.1.1, Definition 8.4
"""
free_group = self.free_group
G = PermutationGroup()
for g in self.pcgs:
G = PermutationGroup([g] + G.generators)
gens = G.generator_product(element, original = True)
gens.reverse()
perm_to_free = {}
for sym, g in zip(free_group.generators, self.pcgs):
perm_to_free[g**-1] = sym**-1
perm_to_free[g] = sym
w = free_group.identity
for g in gens:
w = w*perm_to_free[g]
word = self.collected_word(w)
index = self.index
exp_vector = [0]*len(free_group)
word = word.array_form
for t in word:
exp_vector[index[t[0]]] = t[1]
return exp_vector
def depth(self, element):
r"""
Return the depth of a given element.
Explanation
===========
The depth of a given element ``g`` is defined by
`\mathrm{dep}[g] = i` if `e_1 = e_2 = \ldots = e_{i-1} = 0`
        and `e_i \neq 0`, where ``e`` represents the exponent-vector.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> collector.depth(G[0])
2
>>> collector.depth(G[1])
1
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 8.1.1, Definition 8.5
"""
exp_vector = self.exponent_vector(element)
return next((i+1 for i, x in enumerate(exp_vector) if x), len(self.pcgs)+1)
def leading_exponent(self, element):
r"""
Return the leading non-zero exponent.
Explanation
===========
The leading exponent for a given element `g` is defined
by `\mathrm{leading\_exponent}[g]` `= e_i`, if `\mathrm{depth}[g] = i`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> collector.leading_exponent(G[1])
1
"""
exp_vector = self.exponent_vector(element)
depth = self.depth(element)
if depth != len(self.pcgs)+1:
return exp_vector[depth-1]
return None
def _sift(self, z, g):
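        # Reduce ``g`` against the partial induced pcgs ``z``: while the depth
        # of the residue matches a stored generator, cancel its leading
        # exponent and continue; return the residue that is left.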
h = g
d = self.depth(h)
while d < len(self.pcgs) and z[d-1] != 1:
k = z[d-1]
e = self.leading_exponent(h)*(self.leading_exponent(k))**-1
e = e % self.relative_order[d-1]
h = k**-e*h
d = self.depth(h)
return h
def induced_pcgs(self, gens):
"""
Parameters
==========
gens : list
A list of generators on which polycyclic subgroup
is to be defined.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(8)
>>> G = S.sylow_subgroup(2)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> gens = [G[0], G[1]]
>>> ipcgs = collector.induced_pcgs(gens)
>>> [gen.order() for gen in ipcgs]
[2, 2, 2]
>>> G = S.sylow_subgroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> gens = [G[0], G[1]]
>>> ipcgs = collector.induced_pcgs(gens)
>>> [gen.order() for gen in ipcgs]
[3]
"""
z = [1]*len(self.pcgs)
G = gens
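        # Sift each generator through the current sequence ``z``; a
        # non-trivial residue is stored at its depth and its commutators with
        # the stored generators are appended to ``G`` to keep ``z`` closed.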
while G:
g = G.pop(0)
h = self._sift(z, g)
d = self.depth(h)
if d < len(self.pcgs):
for gen in z:
if gen != 1:
G.append(h**-1*gen**-1*h*gen)
                z[d-1] = h
z = [gen for gen in z if gen != 1]
return z
def constructive_membership_test(self, ipcgs, g):
"""
        Return the exponent vector of ``g`` with respect to the induced
        pcgs ``ipcgs``; return ``False`` if ``g`` cannot be reduced to the
        identity using ``ipcgs``.
"""
e = [0]*len(ipcgs)
h = g
d = self.depth(h)
for i, gen in enumerate(ipcgs):
while self.depth(gen) == d:
f = self.leading_exponent(h)*self.leading_exponent(gen)
f = f % self.relative_order[d-1]
h = gen**(-f)*h
e[i] = f
d = self.depth(h)
if h == 1:
return e
return False
|
dd1a32fa56b3b05127cfe65f58fe0e20ed1227351603e30b7f14d2cca6754eec | from sympy.combinatorics import Permutation as Perm
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.core import Basic, Tuple, default_sort_key
from sympy.sets import FiniteSet
from sympy.utilities.iterables import (minlex, unflatten, flatten)
from sympy.utilities.misc import as_int
rmul = Perm.rmul
class Polyhedron(Basic):
"""
Represents the polyhedral symmetry group (PSG).
Explanation
===========
The PSG is one of the symmetry groups of the Platonic solids.
There are three polyhedral groups: the tetrahedral group
of order 12, the octahedral group of order 24, and the
icosahedral group of order 60.
All doctests have been given in the docstring of the
constructor of the object.
References
==========
.. [1] http://mathworld.wolfram.com/PolyhedralGroup.html
"""
_edges = None
def __new__(cls, corners, faces=(), pgroup=()):
"""
The constructor of the Polyhedron group object.
Explanation
===========
It takes up to three parameters: the corners, faces, and
allowed transformations.
The corners/vertices are entered as a list of arbitrary
expressions that are used to identify each vertex.
The faces are entered as a list of tuples of indices; a tuple
of indices identifies the vertices which define the face. They
should be entered in a cw or ccw order; they will be standardized
        by reversal and rotation to give the lowest lexical ordering.
If no faces are given then no edges will be computed.
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> Polyhedron(list('abc'), [(1, 2, 0)]).faces
{(0, 1, 2)}
>>> Polyhedron(list('abc'), [(1, 0, 2)]).faces
{(0, 1, 2)}
The allowed transformations are entered as allowable permutations
        of the vertices for the polyhedron. Instances of Permutation
        (as with faces) should refer to the supplied vertices by index.
        These permutations are stored as a PermutationGroup.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy import init_printing
>>> from sympy.abc import w, x, y, z
>>> init_printing(pretty_print=False, perm_cyclic=False)
Here we construct the Polyhedron object for a tetrahedron.
>>> corners = [w, x, y, z]
>>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)]
Next, allowed transformations of the polyhedron must be given. This
is given as permutations of vertices.
Although the vertices of a tetrahedron can be numbered in 24 (4!)
different ways, there are only 12 different orientations for a
physical tetrahedron. The following permutations, applied once or
twice, will generate all 12 of the orientations. (The identity
permutation, Permutation(range(4)), is not included since it does
not change the orientation of the vertices.)
>>> pgroup = [Permutation([[0, 1, 2], [3]]), \
Permutation([[0, 1, 3], [2]]), \
Permutation([[0, 2, 3], [1]]), \
Permutation([[1, 2, 3], [0]]), \
Permutation([[0, 1], [2, 3]]), \
Permutation([[0, 2], [1, 3]]), \
Permutation([[0, 3], [1, 2]])]
The Polyhedron is now constructed and demonstrated:
>>> tetra = Polyhedron(corners, faces, pgroup)
>>> tetra.size
4
>>> tetra.edges
{(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)}
>>> tetra.corners
(w, x, y, z)
It can be rotated with an arbitrary permutation of vertices, e.g.
the following permutation is not in the pgroup:
>>> tetra.rotate(Permutation([0, 1, 3, 2]))
>>> tetra.corners
(w, x, z, y)
An allowed permutation of the vertices can be constructed by
repeatedly applying permutations from the pgroup to the vertices.
Here is a demonstration that applying p and p**2 for every p in
pgroup generates all the orientations of a tetrahedron and no others:
>>> all = ( (w, x, y, z), \
(x, y, w, z), \
(y, w, x, z), \
(w, z, x, y), \
(z, w, y, x), \
(w, y, z, x), \
(y, z, w, x), \
(x, z, y, w), \
(z, y, x, w), \
(y, x, z, w), \
(x, w, z, y), \
(z, x, w, y) )
>>> got = []
>>> for p in (pgroup + [p**2 for p in pgroup]):
... h = Polyhedron(corners)
... h.rotate(p)
... got.append(h.corners)
...
>>> set(got) == set(all)
True
The make_perm method of a PermutationGroup will randomly pick
permutations, multiply them together, and return the permutation that
can be applied to the polyhedron to give the orientation produced
by those individual permutations.
Here, 3 permutations are used:
>>> tetra.pgroup.make_perm(3) # doctest: +SKIP
Permutation([0, 3, 1, 2])
To select the permutations that should be used, supply a list
of indices to the permutations in pgroup in the order they should
be applied:
>>> use = [0, 0, 2]
>>> p002 = tetra.pgroup.make_perm(3, use)
>>> p002
Permutation([1, 0, 3, 2])
Apply them one at a time:
>>> tetra.reset()
>>> for i in use:
... tetra.rotate(pgroup[i])
...
>>> tetra.vertices
(x, w, z, y)
>>> sequentially = tetra.vertices
Apply the composite permutation:
>>> tetra.reset()
>>> tetra.rotate(p002)
>>> tetra.corners
(x, w, z, y)
>>> tetra.corners in all and tetra.corners == sequentially
True
Notes
=====
Defining permutation groups
---------------------------
        It is not necessary to enter any permutations, nor is it necessary to
enter a complete set of transformations. In fact, for a polyhedron,
all configurations can be constructed from just two permutations.
For example, the orientations of a tetrahedron can be generated from
an axis passing through a vertex and face and another axis passing
through a different vertex or from an axis passing through the
midpoints of two edges opposite of each other.
For simplicity of presentation, consider a square --
not a cube -- with vertices 1, 2, 3, and 4:
1-----2 We could think of axes of rotation being:
| | 1) through the face
| | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4
3-----4 3) lines 1-4 or 2-3
To determine how to write the permutations, imagine 4 cameras,
one at each corner, labeled A-D:
A B A B
1-----2 1-----3 vertex index:
| | | | 1 0
| | | | 2 1
3-----4 2-----4 3 2
C D C D 4 3
original after rotation
along 1-4
A diagonal and a face axis will be chosen for the "permutation group"
from which any orientation can be constructed.
>>> pgroup = []
Imagine a clockwise rotation when viewing 1-4 from camera A. The new
orientation is (in camera-order): 1, 3, 2, 4 so the permutation is
given using the *indices* of the vertices as:
>>> pgroup.append(Permutation((0, 2, 1, 3)))
Now imagine rotating clockwise when looking down an axis entering the
center of the square as viewed. The new camera-order would be
3, 1, 4, 2 so the permutation is (using indices):
>>> pgroup.append(Permutation((2, 0, 3, 1)))
The square can now be constructed:
** use real-world labels for the vertices, entering them in
camera order
** for the faces we use zero-based indices of the vertices
in *edge-order* as the face is traversed; neither the
direction nor the starting point matter -- the faces are
only used to define edges (if so desired).
>>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup)
To rotate the square with a single permutation we can do:
>>> square.rotate(square.pgroup[0])
>>> square.corners
(1, 3, 2, 4)
To use more than one permutation (or to use one permutation more
than once) it is more convenient to use the make_perm method:
>>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations
>>> square.reset() # return to initial orientation
>>> square.rotate(p011)
>>> square.corners
(4, 2, 3, 1)
Thinking outside the box
------------------------
Although the Polyhedron object has a direct physical meaning, it
actually has broader application. In the most general sense it is
just a decorated PermutationGroup, allowing one to connect the
permutations to something physical. For example, a Rubik's cube is
not a proper polyhedron, but the Polyhedron class can be used to
represent it in a way that helps to visualize the Rubik's cube.
>>> from sympy import flatten, unflatten, symbols
>>> from sympy.combinatorics import RubikGroup
>>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD'])
>>> def show():
... pairs = unflatten(r2.corners, 2)
... print(pairs[::2])
... print(pairs[1::2])
...
>>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2))
>>> show()
[(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)]
[(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)]
>>> r2.rotate(0) # cw rotation of F
>>> show()
[(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)]
[(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)]
Predefined Polyhedra
====================
For convenience, the vertices and faces are defined for the following
standard solids along with a permutation group for transformations.
When the polyhedron is oriented as indicated below, the vertices in
a given horizontal plane are numbered in ccw direction, starting from
the vertex that will give the lowest indices in a given face. (In the
net of the vertices, indices preceded by "-" indicate replication of
the lhs index in the net.)
tetrahedron, tetrahedron_faces
------------------------------
4 vertices (vertex up) net:
0 0-0
1 2 3-1
4 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3)
cube, cube_faces
----------------
8 vertices (face up) net:
0 1 2 3-0
4 5 6 7-4
6 faces:
(0, 1, 2, 3)
(0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4)
(4, 5, 6, 7)
octahedron, octahedron_faces
----------------------------
6 vertices (vertex up) net:
0 0 0-0
1 2 3 4-1
5 5 5-5
8 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4)
(1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5)
dodecahedron, dodecahedron_faces
--------------------------------
20 vertices (vertex up) net:
0 1 2 3 4 -0
5 6 7 8 9 -5
14 10 11 12 13-14
15 16 17 18 19-15
12 faces:
(0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6)
(2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5)
(5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12)
        (8, 12, 18, 19, 13) (9, 13, 19, 15, 14) (15, 16, 17, 18, 19)
icosahedron, icosahedron_faces
------------------------------
12 vertices (face up) net:
0 0 0 0 -0
1 2 3 4 5 -1
6 7 8 9 10 -6
11 11 11 11 -11
20 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4)
(0, 4, 5) (0, 1, 5) (1, 2, 6)
(2, 3, 7) (3, 4, 8) (4, 5, 9)
(1, 5, 10) (2, 6, 7) (3, 7, 8)
(4, 8, 9) (5, 9, 10) (1, 6, 10)
(6, 7, 11) (7, 8, 11) (8, 9, 11)
(9, 10, 11) (6, 10, 11)
>>> from sympy.combinatorics.polyhedron import cube
>>> cube.edges
{(0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7)}
If you want to use letters or other names for the corners you
can still use the pre-calculated faces:
>>> corners = list('abcdefgh')
>>> Polyhedron(corners, cube.faces).corners
(a, b, c, d, e, f, g, h)
References
==========
.. [1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf
"""
faces = [minlex(f, directed=False, key=default_sort_key) for f in faces]
corners, faces, pgroup = args = \
[Tuple(*a) for a in (corners, faces, pgroup)]
obj = Basic.__new__(cls, *args)
obj._corners = tuple(corners) # in order given
obj._faces = FiniteSet(*faces)
if pgroup and pgroup[0].size != len(corners):
raise ValueError("Permutation size unequal to number of corners.")
# use the identity permutation if none are given
obj._pgroup = PermutationGroup(
pgroup or [Perm(range(len(corners)))] )
return obj
@property
def corners(self):
"""
Get the corners of the Polyhedron.
The method ``vertices`` is an alias for ``corners``.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c, d
>>> p = Polyhedron(list('abcd'))
>>> p.corners == p.vertices == (a, b, c, d)
True
See Also
========
array_form, cyclic_form
"""
return self._corners
vertices = corners
@property
def array_form(self):
"""Return the indices of the corners.
The indices are given relative to the original position of corners.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron
>>> tetrahedron = tetrahedron.copy()
>>> tetrahedron.array_form
[0, 1, 2, 3]
>>> tetrahedron.rotate(0)
>>> tetrahedron.array_form
[0, 2, 3, 1]
>>> tetrahedron.pgroup[0].array_form
[0, 2, 3, 1]
See Also
========
corners, cyclic_form
"""
corners = list(self.args[0])
return [corners.index(c) for c in self.corners]
@property
def cyclic_form(self):
"""Return the indices of the corners in cyclic notation.
The indices are given relative to the original position of corners.
See Also
========
corners, array_form
"""
return Perm._af_new(self.array_form).cyclic_form
@property
def size(self):
"""
Get the number of corners of the Polyhedron.
"""
return len(self._corners)
@property
def faces(self):
"""
Get the faces of the Polyhedron.
"""
return self._faces
@property
def pgroup(self):
"""
Get the permutations of the Polyhedron.
"""
return self._pgroup
@property
def edges(self):
"""
        Given the faces of the polyhedron we can get the edges.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c
>>> corners = (a, b, c)
>>> faces = [(0, 1, 2)]
>>> Polyhedron(corners, faces).edges
{(0, 1), (0, 2), (1, 2)}
"""
if self._edges is None:
output = set()
for face in self.faces:
for i in range(len(face)):
edge = tuple(sorted([face[i], face[i - 1]]))
output.add(edge)
self._edges = FiniteSet(*output)
return self._edges
def rotate(self, perm):
"""
Apply a permutation to the polyhedron *in place*. The permutation
may be given as a Permutation instance or an integer indicating
which permutation from pgroup of the Polyhedron should be
applied.
This is an operation that is analogous to rotation about
an axis by a fixed increment.
Notes
=====
When a Permutation is applied, no check is done to see if that
is a valid permutation for the Polyhedron. For example, a cube
could be given a permutation which effectively swaps only 2
vertices. A valid permutation (that rotates the object in a
physical way) will be obtained if one only uses
permutations from the ``pgroup`` of the Polyhedron. On the other
hand, allowing arbitrary rotations (applications of permutations)
gives a way to follow named elements rather than indices since
Polyhedron allows vertices to be named while Permutation works
only with indices.
Examples
========
>>> from sympy.combinatorics import Polyhedron, Permutation
>>> from sympy.combinatorics.polyhedron import cube
>>> cube = cube.copy()
>>> cube.corners
(0, 1, 2, 3, 4, 5, 6, 7)
>>> cube.rotate(0)
>>> cube.corners
(1, 2, 3, 0, 5, 6, 7, 4)
A non-physical "rotation" that is not prohibited by this method:
>>> cube.reset()
>>> cube.rotate(Permutation([[1, 2]], size=8))
>>> cube.corners
(0, 2, 1, 3, 4, 5, 6, 7)
        Polyhedron can be used to follow elements of a set that are
identified by letters instead of integers:
>>> shadow = h5 = Polyhedron(list('abcde'))
>>> p = Permutation([3, 0, 1, 2, 4])
>>> h5.rotate(p)
>>> h5.corners
(d, a, b, c, e)
>>> _ == shadow.corners
True
>>> copy = h5.copy()
>>> h5.rotate(p)
>>> h5.corners == copy.corners
False
"""
if not isinstance(perm, Perm):
perm = self.pgroup[perm]
# and we know it's valid
else:
if perm.size != self.size:
raise ValueError('Polyhedron and Permutation sizes differ.')
a = perm.array_form
corners = [self.corners[a[i]] for i in range(len(self.corners))]
self._corners = tuple(corners)
def reset(self):
"""Return corners to their original positions.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron as T
>>> T = T.copy()
>>> T.corners
(0, 1, 2, 3)
>>> T.rotate(0)
>>> T.corners
(0, 2, 3, 1)
>>> T.reset()
>>> T.corners
(0, 1, 2, 3)
"""
self._corners = self.args[0]
def _pgroup_calcs():
"""Return the permutation groups for each of the polyhedra and the face
definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces,
icosahedron_faces
Explanation
===========
(This author didn't find and didn't know of a better way to do it though
there likely is such a way.)
Although only 2 permutations are needed for a polyhedron in order to
generate all the possible orientations, a group of permutations is
provided instead. A set of permutations is called a "group" if::
a*b = c (for any pair of permutations in the group, a and b, their
product, c, is in the group)
a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds)
        there is an identity permutation, I, such that I*a = a*I = a for all
        elements in the group
        for every a in the group there is a b in the group with a*b = I (the
        inverse of each permutation is also in the group)
None of the polyhedron groups defined follow these definitions of a group.
Instead, they are selected to contain those permutations whose powers
alone will construct all orientations of the polyhedron, i.e. for
permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``,
``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of
permutation ``i``) generate all permutations of the polyhedron instead of
mixed products like ``a*b``, ``a*b**2``, etc....
Note that for a polyhedron with n vertices, the valid permutations of the
vertices exclude those that do not maintain its faces. e.g. the
    permutation BCDA of a square's four corners, ABCD, is a valid
    permutation while CBDA is not (because this would twist the square).
Examples
========
    A set of permutations forms a group only if it is closed, contains the
    identity permutation, and contains the inverse of each of its elements.
    The permutation lists defined above do not satisfy this on their own;
    however, ``pgroup`` is stored as a PermutationGroup generated by them, so
    ``is_group`` is True for every polyhedron:
>>> from sympy.combinatorics.polyhedron import (
... tetrahedron, cube, octahedron, dodecahedron, icosahedron)
...
>>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron)
>>> [h.pgroup.is_group for h in polyhedra]
...
[True, True, True, True, True]
Although tests in polyhedron's test suite check that powers of the
permutations in the groups generate all permutations of the vertices
of the polyhedron, here we also demonstrate that the powers of the given
permutations create a complete group for the tetrahedron:
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> for h in polyhedra[:1]:
... G = h.pgroup
... perms = set()
... for g in G:
... for e in range(g.order()):
... p = tuple((g**e).array_form)
... perms.add(p)
...
... perms = [Permutation(p) for p in perms]
... assert PermutationGroup(perms).is_group
In addition to doing the above, the tests in the suite confirm that the
faces are all present after the application of each permutation.
References
==========
.. [1] http://dogschool.tripod.com/trianglegroup.html
"""
def _pgroup_of_double(polyh, ordered_faces, pgroup):
n = len(ordered_faces[0])
# the vertices of the double which sits inside a given polyhedron
# can be found by tracking the faces of the outer polyhedron.
# A map between face and the vertex of the double is made so that
# after rotation the position of the vertices can be located
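# (for example, the octahedron's pgroup below is derived from the cube's
# faces, and the icosahedron's from the dodecahedron's)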
fmap = dict(zip(ordered_faces,
range(len(ordered_faces))))
flat_faces = flatten(ordered_faces)
new_pgroup = []
for i, p in enumerate(pgroup):
h = polyh.copy()
h.rotate(p)
c = h.corners
# reorder corners in the order they should appear when
# enumerating the faces
reorder = unflatten([c[j] for j in flat_faces], n)
# make them canonical
reorder = [tuple(map(as_int,
minlex(f, directed=False)))
for f in reorder]
# map face to vertex: the resulting list of vertices is the
# permutation that we seek for the double
new_pgroup.append(Perm([fmap[f] for f in reorder]))
return new_pgroup
tetrahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3
(1, 2, 3), # bottom
]
# cw from top
#
_t_pgroup = [
Perm([[1, 2, 3], [0]]), # cw from top
Perm([[0, 1, 2], [3]]), # cw from front face
Perm([[0, 3, 2], [1]]), # cw from back right face
Perm([[0, 3, 1], [2]]), # cw from back left face
Perm([[0, 1], [2, 3]]), # through front left edge
Perm([[0, 2], [1, 3]]), # through front right edge
Perm([[0, 3], [1, 2]]), # through back edge
]
tetrahedron = Polyhedron(
range(4),
tetrahedron_faces,
_t_pgroup)
cube_faces = [
(0, 1, 2, 3), # upper
(0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4
(4, 5, 6, 7), # lower
]
# U, D, F, B, L, R = up, down, front, back, left, right
_c_pgroup = [Perm(p) for p in
[
[1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U
[4, 0, 3, 7, 5, 1, 2, 6], # cw from F face
[4, 5, 1, 0, 7, 6, 2, 3], # cw from R face
[1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge
[6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge
[6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge
[3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge
[4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge
[6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge
[0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex
[5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex
[5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex
[7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL
]]
cube = Polyhedron(
range(8),
cube_faces,
_c_pgroup)
octahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4
(1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4
]
octahedron = Polyhedron(
range(6),
octahedron_faces,
_pgroup_of_double(cube, cube_faces, _c_pgroup))
dodecahedron_faces = [
(0, 1, 2, 3, 4), # top
(0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5
(3, 4, 9, 13, 8), (0, 4, 9, 14, 5),
(5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18,
12), # lower 5
(8, 12, 18, 19, 13), (9, 13, 19, 15, 14),
(15, 16, 17, 18, 19) # bottom
]
def _string_to_perm(s):
rv = [Perm(range(20))]
p = None
for si in s:
if si not in '01':
count = int(si) - 1
else:
count = 1
if si == '0':
p = _f0
elif si == '1':
p = _f1
rv.extend([p]*count)
return Perm.rmul(*rv)
# top face cw
_f0 = Perm([
1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11,
12, 13, 14, 10, 16, 17, 18, 19, 15])
# front face cw
_f1 = Perm([
5, 0, 4, 9, 14, 10, 1, 3, 13, 15,
6, 2, 8, 19, 16, 17, 11, 7, 12, 18])
# the strings below, like 0104 are shorthand for F0*F1*F0**4 and are
# the remaining 4 face rotations, 15 edge permutations, and the
# 10 vertex rotations.
_dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in '''
0104 140 014 0410
010 1403 03104 04103 102
120 1304 01303 021302 03130
0412041 041204103 04120410 041204104 041204102
10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()]
dodecahedron = Polyhedron(
range(20),
dodecahedron_faces,
_dodeca_pgroup)
icosahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5),
(1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9),
(3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6),
(6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)]
icosahedron = Polyhedron(
range(12),
icosahedron_faces,
_pgroup_of_double(
dodecahedron, dodecahedron_faces, _dodeca_pgroup))
return (tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces,
dodecahedron_faces, icosahedron_faces)
# -----------------------------------------------------------------------
# Standard Polyhedron groups
#
# These are generated using _pgroup_calcs() above. However to save
# import time we encode them explicitly here.
# -----------------------------------------------------------------------
tetrahedron = Polyhedron(
Tuple(0, 1, 2, 3),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 1, 3),
Tuple(1, 2, 3)),
Tuple(
Perm(1, 2, 3),
Perm(3)(0, 1, 2),
Perm(0, 3, 2),
Perm(0, 3, 1),
Perm(0, 1)(2, 3),
Perm(0, 2)(1, 3),
Perm(0, 3)(1, 2)
))
cube = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7),
Tuple(
Tuple(0, 1, 2, 3),
Tuple(0, 1, 5, 4),
Tuple(1, 2, 6, 5),
Tuple(2, 3, 7, 6),
Tuple(0, 3, 7, 4),
Tuple(4, 5, 6, 7)),
Tuple(
Perm(0, 1, 2, 3)(4, 5, 6, 7),
Perm(0, 4, 5, 1)(2, 3, 7, 6),
Perm(0, 4, 7, 3)(1, 5, 6, 2),
Perm(0, 1)(2, 4)(3, 5)(6, 7),
Perm(0, 6)(1, 2)(3, 5)(4, 7),
Perm(0, 6)(1, 7)(2, 3)(4, 5),
Perm(0, 3)(1, 7)(2, 4)(5, 6),
Perm(0, 4)(1, 7)(2, 6)(3, 5),
Perm(0, 6)(1, 5)(2, 4)(3, 7),
Perm(1, 3, 4)(2, 7, 5),
Perm(7)(0, 5, 2)(3, 4, 6),
Perm(0, 5, 7)(1, 6, 3),
Perm(0, 7, 2)(1, 4, 6)))
octahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 1, 4),
Tuple(1, 2, 5),
Tuple(2, 3, 5),
Tuple(3, 4, 5),
Tuple(1, 4, 5)),
Tuple(
Perm(5)(1, 2, 3, 4),
Perm(0, 4, 5, 2),
Perm(0, 1, 5, 3),
Perm(0, 1)(2, 4)(3, 5),
Perm(0, 2)(1, 3)(4, 5),
Perm(0, 3)(1, 5)(2, 4),
Perm(0, 4)(1, 3)(2, 5),
Perm(0, 5)(1, 4)(2, 3),
Perm(0, 5)(1, 2)(3, 4),
Perm(0, 4, 1)(2, 3, 5),
Perm(0, 1, 2)(3, 4, 5),
Perm(0, 2, 3)(1, 5, 4),
Perm(0, 4, 3)(1, 5, 2)))
dodecahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
Tuple(
Tuple(0, 1, 2, 3, 4),
Tuple(0, 1, 6, 10, 5),
Tuple(1, 2, 7, 11, 6),
Tuple(2, 3, 8, 12, 7),
Tuple(3, 4, 9, 13, 8),
Tuple(0, 4, 9, 14, 5),
Tuple(5, 10, 16, 15, 14),
Tuple(6, 10, 16, 17, 11),
Tuple(7, 11, 17, 18, 12),
Tuple(8, 12, 18, 19, 13),
Tuple(9, 13, 19, 15, 14),
Tuple(15, 16, 17, 18, 19)),
Tuple(
Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19),
Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12),
Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13),
Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14),
Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10),
Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11),
Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19),
Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16),
Perm(0, 8)(1, 13)(2, 9)(3, 4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17),
Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18),
Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18),
Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19),
Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15),
Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14),
Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17),
Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11),
Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13),
Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10),
Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 12),
Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 11)(8, 10)(9, 14),
Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17),
Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18),
Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19),
Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15),
Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16),
Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18),
Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19),
Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9),
Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16),
Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17)))
icosahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 4, 5),
Tuple(0, 1, 5),
Tuple(1, 6, 7),
Tuple(1, 2, 7),
Tuple(2, 7, 8),
Tuple(2, 3, 8),
Tuple(3, 8, 9),
Tuple(3, 4, 9),
Tuple(4, 9, 10),
Tuple(4, 5, 10),
Tuple(5, 6, 10),
Tuple(1, 5, 6),
Tuple(6, 7, 11),
Tuple(7, 8, 11),
Tuple(8, 9, 11),
Tuple(9, 10, 11),
Tuple(6, 10, 11)),
Tuple(
Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10),
Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8),
Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9),
Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5),
Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6),
Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7),
Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11),
Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11),
Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10),
Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11),
Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11),
Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10),
Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10),
Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7),
Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8),
Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7),
Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9),
Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6),
Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8),
Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10),
Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11),
Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11),
Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10),
Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11),
Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11),
Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8),
Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9),
Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10),
Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4),
Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5)))
tetrahedron_faces = list(tuple(arg) for arg in tetrahedron.faces)
cube_faces = list(tuple(arg) for arg in cube.faces)
octahedron_faces = list(tuple(arg) for arg in octahedron.faces)
dodecahedron_faces = list(tuple(arg) for arg in dodecahedron.faces)
icosahedron_faces = list(tuple(arg) for arg in icosahedron.faces)
|
0ed65f829edba433ea254957704bcef9aa407bd36eba395ecae392ad1807168e | import itertools
from sympy.combinatorics.fp_groups import FpGroup, FpSubgroup, simplify_presentation
from sympy.combinatorics.free_groups import FreeGroup
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.core.numbers import igcd
from sympy.ntheory.factor_ import totient
from sympy.core.singleton import S
class GroupHomomorphism:
'''
A class representing group homomorphisms. Instantiate using `homomorphism()`.
References
==========
.. [1] Holt, D., Eick, B. and O'Brien, E. (2005). Handbook of computational group theory.
'''
def __init__(self, domain, codomain, images):
self.domain = domain
self.codomain = codomain
self.images = images
self._inverses = None
self._kernel = None
self._image = None
def _invs(self):
'''
Return a dictionary with `{gen: inverse}` where `gen` is a rewriting
generator of `codomain` (e.g. strong generator for permutation groups)
and `inverse` is an element of its preimage
'''
image = self.image()
inverses = {}
for k in list(self.images.keys()):
v = self.images[k]
if not (v in inverses
or v.is_identity):
inverses[v] = k
if isinstance(self.codomain, PermutationGroup):
gens = image.strong_gens
else:
gens = image.generators
for g in gens:
if g in inverses or g.is_identity:
continue
w = self.domain.identity
if isinstance(self.codomain, PermutationGroup):
parts = image._strong_gens_slp[g][::-1]
else:
parts = g
for s in parts:
if s in inverses:
w = w*inverses[s]
else:
w = w*inverses[s**-1]**-1
inverses[g] = w
return inverses
def invert(self, g):
'''
Return an element of the preimage of ``g`` or of each element
of ``g`` if ``g`` is a list.
Explanation
===========
If the codomain is an FpGroup, the inverse for equal
elements might not always be the same unless the FpGroup's
rewriting system is confluent. However, making a system
confluent can be time-consuming. If it's important, try
`self.codomain.make_confluent()` first.
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.free_groups import FreeGroupElement
if isinstance(g, (Permutation, FreeGroupElement)):
if isinstance(self.codomain, FpGroup):
g = self.codomain.reduce(g)
if self._inverses is None:
self._inverses = self._invs()
image = self.image()
w = self.domain.identity
if isinstance(self.codomain, PermutationGroup):
gens = image.generator_product(g)[::-1]
else:
gens = g
# the following can't be "for s in gens:"
# because that would be equivalent to
# "for s in gens.array_form:" when g is
# a FreeGroupElement. On the other hand,
# when you call gens by index, the generator
# (or inverse) at position i is returned.
for i in range(len(gens)):
s = gens[i]
if s.is_identity:
continue
if s in self._inverses:
w = w*self._inverses[s]
else:
w = w*self._inverses[s**-1]**-1
return w
elif isinstance(g, list):
return [self.invert(e) for e in g]
def kernel(self):
'''
Compute the kernel of `self`.
'''
if self._kernel is None:
self._kernel = self._compute_kernel()
return self._kernel
def _compute_kernel(self):
G = self.domain
G_order = G.order()
if G_order is S.Infinity:
raise NotImplementedError(
"Kernel computation is not implemented for infinite groups")
gens = []
if isinstance(G, PermutationGroup):
K = PermutationGroup(G.identity)
else:
K = FpSubgroup(G, gens, normal=True)
i = self.image().order()
while K.order()*i != G_order:
r = G.random()
k = r*self.invert(self(r))**-1
if k not in K:
gens.append(k)
if isinstance(G, PermutationGroup):
K = PermutationGroup(gens)
else:
K = FpSubgroup(G, gens, normal=True)
return K
def image(self):
'''
Compute the image of `self`.
'''
if self._image is None:
values = list(set(self.images.values()))
if isinstance(self.codomain, PermutationGroup):
self._image = self.codomain.subgroup(values)
else:
self._image = FpSubgroup(self.codomain, values)
return self._image
def _apply(self, elem):
'''
Apply `self` to `elem`.
'''
if elem not in self.domain:
if isinstance(elem, (list, tuple)):
return [self._apply(e) for e in elem]
raise ValueError("The supplied element doesn't belong to the domain")
if elem.is_identity:
return self.codomain.identity
else:
images = self.images
value = self.codomain.identity
if isinstance(self.domain, PermutationGroup):
gens = self.domain.generator_product(elem, original=True)
for g in gens:
if g in self.images:
value = images[g]*value
else:
value = images[g**-1]**-1*value
else:
i = 0
for _, p in elem.array_form:
if p < 0:
g = elem[i]**-1
else:
g = elem[i]
value = value*images[g]**p
i += abs(p)
return value
def __call__(self, elem):
return self._apply(elem)
def is_injective(self):
'''
Check if the homomorphism is injective
'''
return self.kernel().order() == 1
def is_surjective(self):
'''
Check if the homomorphism is surjective
'''
im = self.image().order()
oth = self.codomain.order()
if im is S.Infinity and oth is S.Infinity:
return None
else:
return im == oth
def is_isomorphism(self):
'''
Check if `self` is an isomorphism.
'''
return self.is_injective() and self.is_surjective()
def is_trivial(self):
'''
Check if `self` is a trivial homomorphism, i.e. all elements
are mapped to the identity.
'''
return self.image().order() == 1
def compose(self, other):
'''
Return the composition of `self` and `other`, i.e.
the homomorphism phi such that for all g in the domain
of `other`, phi(g) = self(other(g))
'''
if not other.image().is_subgroup(self.domain):
raise ValueError("The image of `other` must be a subgroup of "
"the domain of `self`")
images = {g: self(other(g)) for g in other.images}
return GroupHomomorphism(other.domain, self.codomain, images)
def restrict_to(self, H):
'''
Return the restriction of the homomorphism to the subgroup `H`
of the domain.
'''
if not isinstance(H, PermutationGroup) or not H.is_subgroup(self.domain):
raise ValueError("Given H is not a subgroup of the domain")
domain = H
images = {g: self(g) for g in H.generators}
return GroupHomomorphism(domain, self.codomain, images)
def invert_subgroup(self, H):
'''
Return the subgroup of the domain that is the inverse image
of the subgroup ``H`` of the homomorphism image
'''
if not H.is_subgroup(self.image()):
raise ValueError("Given H is not a subgroup of the image")
gens = []
P = PermutationGroup(self.image().identity)
for h in H.generators:
h_i = self.invert(h)
if h_i not in P:
gens.append(h_i)
P = PermutationGroup(gens)
for k in self.kernel().generators:
if k*h_i not in P:
gens.append(k*h_i)
P = PermutationGroup(gens)
return P
def homomorphism(domain, codomain, gens, images=(), check=True):
'''
Create (if possible) a group homomorphism from the group ``domain``
to the group ``codomain`` defined by the images of the domain's
generators ``gens``. ``gens`` and ``images`` can be either lists or tuples
of equal sizes. If ``gens`` is a proper subset of the group's generators,
the unspecified generators will be mapped to the identity. If the
images are not specified, a trivial homomorphism will be created.
If the given images of the generators do not define a homomorphism,
an exception is raised.
If ``check`` is ``False``, do not check whether the given images actually
define a homomorphism.
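
Examples
========

A minimal illustrative sketch (the names ``p``, ``G`` and ``T`` are
chosen here for demonstration): the cyclic group generated by the
4-cycle ``p`` is mapped onto its subgroup generated by ``p**2``.

>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.homomorphisms import homomorphism
>>> p = Permutation(0, 1, 2, 3)
>>> G = PermutationGroup(p)
>>> T = homomorphism(G, G, [p], [p**2])
>>> T(p**3) == p**2
True
>>> T.is_injective()
False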
'''
if not isinstance(domain, (PermutationGroup, FpGroup, FreeGroup)):
raise TypeError("The domain must be a group")
if not isinstance(codomain, (PermutationGroup, FpGroup, FreeGroup)):
raise TypeError("The codomain must be a group")
generators = domain.generators
if not all(g in generators for g in gens):
raise ValueError("The supplied generators must be a subset of the domain's generators")
if not all(g in codomain for g in images):
raise ValueError("The images must be elements of the codomain")
if images and len(images) != len(gens):
raise ValueError("The number of images must be equal to the number of generators")
gens = list(gens)
images = list(images)
images.extend([codomain.identity]*(len(generators)-len(images)))
gens.extend([g for g in generators if g not in gens])
images = dict(zip(gens,images))
if check and not _check_homomorphism(domain, codomain, images):
raise ValueError("The given images do not define a homomorphism")
return GroupHomomorphism(domain, codomain, images)
def _check_homomorphism(domain, codomain, images):
if hasattr(domain, 'relators'):
rels = domain.relators
else:
gens = domain.presentation().generators
rels = domain.presentation().relators
identity = codomain.identity
def _image(r):
if r.is_identity:
return identity
else:
w = identity
r_arr = r.array_form
i = 0
j = 0
# i is the index for r and j is for
# r_arr. r_arr[j] is the tuple (sym, p)
# where sym is the generator symbol
# and p is the power to which it is
# raised while r[i] is a generator
# (not just its symbol) or the inverse of
# a generator - hence the need for
# both indices
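# For illustration (x, y standing for free group generators): if
# r = x**2*y**-3 then r.array_form is ((x, 2), (y, -3)) (symbol, power
# pairs), while r[0] and r[1] are the letter x and r[2], r[3], r[4]
# are the letter y**-1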
while i < len(r):
power = r_arr[j][1]
if isinstance(domain, PermutationGroup) and r[i] in gens:
s = domain.generators[gens.index(r[i])]
else:
s = r[i]
if s in images:
w = w*images[s]**power
elif s**-1 in images:
w = w*images[s**-1]**power
i += abs(power)
j += 1
return w
for r in rels:
if isinstance(codomain, FpGroup):
s = codomain.equals(_image(r), identity)
if s is None:
# only try to make the rewriting system
# confluent when it can't determine the
# truth of equality otherwise
success = codomain.make_confluent()
s = codomain.equals(_image(r), identity)
if s is None and not success:
raise RuntimeError("Can't determine if the images "
"define a homomorphism. Try increasing "
"the maximum number of rewriting rules "
"(group._rewriting_system.set_max(new_value); "
"the current value is stored in group._rewriting"
"_system.maxeqns)")
else:
s = _image(r).is_identity
if not s:
return False
return True
def orbit_homomorphism(group, omega):
'''
Return the homomorphism induced by the action of the permutation
group ``group`` on the set ``omega`` that is closed under the action.
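
Examples
========

A small illustrative sketch: the alternating group on 4 points acts
faithfully on ``range(4)``, so the induced homomorphism is injective.

>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.homomorphisms import orbit_homomorphism
>>> A = AlternatingGroup(4)
>>> T = orbit_homomorphism(A, range(4))
>>> T.is_injective()
True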
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.named_groups import SymmetricGroup
codomain = SymmetricGroup(len(omega))
identity = codomain.identity
omega = list(omega)
images = {g: identity*Permutation([omega.index(o^g) for o in omega]) for g in group.generators}
group._schreier_sims(base=omega)
H = GroupHomomorphism(group, codomain, images)
if len(group.basic_stabilizers) > len(omega):
H._kernel = group.basic_stabilizers[len(omega)]
else:
H._kernel = PermutationGroup([group.identity])
return H
def block_homomorphism(group, blocks):
'''
Return the homomorphism induced by the action of the permutation
group ``group`` on the block system ``blocks``. The latter should be
of the same form as returned by the ``minimal_block`` method for
permutation groups, namely a list of length ``group.degree`` where
the i-th entry is a representative of the block i belongs to.
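
Examples
========

An illustrative sketch (the block system chosen here is the pair of
diagonals of a square): the dihedral group of the square permutes the
two diagonals, so the image of the induced homomorphism has order 2.

>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.homomorphisms import block_homomorphism
>>> D = DihedralGroup(4)
>>> H = block_homomorphism(D, D.minimal_block([0, 2]))
>>> H.image().order()
2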
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.named_groups import SymmetricGroup
n = len(blocks)
# number the blocks; m is the total number,
# b is such that b[i] is the number of the block i belongs to,
# p is the list of length m such that p[i] is the representative
# of the i-th block
m = 0
p = []
b = [None]*n
for i in range(n):
if blocks[i] == i:
p.append(i)
b[i] = m
m += 1
for i in range(n):
b[i] = b[blocks[i]]
codomain = SymmetricGroup(m)
# the list corresponding to the identity permutation in codomain
identity = range(m)
images = {g: Permutation([b[p[i]^g] for i in identity]) for g in group.generators}
H = GroupHomomorphism(group, codomain, images)
return H
def group_isomorphism(G, H, isomorphism=True):
'''
Compute an isomorphism between 2 given groups.
Parameters
==========
G : A finite ``FpGroup`` or a ``PermutationGroup``.
First group.
H : A finite ``FpGroup`` or a ``PermutationGroup``
Second group.
isomorphism : bool
This is used to avoid the computation of homomorphism
when the user only wants to check if there exists
an isomorphism between the groups.
Returns
=======
If isomorphism = False -- Returns a boolean.
If isomorphism = True -- Returns a boolean and an isomorphism between `G` and `H`.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> from sympy.combinatorics.homomorphisms import group_isomorphism
>>> from sympy.combinatorics.named_groups import DihedralGroup, AlternatingGroup
>>> D = DihedralGroup(8)
>>> p = Permutation(0, 1, 2, 3, 4, 5, 6, 7)
>>> P = PermutationGroup(p)
>>> group_isomorphism(D, P)
(False, None)
>>> F, a, b = free_group("a, b")
>>> G = FpGroup(F, [a**3, b**3, (a*b)**2])
>>> H = AlternatingGroup(4)
>>> (check, T) = group_isomorphism(G, H)
>>> check
True
>>> T(b*a*b**-1*a**-1*b**-1)
(0 2 3)
Notes
=====
Uses the approach suggested by Robert Tarjan to compute the isomorphism between two groups.
First, the generators of ``G`` are mapped to the elements of ``H`` and
we check if the mapping induces an isomorphism.
'''
if not isinstance(G, (PermutationGroup, FpGroup)):
raise TypeError("The group must be a PermutationGroup or an FpGroup")
if not isinstance(H, (PermutationGroup, FpGroup)):
raise TypeError("The group must be a PermutationGroup or an FpGroup")
if isinstance(G, FpGroup) and isinstance(H, FpGroup):
G = simplify_presentation(G)
H = simplify_presentation(H)
# Two FpGroups (in particular infinite ones) with the same generators
# are isomorphic when they have the same relators, possibly listed in
# a different order.
if G.generators == H.generators and set(G.relators) == set(H.relators):
if not isomorphism:
return True
return (True, homomorphism(G, H, G.generators, H.generators))
# `_H` is the permutation group isomorphic to `H`.
_H = H
g_order = G.order()
h_order = H.order()
if g_order is S.Infinity:
raise NotImplementedError("Isomorphism methods are not implemented for infinite groups.")
if isinstance(H, FpGroup):
if h_order is S.Infinity:
raise NotImplementedError("Isomorphism methods are not implemented for infinite groups.")
_H, h_isomorphism = H._to_perm_group()
if (g_order != h_order) or (G.is_abelian != H.is_abelian):
if not isomorphism:
return False
return (False, None)
if not isomorphism:
# If the order n is a cyclic number, i.e. igcd(n, totient(n)) == 1,
# then every group of order n is cyclic, so two such groups are
# necessarily isomorphic.
n = g_order
if (igcd(n, totient(n))) == 1:
return True
# Match the generators of `G` with subsets of `_H`
gens = list(G.generators)
for subset in itertools.permutations(_H, len(gens)):
images = list(subset)
images.extend([_H.identity]*(len(G.generators)-len(images)))
_images = dict(zip(gens,images))
if _check_homomorphism(G, _H, _images):
if isinstance(H, FpGroup):
images = h_isomorphism.invert(images)
T = homomorphism(G, H, G.generators, images, check=False)
if T.is_isomorphism():
# It is a valid isomorphism
if not isomorphism:
return True
return (True, T)
if not isomorphism:
return False
return (False, None)
def is_isomorphic(G, H):
'''
Check if the groups are isomorphic to each other
Parameters
==========
G : A finite ``FpGroup`` or a ``PermutationGroup``
First group.
H : A finite ``FpGroup`` or a ``PermutationGroup``
Second group.
Returns
=======
boolean
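
Examples
========

Two quick illustrative checks: groups of order 6 with different
commutativity cannot be isomorphic, while any two groups of order 3
are (3 is a cyclic number, so every group of that order is cyclic).

>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... CyclicGroup, DihedralGroup)
>>> from sympy.combinatorics.homomorphisms import is_isomorphic
>>> is_isomorphic(CyclicGroup(6), DihedralGroup(3))
False
>>> is_isomorphic(CyclicGroup(3), AlternatingGroup(3))
True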
'''
return group_isomorphism(G, H, isomorphism=False)
|
44c743076ae0c72c53f1d5ba806416c601de3db5f5bd2fdcb646fb7614eb1b95 | from sympy.core import Basic, Dict, sympify
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet, Union
from sympy.utilities.iterables import flatten, group
from sympy.utilities.misc import as_int
from collections import defaultdict
class Partition(FiniteSet):
"""
This class represents an abstract partition.
A partition is a set of disjoint sets whose union equals a given set.
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
"""
_rank = None
_partition = None
def __new__(cls, *partition):
"""
Generates a new partition object.
This method also verifies if the arguments passed are
valid and raises a ValueError if they are not.
Examples
========
Creating Partition from Python lists:
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a
Partition({3}, {1, 2})
>>> a.partition
[[1, 2], [3]]
>>> len(a)
2
>>> a.members
(1, 2, 3)
Creating Partition from Python sets:
>>> Partition({1, 2, 3}, {4, 5})
Partition({4, 5}, {1, 2, 3})
Creating Partition from SymPy finite sets:
>>> from sympy import FiniteSet
>>> a = FiniteSet(1, 2, 3)
>>> b = FiniteSet(4, 5)
>>> Partition(a, b)
Partition({4, 5}, {1, 2, 3})
"""
args = []
dups = False
for arg in partition:
if isinstance(arg, list):
as_set = set(arg)
if len(as_set) < len(arg):
dups = True
break # error below
arg = as_set
args.append(_sympify(arg))
if not all(isinstance(part, FiniteSet) for part in args):
raise ValueError(
"Each argument to Partition should be " \
"a list, set, or a FiniteSet")
# sort so we have a canonical reference for RGS
U = Union(*args)
if dups or len(U) < sum(len(arg) for arg in args):
raise ValueError("Partition contained duplicate elements.")
obj = FiniteSet.__new__(cls, *args)
obj.members = tuple(U)
obj.size = len(U)
return obj
def sort_key(self, order=None):
"""Return a canonical key that can be used for sorting.
Ordering is based on the size and sorted elements of the partition
and ties are broken with the rank.
Examples
========
>>> from sympy import default_sort_key
>>> from sympy.combinatorics.partitions import Partition
>>> from sympy.abc import x
>>> a = Partition([1, 2])
>>> b = Partition([3, 4])
>>> c = Partition([1, x])
>>> d = Partition(list(range(4)))
>>> l = [d, b, a + 1, a, c]
>>> l.sort(key=default_sort_key); l
[Partition({1, 2}), Partition({1}, {2}), Partition({1, x}), Partition({3, 4}), Partition({0, 1, 2, 3})]
"""
if order is None:
members = self.members
else:
members = tuple(sorted(self.members,
key=lambda w: default_sort_key(w, order)))
return tuple(map(default_sort_key, (self.size, members, self.rank)))
@property
def partition(self):
"""Return partition as a sorted list of lists.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition([1], [2, 3]).partition
[[1], [2, 3]]
"""
if self._partition is None:
self._partition = sorted([sorted(p, key=default_sort_key)
for p in self.args])
return self._partition
def __add__(self, other):
"""
Return the partition whose rank is ``other`` greater than the current
rank (mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a + 1).rank
2
>>> (a + 100).rank
1
"""
other = as_int(other)
offset = self.rank + other
result = RGS_unrank((offset) %
RGS_enum(self.size),
self.size)
return Partition.from_rgs(result, self.members)
def __sub__(self, other):
"""
Return the partition whose rank is ``other`` less than the current
rank (mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a - 1).rank
0
>>> (a - 100).rank
1
"""
return self.__add__(-other)
def __le__(self, other):
"""
Checks if a partition is less than or equal to
the other based on rank.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a <= a
True
>>> a <= b
True
"""
return self.sort_key() <= sympify(other).sort_key()
def __lt__(self, other):
"""
Checks if a partition is less than the other.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a < b
True
"""
return self.sort_key() < sympify(other).sort_key()
@property
def rank(self):
"""
Gets the rank of a partition.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.rank
13
"""
if self._rank is not None:
return self._rank
self._rank = RGS_rank(self.RGS)
return self._rank
@property
def RGS(self):
"""
Returns the "restricted growth string" of the partition.
Explanation
===========
The RGS is returned as a list of indices, L, where L[i] indicates
the block in which element i appears. For example, in a partition
of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
[1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.members
(1, 2, 3, 4, 5)
>>> a.RGS
(0, 0, 1, 2, 2)
>>> a + 1
Partition({3}, {4}, {5}, {1, 2})
>>> _.RGS
(0, 0, 1, 2, 3)
"""
rgs = {}
partition = self.partition
for i, part in enumerate(partition):
for j in part:
rgs[j] = i
return tuple([rgs[i] for i in sorted(
[i for p in partition for i in p], key=default_sort_key)])
@classmethod
def from_rgs(self, rgs, elements):
"""
Creates a set partition from a restricted growth string.
Explanation
===========
The indices given in rgs are assumed to be the index
of the element as given in elements *as provided* (the
elements are not sorted by this routine). Block numbering
starts from 0. If any block was not referenced in ``rgs``
an error will be raised.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
Partition({c}, {a, d}, {b, e})
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
Partition({e}, {a, c}, {b, d})
>>> a = Partition([1, 4], [2], [3, 5])
>>> Partition.from_rgs(a.RGS, a.members)
Partition({2}, {1, 4}, {3, 5})
"""
if len(rgs) != len(elements):
raise ValueError('mismatch in rgs and element lengths')
max_elem = max(rgs) + 1
partition = [[] for i in range(max_elem)]
j = 0
for i in rgs:
partition[i].append(elements[j])
j += 1
if not all(p for p in partition):
raise ValueError('some blocks of the partition were empty.')
return Partition(*partition)
class IntegerPartition(Basic):
"""
This class represents an integer partition.
Explanation
===========
In number theory and combinatorics, a partition of a positive integer,
``n``, also called an integer partition, is a way of writing ``n`` as a
list of positive integers that sum to n. Two partitions that differ only
in the order of summands are considered to be the same partition; if order
matters then the partitions are referred to as compositions. For example,
4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
[2, 1, 1].
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
References
==========
.. [1] https://en.wikipedia.org/wiki/Partition_%28number_theory%29
"""
_dict = None
_keys = None
def __new__(cls, partition, integer=None):
"""
Generates a new IntegerPartition object from a list or dictionary.
Explanation
===========
The partition can be given as a list of positive integers or a
dictionary of (integer, multiplicity) items. If the partition is
preceded by an integer an error will be raised if the partition
does not sum to that given integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([5, 4, 3, 1, 1])
>>> a
IntegerPartition(14, (5, 4, 3, 1, 1))
>>> print(a)
[5, 4, 3, 1, 1]
>>> IntegerPartition({1:3, 2:1})
IntegerPartition(5, (2, 1, 1, 1))
If the value that the partition should sum to is given first, a check
will be made and an error will be raised if there is a discrepancy:
>>> IntegerPartition(10, [5, 4, 3, 1])
Traceback (most recent call last):
...
ValueError: Partition did not add to 10
"""
if integer is not None:
integer, partition = partition, integer
if isinstance(partition, (dict, Dict)):
_ = []
for k, v in sorted(list(partition.items()), reverse=True):
if not v:
continue
k, v = as_int(k), as_int(v)
_.extend([k]*v)
partition = tuple(_)
else:
partition = tuple(sorted(map(as_int, partition), reverse=True))
sum_ok = False
if integer is None:
integer = sum(partition)
sum_ok = True
else:
integer = as_int(integer)
if not sum_ok and sum(partition) != integer:
raise ValueError("Partition did not add to %s" % integer)
if any(i < 1 for i in partition):
raise ValueError("All integer summands must be greater than one")
obj = Basic.__new__(cls, integer, partition)
obj.partition = list(partition)
obj.integer = integer
return obj
def prev_lex(self):
"""Return the previous partition of the integer, n, in lexical order,
wrapping around to [1, ..., 1] if the partition is [n].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([4])
>>> print(p.prev_lex())
[3, 1]
>>> p.partition > p.prev_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
keys = self._keys
if keys == [1]:
return IntegerPartition({self.integer: 1})
if keys[-1] != 1:
d[keys[-1]] -= 1
if keys[-1] == 2:
d[1] = 2
else:
d[keys[-1] - 1] = d[1] = 1
else:
d[keys[-2]] -= 1
left = d[1] + keys[-2]
new = keys[-2]
d[1] = 0
while left:
new -= 1
if left - new >= 0:
d[new] += left//new
left -= d[new]*new
return IntegerPartition(self.integer, d)
def next_lex(self):
"""Return the next partition of the integer, n, in lexical order,
wrapping around to [n] if the partition is [1, ..., 1].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([3, 1])
>>> print(p.next_lex())
[4]
>>> p.partition < p.next_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
key = self._keys
a = key[-1]
if a == self.integer:
d.clear()
d[1] = self.integer
elif a == 1:
if d[a] > 1:
d[a + 1] += 1
d[a] -= 2
else:
b = key[-2]
d[b + 1] += 1
d[1] = (d[b] - 1)*b
d[b] = 0
else:
if d[a] > 1:
if len(key) == 1:
d.clear()
d[a + 1] = 1
d[1] = self.integer - a - 1
else:
a1 = a + 1
d[a1] += 1
d[1] = d[a]*a - a1
d[a] = 0
else:
b = key[-2]
b1 = b + 1
d[b1] += 1
need = d[b]*b + d[a]*a - b1
d[a] = d[b] = 0
d[1] = need
return IntegerPartition(self.integer, d)
def as_dict(self):
"""Return the partition as a dictionary whose keys are the
partition integers and the values are the multiplicity of that
integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
{1: 3, 2: 1, 3: 4}
"""
if self._dict is None:
groups = group(self.partition, multiple=False)
self._keys = [g[0] for g in groups]
self._dict = dict(groups)
return self._dict
@property
def conjugate(self):
"""
Computes the conjugate partition of itself.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([6, 3, 3, 2, 1])
>>> a.conjugate
[5, 4, 3, 1, 1, 1]
"""
j = 1
temp_arr = list(self.partition) + [0]
k = temp_arr[0]
b = [0]*k
while k > 0:
while k > temp_arr[j]:
b[k - 1] = j
k -= 1
j += 1
return b
def __lt__(self, other):
"""Return True if self is less than other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([3, 1])
>>> a < a
False
>>> b = a.next_lex()
>>> a < b
True
>>> a == b
False
"""
return list(reversed(self.partition)) < list(reversed(other.partition))
def __le__(self, other):
Return True if self is less than or equal to other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([4])
>>> a <= a
True
"""
return list(reversed(self.partition)) <= list(reversed(other.partition))
def as_ferrers(self, char='#'):
"""
Returns the Ferrers diagram of the partition as a string.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> print(IntegerPartition([1, 1, 5]).as_ferrers())
#####
#
#
"""
return "\n".join([char*i for i in self.partition])
def __str__(self):
return str(list(self.partition))
def random_integer_partition(n, seed=None):
"""
Generates a random integer partition summing to ``n`` as a list
of reverse-sorted integers.
Examples
========
>>> from sympy.combinatorics.partitions import random_integer_partition
For the following, a seed is given so a known value can be shown; in
practice, the seed would not be given.
>>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
[85, 12, 2, 1]
>>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
[5, 3, 1, 1]
>>> random_integer_partition(1)
[1]
"""
from sympy.testing.randtest import _randint
n = as_int(n)
if n < 1:
raise ValueError('n must be a positive integer')
randint = _randint(seed)
partition = []
while (n > 0):
k = randint(1, n)
mult = randint(1, n//k)
partition.append((k, mult))
n -= k*mult
partition.sort(reverse=True)
partition = flatten([[k]*m for k, m in partition])
return partition
def RGS_generalized(m):
"""
Computes the m + 1 generalized unrestricted growth strings
and returns them as rows of a matrix.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_generalized
>>> RGS_generalized(6)
Matrix([
[ 1, 1, 1, 1, 1, 1, 1],
[ 1, 2, 3, 4, 5, 6, 0],
[ 2, 5, 10, 17, 26, 0, 0],
[ 5, 15, 37, 77, 0, 0, 0],
[ 15, 52, 151, 0, 0, 0, 0],
[ 52, 203, 0, 0, 0, 0, 0],
[203, 0, 0, 0, 0, 0, 0]])
"""
d = zeros(m + 1)
for i in range(0, m + 1):
d[0, i] = 1
for i in range(1, m + 1):
for j in range(m):
if j <= m - i:
d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1]
else:
d[i, j] = 0
return d
def RGS_enum(m):
"""
RGS_enum computes the total number of restricted growth strings
possible for a superset of size m.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_enum
>>> from sympy.combinatorics.partitions import Partition
>>> RGS_enum(4)
15
>>> RGS_enum(5)
52
>>> RGS_enum(6)
203
We can check that the enumeration is correct by actually generating
the partitions. Here, the 15 partitions of 4 items are generated:
>>> a = Partition(list(range(4)))
>>> s = set()
>>> for i in range(20):
... s.add(a)
... a += 1
...
>>> assert len(s) == 15
"""
if (m < 1):
return 0
elif (m == 1):
return 1
else:
return bell(m)
def RGS_unrank(rank, m):
"""
Gives the unranked restricted growth string for a given
superset size.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_unrank
>>> RGS_unrank(14, 4)
[0, 1, 2, 3]
>>> RGS_unrank(0, 4)
[0, 0, 0, 0]
"""
if m < 1:
raise ValueError("The superset size must be >= 1")
if rank < 0 or RGS_enum(m) <= rank:
raise ValueError("Invalid arguments")
L = [1] * (m + 1)
j = 1
D = RGS_generalized(m)
for i in range(2, m + 1):
v = D[m - i, j]
cr = j*v
if cr <= rank:
L[i] = j + 1
rank -= cr
j += 1
else:
L[i] = int(rank / v + 1)
rank %= v
return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
"""
Computes the rank of a restricted growth string.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
>>> RGS_rank([0, 1, 2, 1, 3])
42
>>> RGS_rank(RGS_unrank(4, 7))
4
"""
rgs_size = len(rgs)
rank = 0
D = RGS_generalized(rgs_size)
for i in range(1, rgs_size):
n = len(rgs[(i + 1):])
m = max(rgs[0:i])
rank += D[n, m + 1] * rgs[i]
return rank
|
528406cb222c3762873f5d37879e5d87483d1008fe7b2b171e098c418972bab3 | from sympy.combinatorics.permutations import Permutation, _af_invert, _af_rmul
from sympy.ntheory import isprime
rmul = Permutation.rmul
_af_new = Permutation._af_new
############################################
#
# Utilities for computational group theory
#
############################################
def _base_ordering(base, degree):
r"""
Order `\{0, 1, \dots, n-1\}` so that base points come first and in order.
Parameters
==========
``base`` : the base
``degree`` : the degree of the associated permutation group
Returns
=======
A list ``base_ordering`` such that ``base_ordering[point]`` is the
position of ``point`` in the ordering.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _base_ordering
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> _base_ordering(S.base, S.degree)
[0, 1, 2, 3]
Notes
=====
This is used in backtrack searches, when we define a relation `\ll` on
the underlying set for a permutation group of degree `n`,
`\{0, 1, \dots, n-1\}`, so that if `(b_1, b_2, \dots, b_k)` is a base we
have `b_i \ll b_j` whenever `i<j` and `b_i \ll a` for all
`i\in\{1,2, \dots, k\}` and `a` is not in the base. The idea is developed
and applied to backtracking algorithms in [1], pp.108-132. The points
that are not in the base are taken in increasing order.
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
base_len = len(base)
ordering = [0]*degree
for i in range(base_len):
ordering[base[i]] = i
current = base_len
for i in range(degree):
if i not in base:
ordering[i] = current
current += 1
return ordering
def _check_cycles_alt_sym(perm):
"""
Checks for cycles of prime length p with n/2 < p < n-2.
Explanation
===========
Here `n` is the degree of the permutation. This is a helper function for
the function is_alt_sym from sympy.combinatorics.perm_groups.
Examples
========
>>> from sympy.combinatorics.util import _check_cycles_alt_sym
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12]])
>>> _check_cycles_alt_sym(a)
False
>>> b = Permutation([[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10]])
>>> _check_cycles_alt_sym(b)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.is_alt_sym
"""
n = perm.size
af = perm.array_form
current_len = 0
total_len = 0
used = set()
for i in range(n//2):
if i not in used and i < n//2 - total_len:
current_len = 1
used.add(i)
j = i
while af[j] != i:
current_len += 1
j = af[j]
used.add(j)
total_len += current_len
if current_len > n//2 and current_len < n - 2 and isprime(current_len):
return True
return False
def _distribute_gens_by_base(base, gens):
r"""
Distribute the group elements ``gens`` by membership in basic stabilizers.
Explanation
===========
Notice that for a base `(b_1, b_2, \dots, b_k)`, the basic stabilizers
are defined as `G^{(i)} = G_{b_1, \dots, b_{i-1}}` for
`i \in\{1, 2, \dots, k\}`.
Parameters
==========
``base`` : a sequence of points in `\{0, 1, \dots, n-1\}`
``gens`` : a list of elements of a permutation group of degree `n`.
Returns
=======
List of length `k`, where `k` is
the length of ``base``. The `i`-th entry contains those elements in
``gens`` which fix the first `i` elements of ``base`` (so that the
`0`-th entry is equal to ``gens`` itself). If no element fixes the first
`i` elements of ``base``, the `i`-th element is set to a list containing
the identity element.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> D = DihedralGroup(3)
>>> D.schreier_sims()
>>> D.strong_gens
[(0 1 2), (0 2), (1 2)]
>>> D.base
[0, 1]
>>> _distribute_gens_by_base(D.base, D.strong_gens)
[[(0 1 2), (0 2), (1 2)],
[(1 2)]]
See Also
========
_strong_gens_from_distr, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs
"""
base_len = len(base)
degree = gens[0].size
stabs = [[] for _ in range(base_len)]
max_stab_index = 0
for gen in gens:
j = 0
while j < base_len - 1 and gen._array_form[base[j]] == base[j]:
j += 1
if j > max_stab_index:
max_stab_index = j
for k in range(j + 1):
stabs[k].append(gen)
for i in range(max_stab_index + 1, base_len):
stabs[i].append(_af_new(list(range(degree))))
return stabs
def _handle_precomputed_bsgs(base, strong_gens, transversals=None,
basic_orbits=None, strong_gens_distr=None):
"""
Calculate BSGS-related structures from those present.
Explanation
===========
The base and strong generating set must be provided; if any of the
transversals, basic orbits or distributed strong generators are not
provided, they will be calculated from the base and strong generating set.
Parameters
==========
``base`` - the base
``strong_gens`` - the strong generators
``transversals`` - basic transversals
``basic_orbits`` - basic orbits
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Returns
=======
``(transversals, basic_orbits, strong_gens_distr)`` where ``transversals``
are the basic transversals, ``basic_orbits`` are the basic orbits, and
``strong_gens_distr`` are the strong generators distributed by membership
in basic stabilizers.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.util import _handle_precomputed_bsgs
>>> D = DihedralGroup(3)
>>> D.schreier_sims()
>>> _handle_precomputed_bsgs(D.base, D.strong_gens,
... basic_orbits=D.basic_orbits)
([{0: (2), 1: (0 1 2), 2: (0 2)}, {1: (2), 2: (1 2)}], [[0, 1, 2], [1, 2]], [[(0 1 2), (0 2), (1 2)], [(1 2)]])
See Also
========
_orbits_transversals_from_bsgs, _distribute_gens_by_base
"""
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if transversals is None:
if basic_orbits is None:
basic_orbits, transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
else:
transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=True)
else:
if basic_orbits is None:
base_len = len(base)
basic_orbits = [None]*base_len
for i in range(base_len):
basic_orbits[i] = list(transversals[i].keys())
return transversals, basic_orbits, strong_gens_distr
def _orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=False, slp=False):
"""
Compute basic orbits and transversals from a base and strong generating set.
Explanation
===========
The generators are provided as distributed across the basic stabilizers.
If the optional argument ``transversals_only`` is set to True, only the
transversals are returned.
Parameters
==========
``base`` - The base.
``strong_gens_distr`` - Strong generators distributed by membership in basic
stabilizers.
``transversals_only`` - bool
A flag switching between returning only the
transversals and both orbits and transversals.
``slp`` -
If ``True``, return a list of dictionaries containing the
generator presentations of the elements of the transversals,
i.e. the list of indices of generators from ``strong_gens_distr[i]``
such that their product is the relevant transversal element.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> (S.base, strong_gens_distr)
([0, 1], [[(0 1 2), (2)(0 1), (1 2)], [(1 2)]])
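Completing the example (illustrative), the basic orbits and
transversals can then be computed from the distributed generators:

>>> from sympy.combinatorics.util import _orbits_transversals_from_bsgs
>>> orbits, transversals = _orbits_transversals_from_bsgs(S.base, strong_gens_distr)
>>> sorted(orbits[0]), sorted(orbits[1])
([0, 1, 2], [1, 2])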
See Also
========
_distribute_gens_by_base, _handle_precomputed_bsgs
"""
from sympy.combinatorics.perm_groups import _orbit_transversal
base_len = len(base)
degree = strong_gens_distr[0][0].size
transversals = [None]*base_len
slps = [None]*base_len
if transversals_only is False:
basic_orbits = [None]*base_len
for i in range(base_len):
transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
base[i], pairs=True, slp=True)
transversals[i] = dict(transversals[i])
if transversals_only is False:
basic_orbits[i] = list(transversals[i].keys())
if transversals_only:
return transversals
else:
if not slp:
return basic_orbits, transversals
return basic_orbits, transversals, slps
def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):
"""
Remove redundant generators from a strong generating set.
Parameters
==========
``base`` - a base
``strong_gens`` - a strong generating set relative to ``base``
``basic_orbits`` - basic orbits
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Returns
=======
A strong generating set with respect to ``base`` which is a subset of
``strong_gens``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _remove_gens
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(15)
>>> base, strong_gens = S.schreier_sims_incremental()
>>> new_gens = _remove_gens(base, strong_gens)
>>> len(new_gens)
14
>>> _verify_bsgs(S, base, new_gens)
True
Notes
=====
This procedure is outlined in [1],p.95.
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
from sympy.combinatorics.perm_groups import _orbit
base_len = len(base)
degree = strong_gens[0].size
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if basic_orbits is None:
basic_orbits = []
for i in range(base_len):
basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])
basic_orbits.append(basic_orbit)
strong_gens_distr.append([])
res = strong_gens[:]
for i in range(base_len - 1, -1, -1):
gens_copy = strong_gens_distr[i][:]
for gen in strong_gens_distr[i]:
if gen not in strong_gens_distr[i + 1]:
temp_gens = gens_copy[:]
temp_gens.remove(gen)
if temp_gens == []:
continue
temp_orbit = _orbit(degree, temp_gens, base[i])
if temp_orbit == basic_orbits[i]:
gens_copy.remove(gen)
res.remove(gen)
return res
def _strip(g, base, orbits, transversals):
"""
Attempt to decompose a permutation using a (possibly partial) BSGS
structure.
Explanation
===========
This is done by treating the sequence ``base`` as an actual base, and
the orbits ``orbits`` and transversals ``transversals`` as basic orbits and
transversals relative to it.
This process is called "sifting". A sift is unsuccessful when a certain
orbit element is not found or when after the sift the decomposition
doesn't end with the identity element.
The argument ``transversals`` is a list of dictionaries that provides
transversal elements for the orbits ``orbits``.
Parameters
==========
``g`` - permutation to be decomposed
``base`` - sequence of points
``orbits`` - a list in which the ``i``-th entry is an orbit of ``base[i]``
under some subgroup of the pointwise stabilizer of
``base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit
in this function since the only information we need is encoded in the orbits
and transversals
``transversals`` - a list of orbit transversals associated with the orbits
``orbits``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.util import _strip
>>> S = SymmetricGroup(5)
>>> S.schreier_sims()
>>> g = Permutation([0, 2, 3, 1, 4])
>>> _strip(g, S.base, S.basic_orbits, S.basic_transversals)
((4), 5)
Notes
=====
The algorithm is described in [1],pp.89-90. The reason for returning
both the current state of the element being decomposed and the level
at which the sifting ends is that they provide important information for
the randomized version of the Schreier-Sims algorithm.
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E."Handbook of computational group theory"
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random
"""
h = g._array_form
base_len = len(base)
for i in range(base_len):
beta = h[base[i]]
if beta == base[i]:
continue
if beta not in orbits[i]:
return _af_new(h), i + 1
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
return _af_new(h), base_len + 1
def _strip_af(h, base, orbits, transversals, j, slp=[], slps={}):
"""
Optimized version of ``_strip``, with ``h``, the transversals and the
result all in array form.
If the stripped element is the identity, it returns ``False, base_len + 1``.
``j`` is an index such that ``h[base[i]] == base[i]`` for all ``i <= j``.
"""
base_len = len(base)
for i in range(j+1, base_len):
beta = h[base[i]]
if beta == base[i]:
continue
if beta not in orbits[i]:
if not slp:
return h, i + 1
return h, i + 1, slp
u = transversals[i][beta]
if h == u:
if not slp:
return False, base_len + 1
return False, base_len + 1, slp
h = _af_rmul(_af_invert(u), h)
if slp:
u_slp = slps[i][beta][:]
u_slp.reverse()
u_slp = [(i, (g,)) for g in u_slp]
slp = u_slp + slp
if not slp:
return h, base_len + 1
return h, base_len + 1, slp
def _strong_gens_from_distr(strong_gens_distr):
"""
Retrieve strong generating set from generators of basic stabilizers.
This is just the union of the generators of the first and second basic
stabilizers.
Parameters
==========
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import (_strong_gens_from_distr,
... _distribute_gens_by_base)
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> S.strong_gens
[(0 1 2), (2)(0 1), (1 2)]
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> _strong_gens_from_distr(strong_gens_distr)
[(0 1 2), (2)(0 1), (1 2)]
See Also
========
_distribute_gens_by_base
"""
if len(strong_gens_distr) == 1:
return strong_gens_distr[0][:]
else:
result = strong_gens_distr[0]
for gen in strong_gens_distr[1]:
if gen not in result:
result.append(gen)
return result
|
3b4f72b1885a134840bfac302040387247e075aca6d4ff4c5c33f49c266f25e7 | from sympy.combinatorics.free_groups import free_group
from sympy.printing.defaults import DefaultPrinting
from itertools import chain, product
from bisect import bisect_left
###############################################################################
# COSET TABLE #
###############################################################################
class CosetTable(DefaultPrinting):
# coset_table: Mathematically a coset table
# represented using a list of lists
# alpha: Mathematically a coset (precisely, a live coset)
# represented by an integer between i with 1 <= i <= n
# alpha in c
# x: Mathematically an element of "A" (set of generators and
# their inverses), represented using "FpGroupElement"
# fp_grp: Finitely Presented Group with < X|R > as presentation.
# H: subgroup of fp_grp.
# NOTE: We start with H as being only a list of words in generators
# of "fp_grp". Since `.subgroup` method has not been implemented.
r"""
Properties
==========
[1] `0 \in \Omega` and `\tau(1) = \epsilon`
[2] `\alpha^x = \beta \Leftrightarrow \beta^{x^{-1}} = \alpha`
[3] If `\alpha^x = \beta`, then `H \tau(\alpha)x = H \tau(\beta)`
[4] `\forall \alpha \in \Omega, 1^{\tau(\alpha)} = \alpha`
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
.. [2] John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490.
"Implementation and Analysis of the Todd-Coxeter Algorithm"
"""
# default limit for the number of cosets allowed in a
# coset enumeration.
coset_table_max_limit = 4096000
# limit for the current instance
coset_table_limit = None
# maximum size of deduction stack above or equal to
# which it is emptied
max_stack_size = 100
def __init__(self, fp_grp, subgroup, max_cosets=None):
if not max_cosets:
max_cosets = CosetTable.coset_table_max_limit
self.fp_group = fp_grp
self.subgroup = subgroup
self.coset_table_limit = max_cosets
# "p" is setup independent of Omega and n
self.p = [0]
# a list of the form `[gen_1, gen_1^{-1}, ... , gen_k, gen_k^{-1}]`
self.A = list(chain.from_iterable((gen, gen**-1) \
for gen in self.fp_group.generators))
#P[alpha, x] Only defined when alpha^x is defined.
self.P = [[None]*len(self.A)]
# the mathematical coset table which is a list of lists
self.table = [[None]*len(self.A)]
self.A_dict = {x: self.A.index(x) for x in self.A}
self.A_dict_inv = {}
for x, index in self.A_dict.items():
if index % 2 == 0:
self.A_dict_inv[x] = self.A_dict[x] + 1
else:
self.A_dict_inv[x] = self.A_dict[x] - 1
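        # Illustrative layout, e.g. for generators x, y: A is
        # [x, x**-1, y, y**-1], so A_dict maps x -> 0, x**-1 -> 1, y -> 2,
        # y**-1 -> 3, while A_dict_inv maps each symbol to the column of its
        # inverse: x -> 1, x**-1 -> 0, y -> 3, y**-1 -> 2.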
# used in the coset-table based method of coset enumeration. Each of
# the element is called a "deduction" which is the form (alpha, x) whenever
# a value is assigned to alpha^x during a definition or "deduction process"
self.deduction_stack = []
# Attributes for modified methods.
H = self.subgroup
self._grp = free_group(', ' .join(["a_%d" % i for i in range(len(H))]))[0]
self.P = [[None]*len(self.A)]
self.p_p = {}
@property
def omega(self):
"""Set of live cosets. """
return [coset for coset in range(len(self.p)) if self.p[coset] == coset]
def copy(self):
"""
Return a shallow copy of Coset Table instance ``self``.
"""
self_copy = self.__class__(self.fp_group, self.subgroup)
self_copy.table = [list(perm_rep) for perm_rep in self.table]
self_copy.p = list(self.p)
self_copy.deduction_stack = list(self.deduction_stack)
return self_copy
def __str__(self):
return "Coset Table on %s with %s as subgroup generators" \
% (self.fp_group, self.subgroup)
__repr__ = __str__
@property
def n(self):
"""The number `n` represents the length of the sublist containing the
live cosets.
"""
if not self.table:
return 0
return max(self.omega) + 1
# Pg. 152 [1]
def is_complete(self):
r"""
The coset table is called complete if it has no undefined entries
on the live cosets; that is, `\alpha^x` is defined for all
`\alpha \in \Omega` and `x \in A`.
"""
return not any(None in self.table[coset] for coset in self.omega)
# Pg. 153 [1]
def define(self, alpha, x, modified=False):
r"""
This routine is used in the relator-based strategy of Todd-Coxeter
algorithm if some `\alpha^x` is undefined. We check whether there is
space available for defining a new coset. If there is enough space
then we remedy this by adjoining a new coset `\beta` to `\Omega`
        (i.e. to the set of live cosets) and put that equal to `\alpha^x`, then
make an assignment satisfying Property[1]. If there is not enough space
then we halt the Coset Table creation. The maximum amount of space that
can be used by Coset Table can be manipulated using the class variable
``CosetTable.coset_table_max_limit``.
See Also
========
define_c
"""
A = self.A
table = self.table
len_table = len(table)
if len_table >= self.coset_table_limit:
# abort the further generation of cosets
raise ValueError("the coset enumeration has defined more than "
"%s cosets. Try with a greater value max number of cosets "
% self.coset_table_limit)
table.append([None]*len(A))
self.P.append([None]*len(self.A))
# beta is the new coset generated
beta = len_table
self.p.append(beta)
table[alpha][self.A_dict[x]] = beta
table[beta][self.A_dict_inv[x]] = alpha
# P[alpha][x] = epsilon, P[beta][x**-1] = epsilon
if modified:
self.P[alpha][self.A_dict[x]] = self._grp.identity
self.P[beta][self.A_dict_inv[x]] = self._grp.identity
self.p_p[beta] = self._grp.identity
def define_c(self, alpha, x):
r"""
A variation of ``define`` routine, described on Pg. 165 [1], used in
the coset table-based strategy of Todd-Coxeter algorithm. It differs
from ``define`` routine in that for each definition it also adds the
tuple `(\alpha, x)` to the deduction stack.
See Also
========
define
"""
A = self.A
table = self.table
len_table = len(table)
if len_table >= self.coset_table_limit:
# abort the further generation of cosets
raise ValueError("the coset enumeration has defined more than "
"%s cosets. Try with a greater value max number of cosets "
% self.coset_table_limit)
table.append([None]*len(A))
# beta is the new coset generated
beta = len_table
self.p.append(beta)
table[alpha][self.A_dict[x]] = beta
table[beta][self.A_dict_inv[x]] = alpha
# append to deduction stack
self.deduction_stack.append((alpha, x))
def scan_c(self, alpha, word):
"""
        A variation of the ``scan`` routine, described on pg. 165 of [1], which
        pushes a tuple onto the deduction stack whenever a deduction occurs.
See Also
========
scan, scan_check, scan_and_fill, scan_and_fill_c
"""
# alpha is an integer representing a "coset"
# since scanning can be in two cases
# 1. for alpha=0 and w in Y (i.e generating set of H)
# 2. alpha in Omega (set of live cosets), w in R (relators)
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
f = alpha
i = 0
r = len(word)
b = alpha
j = r - 1
# list of union of generators and their inverses
while i <= j and table[f][A_dict[word[i]]] is not None:
f = table[f][A_dict[word[i]]]
i += 1
if i > j:
if f != b:
self.coincidence_c(f, b)
return
while j >= i and table[b][A_dict_inv[word[j]]] is not None:
b = table[b][A_dict_inv[word[j]]]
j -= 1
if j < i:
# we have an incorrect completed scan with coincidence f ~ b
# run the "coincidence" routine
self.coincidence_c(f, b)
elif j == i:
# deduction process
table[f][A_dict[word[i]]] = b
table[b][A_dict_inv[word[i]]] = f
self.deduction_stack.append((f, word[i]))
# otherwise scan is incomplete and yields no information
# alpha, beta coincide, i.e. alpha, beta represent the pair of cosets where
# coincidence occurs
def coincidence_c(self, alpha, beta):
"""
        A variation of the ``coincidence`` routine used in the coset-table
        based method of coset enumeration. The only difference is that the
        deductions made while processing the coincidence are also appended
        to ``deduction_stack``.
See Also
========
coincidence
"""
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
# behaves as a queue
q = []
self.merge(alpha, beta, q)
while len(q) > 0:
gamma = q.pop(0)
for x in A_dict:
delta = table[gamma][A_dict[x]]
if delta is not None:
table[delta][A_dict_inv[x]] = None
# only line of difference from ``coincidence`` routine
self.deduction_stack.append((delta, x**-1))
mu = self.rep(gamma)
nu = self.rep(delta)
if table[mu][A_dict[x]] is not None:
self.merge(nu, table[mu][A_dict[x]], q)
elif table[nu][A_dict_inv[x]] is not None:
self.merge(mu, table[nu][A_dict_inv[x]], q)
else:
table[mu][A_dict[x]] = nu
table[nu][A_dict_inv[x]] = mu
def scan(self, alpha, word, y=None, fill=False, modified=False):
r"""
``scan`` performs a scanning process on the input ``word``.
It first locates the largest prefix ``s`` of ``word`` for which
`\alpha^s` is defined (i.e is not ``None``), ``s`` may be empty. Let
``word=sv``, let ``t`` be the longest suffix of ``v`` for which
`\alpha^{t^{-1}}` is defined, and let ``v=ut``. Then three
possibilities are there:
1. If ``t=v``, then we say that the scan completes, and if, in addition
`\alpha^s = \alpha^{t^{-1}}`, then we say that the scan completes
correctly.
2. It can also happen that scan does not complete, but `|u|=1`; that
is, the word ``u`` consists of a single generator `x \in A`. In that
case, if `\alpha^s = \beta` and `\alpha^{t^{-1}} = \gamma`, then we can
set `\beta^x = \gamma` and `\gamma^{x^{-1}} = \beta`. These assignments
are known as deductions and enable the scan to complete correctly.
        3. See the ``coincidence`` routine for an explanation of the third condition.
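        As a small illustrative trace (with hypothetical table entries, not an
        example from [1]): take ``word = x*y*x`` and ``alpha = 0``. If
        `0^x = 1` is defined but `1^y` is not, the forward scan stops with
        ``f = 1, i = 1``; if `0^{x^{-1}} = 2` is defined, the backward scan
        consumes the trailing ``x`` and stops with ``b = 2, j = 1``. Since
        ``i == j``, the deductions `1^y = 2` and `2^{y^{-1}} = 1` are made and
        the scan completes correctly.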
Notes
=====
The code for the procedure of scanning `\alpha \in \Omega`
under `w \in A*` is defined on pg. 155 [1]
See Also
========
scan_c, scan_check, scan_and_fill, scan_and_fill_c
Scan and Fill
=============
        Performed when the keyword argument ``fill=True``.
Modified Scan
=============
        Performed when the keyword argument ``modified=True``.
"""
# alpha is an integer representing a "coset"
# since scanning can be in two cases
# 1. for alpha=0 and w in Y (i.e generating set of H)
# 2. alpha in Omega (set of live cosets), w in R (relators)
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
f = alpha
i = 0
r = len(word)
b = alpha
j = r - 1
b_p = y
if modified:
f_p = self._grp.identity
flag = 0
while fill or flag == 0:
flag = 1
while i <= j and table[f][A_dict[word[i]]] is not None:
if modified:
f_p = f_p*self.P[f][A_dict[word[i]]]
f = table[f][A_dict[word[i]]]
i += 1
if i > j:
if f != b:
if modified:
self.modified_coincidence(f, b, f_p**-1*y)
else:
self.coincidence(f, b)
return
while j >= i and table[b][A_dict_inv[word[j]]] is not None:
if modified:
b_p = b_p*self.P[b][self.A_dict_inv[word[j]]]
b = table[b][A_dict_inv[word[j]]]
j -= 1
if j < i:
# we have an incorrect completed scan with coincidence f ~ b
# run the "coincidence" routine
if modified:
self.modified_coincidence(f, b, f_p**-1*b_p)
else:
self.coincidence(f, b)
elif j == i:
# deduction process
table[f][A_dict[word[i]]] = b
table[b][A_dict_inv[word[i]]] = f
if modified:
self.P[f][self.A_dict[word[i]]] = f_p**-1*b_p
self.P[b][self.A_dict_inv[word[i]]] = b_p**-1*f_p
return
elif fill:
self.define(f, word[i], modified=modified)
# otherwise scan is incomplete and yields no information
# used in the low-index subgroups algorithm
def scan_check(self, alpha, word):
r"""
        Another version of the ``scan`` routine: it checks whether
        `\alpha` scans correctly under ``word`` and is a straightforward
        modification of ``scan``. ``scan_check`` returns ``False`` (rather than
calling ``coincidence``) if the scan completes incorrectly; otherwise
it returns ``True``.
See Also
========
scan, scan_c, scan_and_fill, scan_and_fill_c
"""
# alpha is an integer representing a "coset"
# since scanning can be in two cases
# 1. for alpha=0 and w in Y (i.e generating set of H)
# 2. alpha in Omega (set of live cosets), w in R (relators)
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
f = alpha
i = 0
r = len(word)
b = alpha
j = r - 1
while i <= j and table[f][A_dict[word[i]]] is not None:
f = table[f][A_dict[word[i]]]
i += 1
if i > j:
return f == b
while j >= i and table[b][A_dict_inv[word[j]]] is not None:
b = table[b][A_dict_inv[word[j]]]
j -= 1
if j < i:
# we have an incorrect completed scan with coincidence f ~ b
# return False, instead of calling coincidence routine
return False
elif j == i:
# deduction process
table[f][A_dict[word[i]]] = b
table[b][A_dict_inv[word[i]]] = f
return True
def merge(self, k, lamda, q, w=None, modified=False):
"""
Merge two classes with representatives ``k`` and ``lamda``, described
on Pg. 157 [1] (for pseudocode), start by putting ``p[k] = lamda``.
It is more efficient to choose the new representative from the larger
        of the two classes being merged, i.e. the larger of ``k`` and ``lamda``.
        The procedure ``merge`` performs the merging operation and adds the
        deleted class representative to the queue ``q``.
Parameters
==========
        'k', 'lamda' -- the two class representatives to be merged.
Notes
=====
Pg. 86-87 [1] contains a description of this method.
See Also
========
coincidence, rep
"""
p = self.p
rep = self.rep
phi = rep(k, modified=modified)
psi = rep(lamda, modified=modified)
if phi != psi:
mu = min(phi, psi)
v = max(phi, psi)
p[v] = mu
if modified:
if v == phi:
self.p_p[phi] = self.p_p[k]**-1*w*self.p_p[lamda]
else:
self.p_p[psi] = self.p_p[lamda]**-1*w**-1*self.p_p[k]
q.append(v)
def rep(self, k, modified=False):
r"""
Parameters
==========
`k \in [0 \ldots n-1]`, as for ``self`` only array ``p`` is used
Returns
=======
Representative of the class containing ``k``.
Returns the representative of `\sim` class containing ``k``, it also
makes some modification to array ``p`` of ``self`` to ease further
computations, described on Pg. 157 [1].
The information on classes under `\sim` is stored in array `p` of
``self`` argument, which will always satisfy the property:
`p[\alpha] \sim \alpha` and `p[\alpha]=\alpha \iff \alpha=rep(\alpha)`
        `\forall \alpha \in [0 \ldots n-1]`.
So, for `\alpha \in [0 \ldots n-1]`, we find `rep(self, \alpha)` by
continually replacing `\alpha` by `p[\alpha]` until it becomes
        constant (i.e. satisfies `p[\alpha] = \alpha`).
To increase the efficiency of later ``rep`` calculations, whenever we
find `rep(self, \alpha)=\beta`, we set
`p[\gamma] = \beta \forall \gamma \in p-chain` from `\alpha` to `\beta`
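        For instance (an illustrative trace): if ``p == [0, 0, 1, 2]``, then
        ``rep(3)`` follows the chain `3 \rightarrow 2 \rightarrow 1 \rightarrow 0`,
        returns ``0`` and leaves ``p == [0, 0, 0, 0]``, so that later calls on
        these points terminate in a single step.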
Notes
=====
``rep`` routine is also described on Pg. 85-87 [1] in Atkinson's
algorithm, this results from the fact that ``coincidence`` routine
introduces functionality similar to that introduced by the
``minimal_block`` routine on Pg. 85-87 [1].
See Also
========
coincidence, merge
"""
p = self.p
lamda = k
rho = p[lamda]
if modified:
s = p[:]
while rho != lamda:
if modified:
s[rho] = lamda
lamda = rho
rho = p[lamda]
if modified:
rho = s[lamda]
while rho != k:
mu = rho
rho = s[mu]
p[rho] = lamda
self.p_p[rho] = self.p_p[rho]*self.p_p[mu]
else:
mu = k
rho = p[mu]
while rho != lamda:
p[mu] = lamda
mu = rho
rho = p[mu]
return lamda
# alpha, beta coincide, i.e. alpha, beta represent the pair of cosets
# where coincidence occurs
def coincidence(self, alpha, beta, w=None, modified=False):
r"""
The third situation described in ``scan`` routine is handled by this
routine, described on Pg. 156-161 [1].
The unfortunate situation when the scan completes but not correctly,
then ``coincidence`` routine is run. i.e when for some `i` with
`1 \le i \le r+1`, we have `w=st` with `s = x_1 x_2 \dots x_{i-1}`,
`t = x_i x_{i+1} \dots x_r`, and `\beta = \alpha^s` and
        `\gamma = \alpha^{t^{-1}}` are defined but unequal. This means that
        `\beta` and `\gamma` represent the same coset of `H` in `G`. Described
        on Pg. 156 [1].
See Also
========
scan
"""
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
# behaves as a queue
q = []
if modified:
self.modified_merge(alpha, beta, w, q)
else:
self.merge(alpha, beta, q)
while len(q) > 0:
gamma = q.pop(0)
for x in A_dict:
delta = table[gamma][A_dict[x]]
if delta is not None:
table[delta][A_dict_inv[x]] = None
mu = self.rep(gamma, modified=modified)
nu = self.rep(delta, modified=modified)
if table[mu][A_dict[x]] is not None:
if modified:
v = self.p_p[delta]**-1*self.P[gamma][self.A_dict[x]]**-1
v = v*self.p_p[gamma]*self.P[mu][self.A_dict[x]]
self.modified_merge(nu, table[mu][self.A_dict[x]], v, q)
else:
self.merge(nu, table[mu][A_dict[x]], q)
elif table[nu][A_dict_inv[x]] is not None:
if modified:
v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]]
v = v*self.p_p[delta]*self.P[mu][self.A_dict_inv[x]]
self.modified_merge(mu, table[nu][self.A_dict_inv[x]], v, q)
else:
self.merge(mu, table[nu][A_dict_inv[x]], q)
else:
table[mu][A_dict[x]] = nu
table[nu][A_dict_inv[x]] = mu
if modified:
v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]]*self.p_p[delta]
self.P[mu][self.A_dict[x]] = v
self.P[nu][self.A_dict_inv[x]] = v**-1
# method used in the HLT strategy
def scan_and_fill(self, alpha, word):
"""
A modified version of ``scan`` routine used in the relator-based
method of coset enumeration, described on pg. 162-163 [1], which
follows the idea that whenever the procedure is called and the scan
is incomplete then it makes new definitions to enable the scan to
complete; i.e it fills in the gaps in the scan of the relator or
subgroup generator.
"""
self.scan(alpha, word, fill=True)
def scan_and_fill_c(self, alpha, word):
"""
A modified version of ``scan`` routine, described on Pg. 165 second
        para. [1], with modification similar to that of ``scan_and_fill``, the
only difference being it calls the coincidence procedure used in the
coset-table based method i.e. the routine ``coincidence_c`` is used.
See Also
========
scan, scan_and_fill
"""
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
r = len(word)
f = alpha
i = 0
b = alpha
j = r - 1
# loop until it has filled the alpha row in the table.
while True:
# do the forward scanning
while i <= j and table[f][A_dict[word[i]]] is not None:
f = table[f][A_dict[word[i]]]
i += 1
if i > j:
if f != b:
self.coincidence_c(f, b)
return
# forward scan was incomplete, scan backwards
while j >= i and table[b][A_dict_inv[word[j]]] is not None:
b = table[b][A_dict_inv[word[j]]]
j -= 1
if j < i:
self.coincidence_c(f, b)
elif j == i:
table[f][A_dict[word[i]]] = b
table[b][A_dict_inv[word[i]]] = f
self.deduction_stack.append((f, word[i]))
else:
self.define_c(f, word[i])
# method used in the HLT strategy
def look_ahead(self):
"""
When combined with the HLT method this is known as HLT+Lookahead
method of coset enumeration, described on pg. 164 [1]. Whenever
``define`` aborts due to lack of space available this procedure is
executed. This routine helps in recovering space resulting from
"coincidence" of cosets.
"""
R = self.fp_group.relators
p = self.p
        # completely scan all relators under all live cosets
        # without making new definitions
for beta in self.omega:
for w in R:
self.scan(beta, w)
if p[beta] < beta:
break
# Pg. 166
def process_deductions(self, R_c_x, R_c_x_inv):
"""
Processes the deductions that have been pushed onto ``deduction_stack``,
described on Pg. 166 [1] and is used in coset-table based enumeration.
See Also
========
deduction_stack
"""
p = self.p
table = self.table
while len(self.deduction_stack) > 0:
if len(self.deduction_stack) >= CosetTable.max_stack_size:
self.look_ahead()
del self.deduction_stack[:]
continue
else:
alpha, x = self.deduction_stack.pop()
if p[alpha] == alpha:
for w in R_c_x:
self.scan_c(alpha, w)
if p[alpha] < alpha:
break
beta = table[alpha][self.A_dict[x]]
if beta is not None and p[beta] == beta:
for w in R_c_x_inv:
self.scan_c(beta, w)
if p[beta] < beta:
break
def process_deductions_check(self, R_c_x, R_c_x_inv):
"""
A variation of ``process_deductions``, this calls ``scan_check``
        wherever ``process_deductions`` calls ``scan_c``, as described in [1].
See Also
========
process_deductions
"""
table = self.table
while len(self.deduction_stack) > 0:
alpha, x = self.deduction_stack.pop()
for w in R_c_x:
if not self.scan_check(alpha, w):
return False
beta = table[alpha][self.A_dict[x]]
if beta is not None:
for w in R_c_x_inv:
if not self.scan_check(beta, w):
return False
return True
def switch(self, beta, gamma):
r"""Switch the elements `\beta, \gamma \in \Omega` of ``self``, used
by the ``standardize`` procedure, described on Pg. 167 [1].
See Also
========
standardize
"""
A = self.A
A_dict = self.A_dict
table = self.table
for x in A:
z = table[gamma][A_dict[x]]
table[gamma][A_dict[x]] = table[beta][A_dict[x]]
table[beta][A_dict[x]] = z
for alpha in range(len(self.p)):
if self.p[alpha] == alpha:
if table[alpha][A_dict[x]] == beta:
table[alpha][A_dict[x]] = gamma
elif table[alpha][A_dict[x]] == gamma:
table[alpha][A_dict[x]] = beta
def standardize(self):
r"""
A coset table is standardized if when running through the cosets and
within each coset through the generator images (ignoring generator
inverses), the cosets appear in order of the integers
`0, 1, \dots, n`. "Standardize" reorders the elements of `\Omega`
such that, if we scan the coset table first by elements of `\Omega`
and then by elements of A, then the cosets occur in ascending order.
``standardize()`` is used at the end of an enumeration to permute the
cosets so that they occur in some sort of standard order.
Notes
=====
        The procedure is described on pg. 167-168 [1]; it makes use of the
        ``switch`` routine to replace cosets by smaller integer values.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
>>> F, x, y = free_group("x, y")
# Example 5.3 from [1]
>>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
>>> C = coset_enumeration_r(f, [])
>>> C.compress()
>>> C.table
[[1, 3, 1, 3], [2, 0, 2, 0], [3, 1, 3, 1], [0, 2, 0, 2]]
>>> C.standardize()
>>> C.table
[[1, 2, 1, 2], [3, 0, 3, 0], [0, 3, 0, 3], [2, 1, 2, 1]]
"""
A = self.A
A_dict = self.A_dict
gamma = 1
for alpha, x in product(range(self.n), A):
beta = self.table[alpha][A_dict[x]]
if beta >= gamma:
if beta > gamma:
self.switch(gamma, beta)
gamma += 1
if gamma == self.n:
return
# Compression of a Coset Table
def compress(self):
"""Removes the non-live cosets from the coset table, described on
pg. 167 [1].
"""
gamma = -1
A = self.A
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
chi = tuple([i for i in range(len(self.p)) if self.p[i] != i])
for alpha in self.omega:
gamma += 1
if gamma != alpha:
# replace alpha by gamma in coset table
for x in A:
beta = table[alpha][A_dict[x]]
table[gamma][A_dict[x]] = beta
                    # entries that still point at old coset numbers are
                    # renumbered by the re-indexing pass below
# all the cosets in the table are live cosets
self.p = list(range(gamma + 1))
        # delete the rows of the dead cosets
del table[len(self.p):]
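        # ``chi`` holds the indices of the dead cosets, so for an old table
        # entry v, bisect_left(chi, v) counts the dead cosets below v and
        # v - bisect_left(chi, v) is its new, compressed index.  E.g. with
        # p == [0, 1, 2, 1, 1] we get chi == (3, 4) and the entries 0, 1, 2
        # are unchanged; with chi == (1,) an old entry 2 would become 1.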
# re-define values
for row in table:
for j in range(len(self.A)):
row[j] -= bisect_left(chi, row[j])
def conjugates(self, R):
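        """
        Return a list of sets, one for each ``x`` in ``A``, containing those
        cyclic conjugates of the relators in ``R`` (and of their inverses)
        whose first letter is ``x``.
        """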
R_c = list(chain.from_iterable((rel.cyclic_conjugates(), \
(rel**-1).cyclic_conjugates()) for rel in R))
R_set = set()
for conjugate in R_c:
R_set = R_set.union(conjugate)
R_c_list = []
for x in self.A:
r = {word for word in R_set if word[0] == x}
R_c_list.append(r)
R_set.difference_update(r)
return R_c_list
def coset_representative(self, coset):
'''
Compute the coset representative of a given coset.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = coset_enumeration_r(f, [x])
>>> C.compress()
>>> C.table
[[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
>>> C.coset_representative(0)
<identity>
>>> C.coset_representative(1)
y
>>> C.coset_representative(2)
y**-1
'''
for x in self.A:
gamma = self.table[coset][self.A_dict[x]]
if coset == 0:
return self.fp_group.identity
if gamma < coset:
return self.coset_representative(gamma)*x**-1
##############################
# Modified Methods #
##############################
def modified_define(self, alpha, x):
r"""
        Define a function p_p from [1..n] to A* as
an additional component of the modified coset table.
Parameters
==========
\alpha \in \Omega
x \in A*
See Also
========
define
"""
self.define(alpha, x, modified=True)
def modified_scan(self, alpha, w, y, fill=False):
r"""
Parameters
==========
\alpha \in \Omega
w \in A*
        y \in (Y \cup Y^{-1})
        fill -- when set to ``True``, ``modified_scan_and_fill`` is performed.
See Also
========
scan
"""
self.scan(alpha, w, y=y, fill=fill, modified=True)
def modified_scan_and_fill(self, alpha, w, y):
self.modified_scan(alpha, w, y, fill=True)
def modified_merge(self, k, lamda, w, q):
r"""
Parameters
==========
'k', 'lamda' -- the two class representatives to be merged.
        q -- queue of length `l` of elements to be deleted from `\Omega`.
        w -- word in `Y \cup Y^{-1}`
See Also
========
merge
"""
self.merge(k, lamda, q, w=w, modified=True)
def modified_rep(self, k):
r"""
Parameters
==========
`k \in [0 \ldots n-1]`
See Also
========
rep
"""
self.rep(k, modified=True)
def modified_coincidence(self, alpha, beta, w):
r"""
Parameters
==========
A coincident pair `\alpha, \beta \in \Omega, w \in Y \cup Y^{-1}`
See Also
========
coincidence
"""
self.coincidence(alpha, beta, w=w, modified=True)
###############################################################################
# COSET ENUMERATION #
###############################################################################
# relator-based method
def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None,
incomplete=False, modified=False):
"""
    This is the easier of the two implemented methods of coset enumeration
    and is often called the HLT method, after Haselgrove, Leech and Trotter.
    The idea is that ``scan_and_fill`` makes new definitions whenever a scan
    is incomplete, enabling the scan to complete; this way we fill in the
    gaps in the scan of the relator or subgroup generator, hence the name
    relator-based method.
An instance of `CosetTable` for `fp_grp` can be passed as the keyword
argument `draft` in which case the coset enumeration will start with
that instance and attempt to complete it.
When `incomplete` is `True` and the function is unable to complete for
some reason, the partially complete table will be returned.
# TODO: complete the docstring
See Also
========
scan_and_fill,
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
>>> F, x, y = free_group("x, y")
# Example 5.1 from [1]
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = coset_enumeration_r(f, [x])
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[0, 0, 1, 2]
[1, 1, 2, 0]
[2, 2, 0, 1]
>>> C.p
[0, 1, 2, 1, 1]
# Example from exercises Q2 [1]
>>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
>>> C = coset_enumeration_r(f, [])
>>> C.compress(); C.standardize()
>>> C.table
[[1, 2, 3, 4],
[5, 0, 6, 7],
[0, 5, 7, 6],
[7, 6, 5, 0],
[6, 7, 0, 5],
[2, 1, 4, 3],
[3, 4, 2, 1],
[4, 3, 1, 2]]
# Example 5.2
>>> f = FpGroup(F, [x**2, y**3, (x*y)**3])
>>> Y = [x*y]
>>> C = coset_enumeration_r(f, Y)
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[1, 1, 2, 1]
[0, 0, 0, 2]
[3, 3, 1, 0]
[2, 2, 3, 3]
# Example 5.3
>>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
>>> Y = []
>>> C = coset_enumeration_r(f, Y)
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[1, 3, 1, 3]
[2, 0, 2, 0]
[3, 1, 3, 1]
[0, 2, 0, 2]
# Example 5.4
>>> F, a, b, c, d, e = free_group("a, b, c, d, e")
>>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1])
>>> Y = [a]
>>> C = coset_enumeration_r(f, Y)
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# example of "compress" method
>>> C.compress()
>>> C.table
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Exercises Pg. 161, Q2.
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
>>> Y = []
>>> C = coset_enumeration_r(f, Y)
>>> C.compress()
>>> C.standardize()
>>> C.table
[[1, 2, 3, 4],
[5, 0, 6, 7],
[0, 5, 7, 6],
[7, 6, 5, 0],
[6, 7, 0, 5],
[2, 1, 4, 3],
[3, 4, 2, 1],
[4, 3, 1, 2]]
# John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
# Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490
# from 1973chwd.pdf
# Table 1. Ex. 1
>>> F, r, s, t = free_group("r, s, t")
>>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2])
>>> C = coset_enumeration_r(E1, [r])
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[0, 0, 0, 0, 0, 0]
Ex. 2
>>> F, a, b = free_group("a, b")
>>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5])
>>> C = coset_enumeration_r(Cox, [a])
>>> index = 0
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... index += 1
>>> index
500
# Ex. 3
>>> F, a, b = free_group("a, b")
>>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \
(a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4])
>>> C = coset_enumeration_r(B_2_4, [a])
>>> index = 0
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... index += 1
>>> index
1024
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
# 1. Initialize a coset table C for < X|R >
C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
# Define coset table methods.
if modified:
_scan_and_fill = C.modified_scan_and_fill
_define = C.modified_define
else:
_scan_and_fill = C.scan_and_fill
_define = C.define
if draft:
C.table = draft.table[:]
C.p = draft.p[:]
R = fp_grp.relators
A_dict = C.A_dict
p = C.p
for i in range(0, len(Y)):
if modified:
_scan_and_fill(0, Y[i], C._grp.generators[i])
else:
_scan_and_fill(0, Y[i])
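    # main HLT loop: scan every relator at every live coset, defining new
    # cosets whenever a scan is incomplete, until the table closes or the
    # coset limit is exceeded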
alpha = 0
while alpha < C.n:
if p[alpha] == alpha:
try:
for w in R:
if modified:
_scan_and_fill(alpha, w, C._grp.identity)
else:
_scan_and_fill(alpha, w)
# if alpha was eliminated during the scan then break
if p[alpha] < alpha:
break
if p[alpha] == alpha:
for x in A_dict:
if C.table[alpha][A_dict[x]] is None:
_define(alpha, x)
except ValueError as e:
if incomplete:
return C
raise e
alpha += 1
return C
def modified_coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None,
incomplete=False):
r"""
Introduce a new set of symbols y \in Y that correspond to the
generators of the subgroup. Store the elements of Y as a
word P[\alpha, x] and compute the coset table similar to that of
the regular coset enumeration methods.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> from sympy.combinatorics.coset_table import modified_coset_enumeration_r
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = modified_coset_enumeration_r(f, [x])
>>> C.table
[[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1], [None, 1, None, None], [1, 3, None, None]]
See Also
========
    coset_enumeration_r
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.,
"Handbook of Computational Group Theory",
Section 5.3.2
"""
return coset_enumeration_r(fp_grp, Y, max_cosets=max_cosets, draft=draft,
incomplete=incomplete, modified=True)
# Pg. 166
# coset-table based method
def coset_enumeration_c(fp_grp, Y, max_cosets=None, draft=None,
incomplete=False):
"""
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = coset_enumeration_c(f, [x])
>>> C.table
[[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
"""
# Initialize a coset table C for < X|R >
X = fp_grp.generators
R = fp_grp.relators
C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
if draft:
C.table = draft.table[:]
C.p = draft.p[:]
C.deduction_stack = draft.deduction_stack
for alpha, x in product(range(len(C.table)), X):
if not C.table[alpha][C.A_dict[x]] is None:
C.deduction_stack.append((alpha, x))
A = C.A
# replace all the elements by cyclic reductions
R_cyc_red = [rel.identity_cyclic_reduction() for rel in R]
R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \
for rel in R_cyc_red))
R_set = set()
for conjugate in R_c:
R_set = R_set.union(conjugate)
# a list of subsets of R_c whose words start with "x".
R_c_list = []
for x in C.A:
r = {word for word in R_set if word[0] == x}
R_c_list.append(r)
R_set.difference_update(r)
for w in Y:
C.scan_and_fill_c(0, w)
for x in A:
C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
alpha = 0
while alpha < len(C.table):
if C.p[alpha] == alpha:
try:
for x in C.A:
if C.p[alpha] != alpha:
break
if C.table[alpha][C.A_dict[x]] is None:
C.define_c(alpha, x)
C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
except ValueError as e:
if incomplete:
return C
raise e
alpha += 1
return C
|
9abc5e5de5be78739b2ae86703ea14e60b3d19967a24c48c26fb2c0ac8d44821 | from sympy.core import Basic
from sympy.utilities.iterables import flatten, iterable
from sympy.utilities.misc import as_int
from collections import defaultdict
class Prufer(Basic):
"""
The Prufer correspondence is an algorithm that describes the
bijection between labeled trees and the Prufer code. A Prufer
code of a labeled tree is unique up to isomorphism and has
a length of n - 2.
Prufer sequences were first used by Heinz Prufer to give a
proof of Cayley's formula.
References
==========
.. [1] http://mathworld.wolfram.com/LabeledTree.html
"""
_prufer_repr = None
_tree_repr = None
_nodes = None
_rank = None
@property
def prufer_repr(self):
"""Returns Prufer sequence for the Prufer object.
This sequence is found by removing the highest numbered vertex,
recording the node it was attached to, and continuing until only
two vertices remain. The Prufer sequence is the list of recorded nodes.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).prufer_repr
[3, 3, 3, 4]
>>> Prufer([1, 0, 0]).prufer_repr
[1, 0, 0]
See Also
========
to_prufer
"""
if self._prufer_repr is None:
self._prufer_repr = self.to_prufer(self._tree_repr[:], self.nodes)
return self._prufer_repr
@property
def tree_repr(self):
"""Returns the tree representation of the Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).tree_repr
[[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]
>>> Prufer([1, 0, 0]).tree_repr
[[1, 2], [0, 1], [0, 3], [0, 4]]
See Also
========
to_tree
"""
if self._tree_repr is None:
self._tree_repr = self.to_tree(self._prufer_repr[:])
return self._tree_repr
@property
def nodes(self):
"""Returns the number of nodes in the tree.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).nodes
6
>>> Prufer([1, 0, 0]).nodes
5
"""
return self._nodes
@property
def rank(self):
"""Returns the rank of the Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> p = Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]])
>>> p.rank
778
>>> p.next(1).rank
779
>>> p.prev().rank
777
See Also
========
prufer_rank, next, prev, size
"""
if self._rank is None:
self._rank = self.prufer_rank()
return self._rank
@property
def size(self):
"""Return the number of possible trees of this Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([0]*4).size == Prufer([6]*4).size == 1296
True
See Also
========
prufer_rank, rank, next, prev
"""
return self.prev(self.rank).prev().rank + 1
@staticmethod
def to_prufer(tree, n):
"""Return the Prufer sequence for a tree given as a list of edges where
``n`` is the number of nodes in the tree.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_repr
[0, 0]
>>> Prufer.to_prufer([[0, 1], [0, 2], [0, 3]], 4)
[0, 0]
See Also
========
prufer_repr: returns Prufer sequence of a Prufer object.
"""
d = defaultdict(int)
L = []
for edge in tree:
# Increment the value of the corresponding
# node in the degree list as we encounter an
# edge involving it.
d[edge[0]] += 1
d[edge[1]] += 1
for i in range(n - 2):
# find the smallest leaf
for x in range(n):
if d[x] == 1:
break
# find the node it was connected to
y = None
for edge in tree:
if x == edge[0]:
y = edge[1]
elif x == edge[1]:
y = edge[0]
if y is not None:
break
# record and update
L.append(y)
for j in (x, y):
d[j] -= 1
if not d[j]:
d.pop(j)
tree.remove(edge)
return L
@staticmethod
def to_tree(prufer):
"""Return the tree (as a list of edges) of the given Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([0, 2], 4)
>>> a.tree_repr
[[0, 1], [0, 2], [2, 3]]
>>> Prufer.to_tree([0, 2])
[[0, 1], [0, 2], [2, 3]]
References
==========
.. [1] https://hamberg.no/erlend/posts/2010-11-06-prufer-sequence-compact-tree-representation.html
See Also
========
tree_repr: returns tree representation of a Prufer object.
"""
tree = []
last = []
n = len(prufer) + 2
d = defaultdict(lambda: 1)
for p in prufer:
d[p] += 1
for i in prufer:
for j in range(n):
# find the smallest leaf (degree = 1)
if d[j] == 1:
break
# (i, j) is the new edge that we append to the tree
# and remove from the degree dictionary
d[i] -= 1
d[j] -= 1
tree.append(sorted([i, j]))
last = [i for i in range(n) if d[i] == 1] or [0, 1]
tree.append(last)
return tree
@staticmethod
def edges(*runs):
"""Return a list of edges and the number of nodes from the given runs
that connect nodes in an integer-labelled tree.
All node numbers will be shifted so that the minimum node is 0. It is
not a problem if edges are repeated in the runs; only unique edges are
returned. There is no assumption made about what the range of the node
labels should be, but all nodes from the smallest through the largest
must be present.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer.edges([1, 2, 3], [2, 4, 5]) # a T
([[0, 1], [1, 2], [1, 3], [3, 4]], 5)
Duplicate edges are removed:
>>> Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) # a K
([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7)
"""
e = set()
nmin = runs[0][0]
for r in runs:
for i in range(len(r) - 1):
a, b = r[i: i + 2]
if b < a:
a, b = b, a
e.add((a, b))
rv = []
got = set()
nmin = nmax = None
for ei in e:
for i in ei:
got.add(i)
nmin = min(ei[0], nmin) if nmin is not None else ei[0]
nmax = max(ei[1], nmax) if nmax is not None else ei[1]
rv.append(list(ei))
missing = set(range(nmin, nmax + 1)) - got
if missing:
missing = [i + nmin for i in missing]
if len(missing) == 1:
msg = 'Node %s is missing.' % missing.pop()
else:
msg = 'Nodes %s are missing.' % list(sorted(missing))
raise ValueError(msg)
if nmin != 0:
for i, ei in enumerate(rv):
rv[i] = [n - nmin for n in ei]
nmax -= nmin
return sorted(rv), nmax + 1
def prufer_rank(self):
"""Computes the rank of a Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_rank()
0
See Also
========
rank, next, prev, size
"""
r = 0
p = 1
for i in range(self.nodes - 3, -1, -1):
r += p*self.prufer_repr[i]
p *= self.nodes
return r
@classmethod
def unrank(self, rank, n):
"""Finds the unranked Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer.unrank(0, 4)
Prufer([0, 0])
"""
n, rank = as_int(n), as_int(rank)
L = defaultdict(int)
for i in range(n - 3, -1, -1):
L[i] = rank % n
rank = (rank - L[i])//n
return Prufer([L[i] for i in range(len(L))])
def __new__(cls, *args, **kw_args):
"""The constructor for the Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
A Prufer object can be constructed from a list of edges:
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_repr
[0, 0]
If the number of nodes is given, no checking of the nodes will
be performed; it will be assumed that nodes 0 through n - 1 are
present:
>>> Prufer([[0, 1], [0, 2], [0, 3]], 4)
Prufer([[0, 1], [0, 2], [0, 3]], 4)
A Prufer object can be constructed from a Prufer sequence:
>>> b = Prufer([1, 3])
>>> b.tree_repr
[[0, 1], [1, 3], [2, 3]]
"""
ret_obj = Basic.__new__(cls, *args, **kw_args)
args = [list(args[0])]
if args[0] and iterable(args[0][0]):
if not args[0][0]:
raise ValueError(
'Prufer expects at least one edge in the tree.')
if len(args) > 1:
nnodes = args[1]
else:
nodes = set(flatten(args[0]))
nnodes = max(nodes) + 1
if nnodes != len(nodes):
missing = set(range(nnodes)) - nodes
if len(missing) == 1:
msg = 'Node %s is missing.' % missing.pop()
else:
msg = 'Nodes %s are missing.' % list(sorted(missing))
raise ValueError(msg)
ret_obj._tree_repr = [list(i) for i in args[0]]
ret_obj._nodes = nnodes
else:
ret_obj._prufer_repr = args[0]
ret_obj._nodes = len(ret_obj._prufer_repr) + 2
return ret_obj
def next(self, delta=1):
"""Generates the Prufer sequence that is delta beyond the current one.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> b = a.next(1) # == a.next()
>>> b.tree_repr
[[0, 2], [0, 1], [1, 3]]
>>> b.rank
1
See Also
========
prufer_rank, rank, prev, size
"""
return Prufer.unrank(self.rank + delta, self.nodes)
def prev(self, delta=1):
"""Generates the Prufer sequence that is -delta before the current one.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [1, 2], [2, 3], [1, 4]])
>>> a.rank
36
>>> b = a.prev()
>>> b
Prufer([1, 2, 0])
>>> b.rank
35
See Also
========
prufer_rank, rank, next, size
"""
return Prufer.unrank(self.rank -delta, self.nodes)
|
4846e43e29ee152580e401e36e793442860c3712fc6f9b6ec78bd27ffad2e462 | """Finitely Presented Groups and its algorithms. """
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.combinatorics.free_groups import (FreeGroup, FreeGroupElement,
free_group)
from sympy.combinatorics.rewritingsystem import RewritingSystem
from sympy.combinatorics.coset_table import (CosetTable,
coset_enumeration_r,
coset_enumeration_c)
from sympy.combinatorics import PermutationGroup
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.magic import pollute
from itertools import product
@public
def fp_group(fr_grp, relators=()):
_fp_group = FpGroup(fr_grp, relators)
return (_fp_group,) + tuple(_fp_group._generators)
@public
def xfp_group(fr_grp, relators=()):
_fp_group = FpGroup(fr_grp, relators)
return (_fp_group, _fp_group._generators)
# Does not work. Both symbols and pollute are undefined. Never tested.
@public
def vfp_group(fr_grpm, relators):
_fp_group = FpGroup(symbols, relators)
pollute([sym.name for sym in _fp_group.symbols], _fp_group.generators)
return _fp_group
def _parse_relators(rels):
"""Parse the passed relators."""
return rels
###############################################################################
# FINITELY PRESENTED GROUPS #
###############################################################################
class FpGroup(DefaultPrinting):
"""
    The FpGroup takes a FreeGroup and a list/tuple of relators, the
    relators being specified in such a way that each of them is equal to the
    identity of the provided free group.
"""
is_group = True
is_FpGroup = True
is_PermutationGroup = False
def __init__(self, fr_grp, relators):
relators = _parse_relators(relators)
self.free_group = fr_grp
self.relators = relators
self.generators = self._generators()
self.dtype = type("FpGroupElement", (FpGroupElement,), {"group": self})
# CosetTable instance on identity subgroup
self._coset_table = None
        # whether the coset table on the identity subgroup
        # has been standardized
self._is_standardized = False
self._order = None
self._center = None
self._rewriting_system = RewritingSystem(self)
self._perm_isomorphism = None
return
def _generators(self):
return self.free_group.generators
def make_confluent(self):
'''
Try to make the group's rewriting system confluent
'''
self._rewriting_system.make_confluent()
return
def reduce(self, word):
'''
Return the reduced form of `word` in `self` according to the group's
rewriting system. If it's confluent, the reduced form is the unique normal
form of the word in the group.
'''
return self._rewriting_system.reduce(word)
def equals(self, word1, word2):
'''
Compare `word1` and `word2` for equality in the group
using the group's rewriting system. If the system is
confluent, the returned answer is necessarily correct.
(If it isn't, `False` could be returned in some cases
where in fact `word1 == word2`)
'''
if self.reduce(word1*word2**-1) == self.identity:
return True
elif self._rewriting_system.is_confluent:
return False
return None
@property
def identity(self):
return self.free_group.identity
def __contains__(self, g):
return g in self.free_group
def subgroup(self, gens, C=None, homomorphism=False):
'''
Return the subgroup generated by `gens` using the
Reidemeister-Schreier algorithm
homomorphism -- When set to True, return a dictionary containing the images
of the presentation generators in the original group.
Examples
========
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2])
>>> H = [x*y, x**-1*y**-1*x*y*x]
>>> K, T = f.subgroup(H, homomorphism=True)
>>> T(K.generators)
[x*y, x**-1*y**2*x**-1]
'''
if not all(isinstance(g, FreeGroupElement) for g in gens):
raise ValueError("Generators must be `FreeGroupElement`s")
if not all(g.group == self.free_group for g in gens):
raise ValueError("Given generators are not members of the group")
if homomorphism:
g, rels, _gens = reidemeister_presentation(self, gens, C=C, homomorphism=True)
else:
g, rels = reidemeister_presentation(self, gens, C=C)
if g:
g = FpGroup(g[0].group, rels)
else:
g = FpGroup(free_group('')[0], [])
if homomorphism:
from sympy.combinatorics.homomorphisms import homomorphism
return g, homomorphism(g, self, g.generators, _gens, check=False)
return g
def coset_enumeration(self, H, strategy="relator_based", max_cosets=None,
draft=None, incomplete=False):
"""
        Return an instance of ``coset table``, obtained by running the
        Todd-Coxeter algorithm on ``self`` with ``H`` as subgroup, using
        ``strategy`` as the enumeration strategy. The returned coset table
        is compressed but not standardized.
An instance of `CosetTable` for `fp_grp` can be passed as the keyword
argument `draft` in which case the coset enumeration will start with
that instance and attempt to complete it.
When `incomplete` is `True` and the function is unable to complete for
some reason, the partially complete table will be returned.
"""
if not max_cosets:
max_cosets = CosetTable.coset_table_max_limit
if strategy == 'relator_based':
C = coset_enumeration_r(self, H, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
else:
C = coset_enumeration_c(self, H, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
if C.is_complete():
C.compress()
return C
def standardize_coset_table(self):
"""
        Standardizes the coset table of ``self`` and sets the internal
        variable ``_is_standardized`` to ``True``.
"""
self._coset_table.standardize()
self._is_standardized = True
def coset_table(self, H, strategy="relator_based", max_cosets=None,
draft=None, incomplete=False):
"""
Return the mathematical coset table of ``self`` in ``H``.
"""
if not H:
if self._coset_table is not None:
if not self._is_standardized:
self.standardize_coset_table()
else:
C = self.coset_enumeration([], strategy, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
self._coset_table = C
self.standardize_coset_table()
return self._coset_table.table
else:
C = self.coset_enumeration(H, strategy, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
C.standardize()
return C.table
def order(self, strategy="relator_based"):
"""
Returns the order of the finitely presented group ``self``. It uses
the coset enumeration with identity group as subgroup, i.e ``H=[]``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x, y**2])
>>> f.order(strategy="coset_table_based")
2
"""
from sympy.polys.polytools import gcd
if self._order is not None:
return self._order
if self._coset_table is not None:
self._order = len(self._coset_table.table)
elif len(self.relators) == 0:
self._order = self.free_group.order()
elif len(self.generators) == 1:
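            # a one-generator group < x | x**n1, x**n2, ... > is cyclic of
            # order gcd(n1, n2, ...)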
self._order = abs(gcd([r.array_form[0][1] for r in self.relators]))
elif self._is_infinite():
self._order = S.Infinity
else:
gens, C = self._finite_index_subgroup()
if C:
ind = len(C.table)
self._order = ind*self.subgroup(gens, C=C).order()
else:
self._order = self.index([])
return self._order
def _is_infinite(self):
'''
Test if the group is infinite. Return `True` if the test succeeds
and `None` otherwise
'''
used_gens = set()
for r in self.relators:
used_gens.update(r.contains_generators())
if not set(self.generators) <= used_gens:
return True
        # Abelianisation test: check if the abelianisation is infinite
abelian_rels = []
from sympy.matrices.normalforms import invariant_factors
from sympy.matrices import Matrix
for rel in self.relators:
abelian_rels.append([rel.exponent_sum(g) for g in self.generators])
        m = Matrix(abelian_rels)
if 0 in invariant_factors(m):
return True
else:
return None
def _finite_index_subgroup(self, s=None):
'''
Find the elements of `self` that generate a finite index subgroup
and, if found, return the list of elements and the coset table of `self` by
the subgroup, otherwise return `(None, None)`
'''
gen = self.most_frequent_generator()
rels = list(self.generators)
rels.extend(self.relators)
if not s:
if len(self.generators) == 2:
s = [gen] + [g for g in self.generators if g != gen]
else:
rand = self.free_group.identity
i = 0
while ((rand in rels or rand**-1 in rels or rand.is_identity)
and i<10):
rand = self.random()
i += 1
s = [gen, rand] + [g for g in self.generators if g != gen]
mid = (len(s)+1)//2
half1 = s[:mid]
half2 = s[mid:]
draft1 = None
draft2 = None
m = 200
C = None
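        # alternately try to complete a coset table for each half of ``s``,
        # doubling the permitted number of cosets after every unsuccessful
        # round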
while not C and (m/2 < CosetTable.coset_table_max_limit):
m = min(m, CosetTable.coset_table_max_limit)
draft1 = self.coset_enumeration(half1, max_cosets=m,
draft=draft1, incomplete=True)
if draft1.is_complete():
C = draft1
half = half1
else:
draft2 = self.coset_enumeration(half2, max_cosets=m,
draft=draft2, incomplete=True)
if draft2.is_complete():
C = draft2
half = half2
if not C:
m *= 2
if not C:
return None, None
C.compress()
return half, C
def most_frequent_generator(self):
gens = self.generators
rels = self.relators
freqs = [sum([r.generator_count(g) for r in rels]) for g in gens]
return gens[freqs.index(max(freqs))]
def random(self):
import random
r = self.free_group.identity
for i in range(random.randint(2,3)):
r = r*random.choice(self.generators)**random.choice([1,-1])
return r
def index(self, H, strategy="relator_based"):
"""
Return the index of subgroup ``H`` in group ``self``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**5, y**4, y*x*y**3*x**3])
>>> f.index([x])
4
"""
# TODO: use |G:H| = |G|/|H| (currently H can't be made into a group)
# when we know |G| and |H|
if H == []:
return self.order()
else:
C = self.coset_enumeration(H, strategy)
return len(C.table)
def __str__(self):
if self.free_group.rank > 30:
str_form = "<fp group with %s generators>" % self.free_group.rank
else:
str_form = "<fp group on the generators %s>" % str(self.generators)
return str_form
__repr__ = __str__
#==============================================================================
# PERMUTATION GROUP METHODS
#==============================================================================
def _to_perm_group(self):
'''
Return an isomorphic permutation group and the isomorphism.
The implementation is dependent on coset enumeration so
will only terminate for finite groups.
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.homomorphisms import homomorphism
if self.order() is S.Infinity:
raise NotImplementedError("Permutation presentation of infinite "
"groups is not implemented")
if self._perm_isomorphism:
T = self._perm_isomorphism
P = T.image()
else:
C = self.coset_table([])
gens = self.generators
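            # column 2*k of the coset table holds the action of the k-th
            # generator (odd columns hold the inverses), so its permutation
            # image sends coset i to C[i][2*k]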
images = [[C[i][2*gens.index(g)] for i in range(len(C))] for g in gens]
images = [Permutation(i) for i in images]
P = PermutationGroup(images)
T = homomorphism(self, P, gens, images, check=False)
self._perm_isomorphism = T
return P, T
def _perm_group_list(self, method_name, *args):
'''
Given the name of a `PermutationGroup` method (returning a subgroup
or a list of subgroups) and (optionally) additional arguments it takes,
return a list or a list of lists containing the generators of this (or
these) subgroups in terms of the generators of `self`.
'''
P, T = self._to_perm_group()
perm_result = getattr(P, method_name)(*args)
single = False
if isinstance(perm_result, PermutationGroup):
perm_result, single = [perm_result], True
result = []
for group in perm_result:
gens = group.generators
result.append(T.invert(gens))
return result[0] if single else result
def derived_series(self):
'''
Return the list of lists containing the generators
of the subgroups in the derived series of `self`.
'''
return self._perm_group_list('derived_series')
def lower_central_series(self):
'''
Return the list of lists containing the generators
of the subgroups in the lower central series of `self`.
'''
return self._perm_group_list('lower_central_series')
def center(self):
'''
Return the list of generators of the center of `self`.
'''
return self._perm_group_list('center')
def derived_subgroup(self):
'''
Return the list of generators of the derived subgroup of `self`.
'''
return self._perm_group_list('derived_subgroup')
def centralizer(self, other):
'''
Return the list of generators of the centralizer of `other`
(a list of elements of `self`) in `self`.
'''
T = self._to_perm_group()[1]
other = T(other)
return self._perm_group_list('centralizer', other)
def normal_closure(self, other):
'''
Return the list of generators of the normal closure of `other`
(a list of elements of `self`) in `self`.
'''
T = self._to_perm_group()[1]
other = T(other)
return self._perm_group_list('normal_closure', other)
def _perm_property(self, attr):
'''
Given an attribute of a `PermutationGroup`, return
its value for a permutation group isomorphic to `self`.
'''
P = self._to_perm_group()[0]
return getattr(P, attr)
@property
def is_abelian(self):
'''
Check if `self` is abelian.
'''
return self._perm_property("is_abelian")
@property
def is_nilpotent(self):
'''
Check if `self` is nilpotent.
'''
return self._perm_property("is_nilpotent")
@property
def is_solvable(self):
'''
Check if `self` is solvable.
'''
return self._perm_property("is_solvable")
@property
def elements(self):
'''
List the elements of `self`.
'''
P, T = self._to_perm_group()
return T.invert(P._elements)
@property
def is_cyclic(self):
"""
Return ``True`` if group is Cyclic.
"""
if len(self.generators) <= 1:
return True
try:
P, T = self._to_perm_group()
except NotImplementedError:
raise NotImplementedError("Check for infinite Cyclic group "
"is not implemented")
return P.is_cyclic
def abelian_invariants(self):
"""
Return Abelian Invariants of a group.
"""
try:
P, T = self._to_perm_group()
except NotImplementedError:
raise NotImplementedError("abelian invariants is not implemented"
"for infinite group")
return P.abelian_invariants()
def composition_series(self):
"""
Return subnormal series of maximum length for a group.
"""
try:
P, T = self._to_perm_group()
except NotImplementedError:
raise NotImplementedError("composition series is not implemented"
"for infinite group")
return P.composition_series()
class FpSubgroup(DefaultPrinting):
'''
The class implementing a subgroup of an FpGroup or a FreeGroup
(only finite index subgroups are supported at this point). This
is to be used if one wishes to check if an element of the original
group belongs to the subgroup
'''
def __init__(self, G, gens, normal=False):
super().__init__()
self.parent = G
self.generators = list({g for g in gens if g != G.identity})
self._min_words = None #for use in __contains__
self.C = None
self.normal = normal
def __contains__(self, g):
if isinstance(self.parent, FreeGroup):
if self._min_words is None:
# make _min_words - a list of subwords such that
# g is in the subgroup if and only if it can be
# partitioned into these subwords. Infinite families of
# subwords are presented by tuples, e.g. (r, w)
# stands for the family of subwords r*w**n*r**-1
def _process(w):
# this is to be used before adding new words
# into _min_words; if the word w is not cyclically
# reduced, it will generate an infinite family of
# subwords so should be written as a tuple;
# if it is, w**-1 should be added to the list
# as well
p, r = w.cyclic_reduction(removed=True)
if not r.is_identity:
return [(r, p)]
else:
return [w, w**-1]
# make the initial list
gens = []
for w in self.generators:
if self.normal:
w = w.cyclic_reduction()
gens.extend(_process(w))
for w1 in gens:
for w2 in gens:
# if w1 and w2 are equal or are inverses, continue
if w1 == w2 or (not isinstance(w1, tuple)
and w1**-1 == w2):
continue
# if the start of one word is the inverse of the
# end of the other, their multiple should be added
# to _min_words because of cancellation
if isinstance(w1, tuple):
# start, end
s1, s2 = w1[0][0], w1[0][0]**-1
else:
s1, s2 = w1[0], w1[len(w1)-1]
if isinstance(w2, tuple):
# start, end
r1, r2 = w2[0][0], w2[0][0]**-1
else:
                            r1, r2 = w2[0], w2[len(w2)-1]
# p1 and p2 are w1 and w2 or, in case when
# w1 or w2 is an infinite family, a representative
p1, p2 = w1, w2
if isinstance(w1, tuple):
p1 = w1[0]*w1[1]*w1[0]**-1
if isinstance(w2, tuple):
p2 = w2[0]*w2[1]*w2[0]**-1
                    # add the product of the words to the list if necessary
if r1**-1 == s2 and not (p1*p2).is_identity:
new = _process(p1*p2)
                        if new not in gens:
gens.extend(new)
if r2**-1 == s1 and not (p2*p1).is_identity:
new = _process(p2*p1)
                        if new not in gens:
gens.extend(new)
self._min_words = gens
min_words = self._min_words
def _is_subword(w):
# check if w is a word in _min_words or one of
# the infinite families in it
w, r = w.cyclic_reduction(removed=True)
if r.is_identity or self.normal:
return w in min_words
else:
t = [s[1] for s in min_words if isinstance(s, tuple)
and s[0] == r]
return [s for s in t if w.power_of(s)] != []
# store the solution of words for which the result of
# _word_break (below) is known
known = {}
def _word_break(w):
# check if w can be written as a product of words
# in min_words
if len(w) == 0:
return True
i = 0
while i < len(w):
i += 1
prefix = w.subword(0, i)
if not _is_subword(prefix):
continue
rest = w.subword(i, len(w))
if rest not in known:
known[rest] = _word_break(rest)
if known[rest]:
return True
return False
if self.normal:
g = g.cyclic_reduction()
return _word_break(g)
else:
if self.C is None:
C = self.parent.coset_enumeration(self.generators)
self.C = C
i = 0
C = self.C
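            # trace the word g through the coset table, starting at coset 0
            # (the coset of the identity); g belongs to the subgroup iff the
            # trace ends back at coset 0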
for j in range(len(g)):
i = C.table[i][C.A_dict[g[j]]]
return i == 0
def order(self):
if not self.generators:
return S.One
if isinstance(self.parent, FreeGroup):
return S.Infinity
if self.C is None:
C = self.parent.coset_enumeration(self.generators)
self.C = C
# This is valid because `len(self.C.table)` (the index of the subgroup)
# will always be finite - otherwise coset enumeration doesn't terminate
return self.parent.order()/len(self.C.table)
def to_FpGroup(self):
if isinstance(self.parent, FreeGroup):
gen_syms = [('x_%d'%i) for i in range(len(self.generators))]
return free_group(', '.join(gen_syms))[0]
return self.parent.subgroup(C=self.C)
def __str__(self):
if len(self.generators) > 30:
str_form = "<fp subgroup with %s generators>" % len(self.generators)
else:
str_form = "<fp subgroup on the generators %s>" % str(self.generators)
return str_form
__repr__ = __str__
###############################################################################
# LOW INDEX SUBGROUPS #
###############################################################################
def low_index_subgroups(G, N, Y=()):
"""
    Implements the Low Index Subgroups algorithm, i.e. find all subgroups of
    ``G`` up to a given index ``N``. This implements the method described in
[Sim94]. This procedure involves a backtrack search over incomplete Coset
Tables, rather than over forced coincidences.
Parameters
==========
G: An FpGroup < X|R >
N: positive integer, representing the maximum index value for subgroups
Y: (an optional argument) specifying a list of subgroup generators, such
    that each of the resulting subgroups contains the subgroup generated by Y.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
>>> L = low_index_subgroups(f, 4)
>>> for coset_table in L:
... print(coset_table.table)
[[0, 0, 0, 0]]
[[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]]
[[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]]
[[1, 1, 0, 0], [0, 0, 1, 1]]
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 5.4
.. [2] Marston Conder and Peter Dobcsanyi
"Applications and Adaptions of the Low Index Subgroups Procedure"
"""
C = CosetTable(G, [])
R = G.relators
    # maximum length of the "short" relators
len_short_rel = 5
# elements of R2 only checked at the last step for complete
# coset tables
R2 = {rel for rel in R if len(rel) > len_short_rel}
# elements of R1 are used in inner parts of the process to prune
# branches of the search tree,
R1 = {rel.identity_cyclic_reduction() for rel in set(R) - R2}
R1_c_list = C.conjugates(R1)
S = []
descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)
return S
def descendant_subgroups(S, C, R1_c_list, x, R2, N, Y):
A_dict = C.A_dict
A_dict_inv = C.A_dict_inv
if C.is_complete():
# if C is complete then it only needs to test
# whether the relators in R2 are satisfied
for w, alpha in product(R2, C.omega):
if not C.scan_check(alpha, w):
return
# relators in R2 are satisfied, append the table to list
S.append(C)
else:
# find the first undefined entry in Coset Table
for alpha, x in product(range(len(C.table)), C.A):
if C.table[alpha][A_dict[x]] is None:
# this is "x" in pseudo-code (using "y" makes it clear)
undefined_coset, undefined_gen = alpha, x
break
        # for filling up the undefined entry we try all possible values
# of beta in Omega or beta = n where beta^(undefined_gen^-1) is undefined
reach = C.omega + [C.n]
for beta in reach:
if beta < N:
if beta == C.n or C.table[beta][A_dict_inv[undefined_gen]] is None:
try_descendant(S, C, R1_c_list, R2, N, undefined_coset, \
undefined_gen, beta, Y)
def try_descendant(S, C, R1_c_list, R2, N, alpha, x, beta, Y):
r"""
Solves the problem of trying out each individual possibility
    for `\alpha^x`.
"""
D = C.copy()
if beta == D.n and beta < N:
D.table.append([None]*len(D.A))
D.p.append(beta)
D.table[alpha][D.A_dict[x]] = beta
D.table[beta][D.A_dict_inv[x]] = alpha
D.deduction_stack.append((alpha, x))
if not D.process_deductions_check(R1_c_list[D.A_dict[x]], \
R1_c_list[D.A_dict_inv[x]]):
return
for w in Y:
if not D.scan_check(0, w):
return
if first_in_class(D, Y):
descendant_subgroups(S, D, R1_c_list, x, R2, N, Y)
def first_in_class(C, Y=()):
"""
Checks whether the subgroup ``H=G1`` corresponding to the Coset Table
could possibly be the canonical representative of its conjugacy class.
Parameters
==========
C: CosetTable
Returns
=======
bool: True/False
If this returns False, then no descendant of C can have that property, and
so we can abandon C. If it returns True, then we need to process further
the node of the search tree corresponding to C, and so we call
``descendant_subgroups`` recursively on C.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, first_in_class
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
>>> C = CosetTable(f, [])
>>> C.table = [[0, 0, None, None]]
>>> first_in_class(C)
True
>>> C.table = [[1, 1, 1, None], [0, 0, None, 1]]; C.p = [0, 1]
>>> first_in_class(C)
True
>>> C.table = [[1, 1, 2, 1], [0, 0, 0, None], [None, None, None, 0]]
>>> C.p = [0, 1, 2]
>>> first_in_class(C)
False
>>> C.table = [[1, 1, 1, 2], [0, 0, 2, 0], [2, None, 0, 1]]
>>> first_in_class(C)
False
# TODO:: Sims points out in [Sim94] that performance can be improved by
# remembering some of the information computed by ``first_in_class``. If
# the ``continue alpha`` statement is executed at line 14, then the same thing
# will happen for that value of alpha in any descendant of the table C, and so
    # the values of alpha for which this occurs could profitably be
# stored and passed through to the descendants of C. Of course this would
# make the code more complicated.
# The code below is taken directly from the function on page 208 of [Sim94]
# nu[alpha]
"""
n = C.n
# lamda is the largest numbered point in Omega_c_alpha which is currently defined
lamda = -1
# for alpha in Omega_c, nu[alpha] is the point in Omega_c_alpha corresponding to alpha
nu = [None]*n
# for alpha in Omega_c_alpha, mu[alpha] is the point in Omega_c corresponding to alpha
mu = [None]*n
    # nu and mu are the mutually-inverse equivalence maps between
# Omega_c_alpha and Omega_c
next_alpha = False
# For each 0!=alpha in [0 .. nc-1], we start by constructing the equivalent
# standardized coset table C_alpha corresponding to H_alpha
for alpha in range(1, n):
# reset nu to "None" after previous value of alpha
for beta in range(lamda+1):
nu[mu[beta]] = None
# we only want to reject our current table in favour of a preceding
# table in the ordering in which 1 is replaced by alpha, if the subgroup
# G_alpha corresponding to this preceding table definitely contains the
# given subgroup
for w in Y:
# TODO: this should support input of a list of general words
# not just the words which are in "A" (i.e gen and gen^-1)
if C.table[alpha][C.A_dict[w]] != alpha:
# continue with alpha
next_alpha = True
break
if next_alpha:
next_alpha = False
continue
# try alpha as the new point 0 in Omega_C_alpha
mu[0] = alpha
nu[alpha] = 0
# compare corresponding entries in C and C_alpha
lamda = 0
for beta in range(n):
for x in C.A:
gamma = C.table[beta][C.A_dict[x]]
delta = C.table[mu[beta]][C.A_dict[x]]
# if either of the entries is undefined,
# we move with next alpha
if gamma is None or delta is None:
# continue with alpha
next_alpha = True
break
if nu[delta] is None:
# delta becomes the next point in Omega_C_alpha
lamda += 1
nu[delta] = lamda
mu[lamda] = delta
if nu[delta] < gamma:
return False
if nu[delta] > gamma:
# continue with alpha
next_alpha = True
break
if next_alpha:
next_alpha = False
break
return True
#========================================================================
# Simplifying Presentation
#========================================================================
def simplify_presentation(*args, change_gens=False):
'''
For an instance of `FpGroup`, return a simplified isomorphic copy of
the group (e.g. remove redundant generators or relators). Alternatively,
a list of generators and relators can be passed in which case the
simplified lists will be returned.
By default, the generators of the group are unchanged. If you would
like to remove redundant generators, set the keyword argument
`change_gens = True`.
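    A minimal sketch of the two call forms (the presentation used here is
    only illustrative):
    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, simplify_presentation
    >>> F, x, y = free_group("x, y")
    >>> G = FpGroup(F, [x**3, y**2, x*y*x*y, x**3*y**2])
    >>> G_simp = simplify_presentation(G)
    >>> gens, rels = simplify_presentation(list(G.generators),
    ...     list(G.relators), change_gens=True)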
'''
if len(args) == 1:
if not isinstance(args[0], FpGroup):
raise TypeError("The argument must be an instance of FpGroup")
G = args[0]
gens, rels = simplify_presentation(G.generators, G.relators,
change_gens=change_gens)
if gens:
return FpGroup(gens[0].group, rels)
return FpGroup(FreeGroup([]), [])
elif len(args) == 2:
gens, rels = args[0][:], args[1][:]
if not gens:
return gens, rels
identity = gens[0].group.identity
else:
if len(args) == 0:
m = "Not enough arguments"
else:
m = "Too many arguments"
raise RuntimeError(m)
prev_gens = []
prev_rels = []
while not set(prev_rels) == set(rels):
prev_rels = rels
while change_gens and not set(prev_gens) == set(gens):
prev_gens = gens
gens, rels = elimination_technique_1(gens, rels, identity)
rels = _simplify_relators(rels, identity)
if change_gens:
syms = [g.array_form[0][0] for g in gens]
F = free_group(syms)[0]
identity = F.identity
gens = F.generators
subs = dict(zip(syms, gens))
for j, r in enumerate(rels):
a = r.array_form
rel = identity
for sym, p in a:
rel = rel*subs[sym]**p
rels[j] = rel
return gens, rels
def _simplify_relators(rels, identity):
"""Relies upon ``_simplification_technique_1`` for its functioning. """
rels = rels[:]
rels = list(set(_simplification_technique_1(rels)))
rels.sort()
rels = [r.identity_cyclic_reduction() for r in rels]
try:
rels.remove(identity)
except ValueError:
pass
return rels
# Pg 350, section 2.5.1 from [2]
def elimination_technique_1(gens, rels, identity):
rels = rels[:]
# the shorter relators are examined first so that generators selected for
# elimination will have shorter strings as equivalent
rels.sort()
gens = gens[:]
redundant_gens = {}
redundant_rels = []
used_gens = set()
# examine each relator in relator list for any generator occurring exactly
# once
for rel in rels:
# don't look for a redundant generator in a relator which
# depends on previously found ones
contained_gens = rel.contains_generators()
if any(g in contained_gens for g in redundant_gens):
continue
contained_gens = list(contained_gens)
contained_gens.sort(reverse = True)
for gen in contained_gens:
if rel.generator_count(gen) == 1 and gen not in used_gens:
k = rel.exponent_sum(gen)
gen_index = rel.index(gen**k)
bk = rel.subword(gen_index + 1, len(rel))
fw = rel.subword(0, gen_index)
chi = bk*fw
redundant_gens[gen] = chi**(-1*k)
used_gens.update(chi.contains_generators())
redundant_rels.append(rel)
break
rels = [r for r in rels if r not in redundant_rels]
# eliminate the redundant generators from remaining relators
rels = [r.eliminate_words(redundant_gens, _all = True).identity_cyclic_reduction() for r in rels]
rels = list(set(rels))
try:
rels.remove(identity)
except ValueError:
pass
gens = [g for g in gens if g not in redundant_gens]
return gens, rels
def _simplification_technique_1(rels):
"""
    All relators are checked to see if they are of the form `gen^n`. If any
    such relators are found, the powers of `gen` occurring in the other
    relators are reduced using these `gen^n` relations.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import _simplification_technique_1
>>> F, x, y = free_group("x, y")
>>> w1 = [x**2*y**4, x**3]
>>> _simplification_technique_1(w1)
[x**-1*y**4, x**3]
>>> w2 = [x**2*y**-4*x**5, x**3, x**2*y**8, y**5]
>>> _simplification_technique_1(w2)
[x**-1*y*x**-1, x**3, x**-1*y**-2, y**5]
>>> w3 = [x**6*y**4, x**4]
>>> _simplification_technique_1(w3)
[x**2*y**4, x**4]
"""
from sympy.polys.polytools import gcd
rels = rels[:]
# dictionary with "gen: n" where gen^n is one of the relators
exps = {}
for i in range(len(rels)):
rel = rels[i]
if rel.number_syllables() == 1:
g = rel[0]
exp = abs(rel.array_form[0][1])
if rel.array_form[0][1] < 0:
rels[i] = rels[i]**-1
g = g**-1
if g in exps:
exp = gcd(exp, exps[g].array_form[0][1])
exps[g] = g**exp
one_syllables_words = exps.values()
# decrease some of the exponents in relators, making use of the single
# syllable relators
for i in range(len(rels)):
rel = rels[i]
if rel in one_syllables_words:
continue
rel = rel.eliminate_words(one_syllables_words, _all = True)
# if rels[i] contains g**n where abs(n) is greater than half of the power p
# of g in exps, g**n can be replaced by g**(n-p) (or g**(p-n) if n<0)
for g in rel.contains_generators():
if g in exps:
exp = exps[g].array_form[0][1]
max_exp = (exp + 1)//2
rel = rel.eliminate_word(g**(max_exp), g**(max_exp-exp), _all = True)
rel = rel.eliminate_word(g**(-max_exp), g**(-(max_exp-exp)), _all = True)
rels[i] = rel
rels = [r.identity_cyclic_reduction() for r in rels]
return rels
###############################################################################
# SUBGROUP PRESENTATIONS #
###############################################################################
# Pg 175 [1]
def define_schreier_generators(C, homomorphism=False):
'''
Parameters
==========
C -- Coset table.
homomorphism -- When set to True, return a dictionary containing the images
of the presentation generators in the original group.
'''
y = []
gamma = 1
f = C.fp_group
X = f.generators
if homomorphism:
        # `_gens` stores the elements of the parent group to
        # which the Schreier generators correspond.
        _gens = {}
        # compute the Schreier traversal
tau = {}
tau[0] = f.identity
C.P = [[None]*len(C.A) for i in range(C.n)]
for alpha, x in product(C.omega, C.A):
beta = C.table[alpha][C.A_dict[x]]
if beta == gamma:
C.P[alpha][C.A_dict[x]] = "<identity>"
C.P[beta][C.A_dict_inv[x]] = "<identity>"
gamma += 1
if homomorphism:
tau[beta] = tau[alpha]*x
elif x in X and C.P[alpha][C.A_dict[x]] is None:
y_alpha_x = '%s_%s' % (x, alpha)
y.append(y_alpha_x)
C.P[alpha][C.A_dict[x]] = y_alpha_x
if homomorphism:
_gens[y_alpha_x] = tau[alpha]*x*tau[beta]**-1
grp_gens = list(free_group(', '.join(y)))
C._schreier_free_group = grp_gens.pop(0)
C._schreier_generators = grp_gens
if homomorphism:
C._schreier_gen_elem = _gens
    # replace all elements of P by free group elements
for i, j in product(range(len(C.P)), range(len(C.A))):
# if equals "<identity>", replace by identity element
if C.P[i][j] == "<identity>":
C.P[i][j] = C._schreier_free_group.identity
elif isinstance(C.P[i][j], str):
r = C._schreier_generators[y.index(C.P[i][j])]
C.P[i][j] = r
beta = C.table[i][j]
C.P[beta][j + 1] = r**-1
def reidemeister_relators(C):
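    # Compute the Reidemeister relators of the subgroup: each relator of the
    # parent group is rewritten at every coset; generators of order 1 are then
    # removed and cyclically conjugate relators are deduplicated.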
R = C.fp_group.relators
rels = [rewrite(C, coset, word) for word in R for coset in range(C.n)]
order_1_gens = {i for i in rels if len(i) == 1}
# remove all the order 1 generators from relators
rels = list(filter(lambda rel: rel not in order_1_gens, rels))
# replace order 1 generators by identity element in reidemeister relators
for i in range(len(rels)):
w = rels[i]
w = w.eliminate_words(order_1_gens, _all=True)
rels[i] = w
C._schreier_generators = [i for i in C._schreier_generators
if not (i in order_1_gens or i**-1 in order_1_gens)]
# Tietze transformation 1 i.e TT_1
# remove cyclic conjugate elements from relators
i = 0
while i < len(rels):
w = rels[i]
j = i + 1
while j < len(rels):
if w.is_cyclic_conjugate(rels[j]):
del rels[j]
else:
j += 1
i += 1
C._reidemeister_relators = rels
def rewrite(C, alpha, w):
"""
Parameters
==========
C: CosetTable
alpha: A live coset
w: A word in `A*`
Returns
=======
rho(tau(alpha), w)
Examples
========
>>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, define_schreier_generators, rewrite
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**6])
>>> C = CosetTable(f, [])
>>> C.table = [[1, 1, 2, 3], [0, 0, 4, 5], [4, 4, 3, 0], [5, 5, 0, 2], [2, 2, 5, 1], [3, 3, 1, 4]]
>>> C.p = [0, 1, 2, 3, 4, 5]
>>> define_schreier_generators(C)
>>> rewrite(C, 0, (x*y)**6)
x_4*y_2*x_3*x_1*x_2*y_4*x_5
"""
v = C._schreier_free_group.identity
for i in range(len(w)):
x_i = w[i]
v = v*C.P[alpha][C.A_dict[x_i]]
alpha = C.table[alpha][C.A_dict[x_i]]
return v
# Pg 350, section 2.5.2 from [2]
def elimination_technique_2(C):
"""
This technique eliminates one generator at a time. Heuristically this
seems superior in that we may select for elimination the generator with
shortest equivalent string at each stage.
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r, \
reidemeister_relators, define_schreier_generators, elimination_technique_2
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2]); H = [x*y, x**-1*y**-1*x*y*x]
>>> C = coset_enumeration_r(f, H)
>>> C.compress(); C.standardize()
>>> define_schreier_generators(C)
>>> reidemeister_relators(C)
>>> elimination_technique_2(C)
([y_1, y_2], [y_2**-3, y_2*y_1*y_2*y_1*y_2*y_1, y_1**2])
"""
rels = C._reidemeister_relators
rels.sort(reverse=True)
gens = C._schreier_generators
for i in range(len(gens) - 1, -1, -1):
rel = rels[i]
for j in range(len(gens) - 1, -1, -1):
gen = gens[j]
if rel.generator_count(gen) == 1:
k = rel.exponent_sum(gen)
gen_index = rel.index(gen**k)
bk = rel.subword(gen_index + 1, len(rel))
fw = rel.subword(0, gen_index)
rep_by = (bk*fw)**(-1*k)
del rels[i]; del gens[j]
for l in range(len(rels)):
rels[l] = rels[l].eliminate_word(gen, rep_by)
break
C._reidemeister_relators = rels
C._schreier_generators = gens
return C._schreier_generators, C._reidemeister_relators
def reidemeister_presentation(fp_grp, H, C=None, homomorphism=False):
"""
Parameters
==========
    fp_grp: A finitely presented group, an instance of FpGroup
H: A subgroup whose presentation is to be found, given as a list
of words in generators of `fp_grp`
homomorphism: When set to True, return a homomorphism from the subgroup
to the parent group
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, reidemeister_presentation
>>> F, x, y = free_group("x, y")
Example 5.6 Pg. 177 from [1]
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2])
>>> H = [x*y, x**-1*y**-1*x*y*x]
>>> reidemeister_presentation(f, H)
((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1))
Example 5.8 Pg. 183 from [1]
>>> f = FpGroup(F, [x**3, y**3, (x*y)**3])
>>> H = [x*y, x*y**-1]
>>> reidemeister_presentation(f, H)
((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0))
Exercises Q2. Pg 187 from [1]
>>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
>>> H = [x]
>>> reidemeister_presentation(f, H)
((x_0,), (x_0**4,))
Example 5.9 Pg. 183 from [1]
>>> f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2])
>>> H = [x]
>>> reidemeister_presentation(f, H)
((x_0,), (x_0**6,))
"""
if not C:
C = coset_enumeration_r(fp_grp, H)
C.compress(); C.standardize()
define_schreier_generators(C, homomorphism=homomorphism)
reidemeister_relators(C)
gens, rels = C._schreier_generators, C._reidemeister_relators
gens, rels = simplify_presentation(gens, rels, change_gens=True)
C.schreier_generators = tuple(gens)
C.reidemeister_relators = tuple(rels)
if homomorphism:
_gens = []
for gen in gens:
_gens.append(C._schreier_gen_elem[str(gen)])
return C.schreier_generators, C.reidemeister_relators, _gens
return C.schreier_generators, C.reidemeister_relators
FpGroupElement = FreeGroupElement
|
c81156a7015a561bfcfa65f2872b0eca3e6ccf77fbe5e5ef4bd3c3054856f7d0 | from typing import Tuple as tTuple
from .expr_with_intlimits import ExprWithIntLimits
from .summations import Sum, summation, _dummy_with_inherited_properties_concrete
from sympy.core.expr import Expr
from sympy.core.exprtools import factor_terms
from sympy.core.function import Derivative
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol
from sympy.functions.combinatorial.factorials import RisingFactorial
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.polys import quo, roots
from sympy.simplify.powsimp import powsimp
from sympy.simplify.simplify import product_simplify
class Product(ExprWithIntLimits):
r"""
Represents unevaluated products.
Explanation
===========
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, oo
>>> Product(k, (k, 1, m))
Product(k, (k, 1, m))
>>> Product(k, (k, 1, m)).doit()
factorial(m)
>>> Product(k**2,(k, 1, m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k, 1, m)).doit()
factorial(m)**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
4**n*factorial(n)**2/(2**(2*n)*RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import combsimp, pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 - pi/2, n)*RisingFactorial(1 + pi/2, n)/(2*factorial(n)**2)
>>> limit(Pe, n, oo).gammasimp()
sin(pi**2/2)
>>> Pe.rewrite(gamma)
(-1)**n*pi**2*gamma(pi/2)*gamma(n + 1 + pi/2)/(2*gamma(1 + pi/2)*gamma(-n + pi/2)*gamma(n + 1)**2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> combsimp(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] https://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] https://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ('is_commutative',)
limits: tTuple[tTuple[Symbol, Expr, Expr]]
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args, **kwargs):
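        # a Product can be rewritten as the exponential of a Sum of logarithms,
        # prod(f) = exp(Sum(log(f))); the same identity underlies is_convergent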
return exp(Sum(log(self.function), *self.limits))
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
if self.has_empty_sequence:
return False
z = self.term.is_zero
if z is True:
return True
if self.has_finite_limits:
# A Product is zero only if its term is zero assuming finite limits.
return z
def _eval_is_extended_real(self):
if self.has_empty_sequence:
return True
return self.function.is_extended_real
def _eval_is_positive(self):
if self.has_empty_sequence:
return True
if self.function.is_positive and self.has_finite_limits:
return True
def _eval_is_nonnegative(self):
if self.has_empty_sequence:
return True
if self.function.is_nonnegative and self.has_finite_limits:
return True
def _eval_is_extended_nonnegative(self):
if self.has_empty_sequence:
return True
if self.function.is_extended_nonnegative:
return True
def _eval_is_extended_nonpositive(self):
if self.has_empty_sequence:
return True
def _eval_is_finite(self):
if self.has_finite_limits and self.function.is_finite:
return True
def doit(self, **hints):
# first make sure any definite limits have product
# variables with matching assumptions
reps = {}
for xab in self.limits:
d = _dummy_with_inherited_properties_concrete(xab)
if d:
reps[xab[0]] = d
if reps:
undo = {v: k for k, v in reps.items()}
did = self.xreplace(reps).doit(**hints)
if isinstance(did, tuple): # when separate=True
did = tuple([i.xreplace(undo) for i in did])
else:
did = did.xreplace(undo)
return did
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_integer and dif.is_negative:
a, b = b + 1, a - 1
f = 1 / f
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
from .delta import deltaproduct, _has_simple_delta
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
definite = dif.is_Integer
if definite and (dif < 100):
return self._eval_product_direct(term, limits)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
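            # over k = a..n each root r of the polynomial contributes the
            # factor (a - r)*(a - r + 1)*...*(n - r), which is
            # RisingFactorial(a - r, n - a + 1)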
for r, m in all_roots.items():
M += m
A *= RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
factored = factor_terms(term, fraction=True)
if factored.is_Mul:
return self._eval_product(factored, (k, a, n))
elif term.is_Mul:
# Factor in part without the summation variable and part with
without_k, with_k = term.as_coeff_mul(k)
if len(with_k) >= 2:
# More than one term including k, so still a multiplication
exclude, include = [], []
for t in with_k:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return without_k**(n - a + 1)*A * B
else:
# Just a single term
p = self._eval_product(with_k[0], (k, a, n))
if p is None:
p = self.func(with_k[0], (k, a, n)).doit()
return without_k**(n - a + 1)*p
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
if definite:
return self._eval_product_direct(term, limits)
def _eval_simplify(self, **kwargs):
rv = product_simplify(self)
return rv.doit() if kwargs['doit'] else rv
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_product_direct(self, term, limits):
(k, a, n) = limits
return Mul(*[term.subs(k, a + i) for i in range(n - a + 1)])
def _eval_derivative(self, x):
if isinstance(x, Symbol) and x not in self.free_symbols:
return S.Zero
f, limits = self.function, list(self.limits)
limit = limits.pop(-1)
if limits:
f = self.func(f, *limits)
i, a, b = limit
if x in a.free_symbols or x in b.free_symbols:
return None
h = Dummy()
rv = Sum( Product(f, (i, a, h - 1)) * Product(f, (i, h + 1, b)) * Derivative(f, x, evaluate=True).subs(i, h), (h, a, b))
return rv
def is_convergent(self):
r"""
See docs of :obj:`.Sum.is_convergent()` for explanation of convergence
in SymPy.
Explanation
===========
The infinite product:
.. math::
\prod_{1 \leq i < \infty} f(i)
is defined by the sequence of partial products:
.. math::
\prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n)
as n increases without bound. The product converges to a non-zero
value if and only if the sum:
.. math::
\sum_{1 \leq i < \infty} \log{f(n)}
converges.
Examples
========
>>> from sympy import Product, Symbol, cos, pi, exp, oo
>>> n = Symbol('n', integer=True)
>>> Product(n/(n + 1), (n, 1, oo)).is_convergent()
False
>>> Product(1/n**2, (n, 1, oo)).is_convergent()
False
>>> Product(cos(pi/n), (n, 1, oo)).is_convergent()
True
>>> Product(exp(-n**2), (n, 1, oo)).is_convergent()
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Infinite_product
"""
sequence_term = self.function
log_sum = log(sequence_term)
lim = self.limits
try:
is_conv = Sum(log_sum, *lim).is_convergent()
except NotImplementedError:
if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true:
return S.true
raise NotImplementedError("The algorithm to find the product convergence of %s "
"is not yet implemented" % (sequence_term))
return is_conv
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Explanation
===========
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import gamma, Product, simplify, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr.rewrite(gamma))
Piecewise((gamma(b + 1)/gamma(a), b > -1), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P.rewrite(gamma))
Piecewise((gamma(b + 1)/gamma(a), a > 0), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index,
reorder_limit,
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
Explanation
===========
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
b
_____
product(f(n), (i, a, b)) = | | f(n)
| |
i = a
If it cannot compute the product, it returns an unevaluated Product object.
    Repeated products can be computed by introducing additional symbol tuples,
    as shown in the examples below.
Examples
========
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
|
872205c9beb3acbc41021572372a148c938ae7fbbf92aacfa109a937b06a169f | """
This module implements sums and products containing the Kronecker Delta function.
References
==========
.. [1] http://mathworld.wolfram.com/KroneckerDelta.html
"""
from .products import product
from .summations import Sum, summation
from sympy.core import Add, Mul, S, Dummy
from sympy.core.cache import cacheit
from sympy.core.sorting import default_sort_key
from sympy.functions import KroneckerDelta, Piecewise, piecewise_fold
from sympy.polys.polytools import factor
from sympy.sets.sets import Interval
from sympy.solvers.solvers import solve
@cacheit
def _expand_delta(expr, index):
"""
Expand the first Add containing a simple KroneckerDelta.
"""
if not expr.is_Mul:
return expr
delta = None
func = Add
terms = [S.One]
for h in expr.args:
if delta is None and h.is_Add and _has_simple_delta(h, index):
delta = True
func = h.func
terms = [terms[0]*t for t in h.args]
else:
terms = [t*h for t in terms]
return func(*terms)
@cacheit
def _extract_delta(expr, index):
"""
Extract a simple KroneckerDelta from the expression.
Explanation
===========
Returns the tuple ``(delta, newexpr)`` where:
- ``delta`` is a simple KroneckerDelta expression if one was found,
or ``None`` if no simple KroneckerDelta expression was found.
- ``newexpr`` is a Mul containing the remaining terms; ``expr`` is
returned unchanged if no simple KroneckerDelta expression was found.
Examples
========
>>> from sympy import KroneckerDelta
>>> from sympy.concrete.delta import _extract_delta
>>> from sympy.abc import x, y, i, j, k
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), i)
(KroneckerDelta(i, j), 4*x*y)
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), k)
(None, 4*x*y*KroneckerDelta(i, j))
See Also
========
sympy.functions.special.tensor_functions.KroneckerDelta
deltaproduct
deltasummation
"""
if not _has_simple_delta(expr, index):
return (None, expr)
if isinstance(expr, KroneckerDelta):
return (expr, S.One)
if not expr.is_Mul:
raise ValueError("Incorrect expr")
delta = None
terms = []
for arg in expr.args:
if delta is None and _is_simple_delta(arg, index):
delta = arg
else:
terms.append(arg)
return (delta, expr.func(*terms))
@cacheit
def _has_simple_delta(expr, index):
"""
Returns True if ``expr`` is an expression that contains a KroneckerDelta
that is simple in the index ``index``, meaning that this KroneckerDelta
is nonzero for a single value of the index ``index``.
"""
if expr.has(KroneckerDelta):
if _is_simple_delta(expr, index):
return True
if expr.is_Add or expr.is_Mul:
for arg in expr.args:
if _has_simple_delta(arg, index):
return True
return False
@cacheit
def _is_simple_delta(delta, index):
"""
Returns True if ``delta`` is a KroneckerDelta and is nonzero for a single
value of the index ``index``.
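    A couple of illustrative checks (the symbols here are arbitrary):
    >>> from sympy import KroneckerDelta
    >>> from sympy.abc import i, j
    >>> from sympy.concrete.delta import _is_simple_delta
    >>> _is_simple_delta(KroneckerDelta(i, 2*j + 1), j)
    True
    >>> _is_simple_delta(KroneckerDelta(i**2, j), i)
    False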
"""
if isinstance(delta, KroneckerDelta) and delta.has(index):
p = (delta.args[0] - delta.args[1]).as_poly(index)
if p:
return p.degree() == 1
return False
@cacheit
def _remove_multiple_delta(expr):
"""
Evaluate products of KroneckerDelta's.
"""
if expr.is_Add:
return expr.func(*list(map(_remove_multiple_delta, expr.args)))
if not expr.is_Mul:
return expr
eqs = []
newargs = []
for arg in expr.args:
if isinstance(arg, KroneckerDelta):
eqs.append(arg.args[0] - arg.args[1])
else:
newargs.append(arg)
if not eqs:
return expr
solns = solve(eqs, dict=True)
if len(solns) == 0:
return S.Zero
elif len(solns) == 1:
for key in solns[0].keys():
newargs.append(KroneckerDelta(key, solns[0][key]))
expr2 = expr.func(*newargs)
if expr != expr2:
return _remove_multiple_delta(expr2)
return expr
@cacheit
def _simplify_delta(expr):
"""
Rewrite a KroneckerDelta's indices in its simplest form.
"""
if isinstance(expr, KroneckerDelta):
try:
slns = solve(expr.args[0] - expr.args[1], dict=True)
if slns and len(slns) == 1:
return Mul(*[KroneckerDelta(*(key, value))
for key, value in slns[0].items()])
except NotImplementedError:
pass
return expr
@cacheit
def deltaproduct(f, limit):
"""
Handle products containing a KroneckerDelta.
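    A small illustrative case; here the delta cannot hold for every value of
    the bound variable, so the whole product vanishes:
    >>> from sympy import KroneckerDelta, symbols
    >>> from sympy.concrete.delta import deltaproduct
    >>> i, j = symbols('i j', integer=True)
    >>> deltaproduct(KroneckerDelta(i, j), (j, 1, 3))
    0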
See Also
========
deltasummation
sympy.functions.special.tensor_functions.KroneckerDelta
sympy.concrete.products.product
"""
if ((limit[2] - limit[1]) < 0) == True:
return S.One
if not f.has(KroneckerDelta):
return product(f, limit)
if f.is_Add:
# Identify the term in the Add that has a simple KroneckerDelta
delta = None
terms = []
for arg in sorted(f.args, key=default_sort_key):
if delta is None and _has_simple_delta(arg, limit[0]):
delta = arg
else:
terms.append(arg)
newexpr = f.func(*terms)
k = Dummy("kprime", integer=True)
if isinstance(limit[1], int) and isinstance(limit[2], int):
result = deltaproduct(newexpr, limit) + sum([
deltaproduct(newexpr, (limit[0], limit[1], ik - 1)) *
delta.subs(limit[0], ik) *
deltaproduct(newexpr, (limit[0], ik + 1, limit[2])) for ik in range(int(limit[1]), int(limit[2] + 1))]
)
else:
result = deltaproduct(newexpr, limit) + deltasummation(
deltaproduct(newexpr, (limit[0], limit[1], k - 1)) *
delta.subs(limit[0], k) *
deltaproduct(newexpr, (limit[0], k + 1, limit[2])),
(k, limit[1], limit[2]),
no_piecewise=_has_simple_delta(newexpr, limit[0])
)
return _remove_multiple_delta(result)
delta, _ = _extract_delta(f, limit[0])
if not delta:
g = _expand_delta(f, limit[0])
if f != g:
try:
return factor(deltaproduct(g, limit))
except AssertionError:
return deltaproduct(g, limit)
return product(f, limit)
return _remove_multiple_delta(f.subs(limit[0], limit[1])*KroneckerDelta(limit[2], limit[1])) + \
S.One*_simplify_delta(KroneckerDelta(limit[2], limit[1] - 1))
@cacheit
def deltasummation(f, limit, no_piecewise=False):
"""
Handle summations containing a KroneckerDelta.
Explanation
===========
The idea for summation is the following:
- If we are dealing with a KroneckerDelta expression, i.e. KroneckerDelta(g(x), j),
we try to simplify it.
If we could simplify it, then we sum the resulting expression.
We already know we can sum a simplified expression, because only
simple KroneckerDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the summation,
taking care if we are dealing with a Derivative or with a proper
KroneckerDelta.
2) The expression is not simple (i.e. KroneckerDelta(cos(x))): we can do
nothing at all.
- If the expr is a multiplication expr having a KroneckerDelta term:
First we expand it.
If the expansion did work, then we try to sum the expansion.
If not, we try to extract a simple KroneckerDelta term, then we have two
cases:
1) We have a simple KroneckerDelta term, so we return the summation.
2) We didn't have a simple term, but we do have an expression with
simplified KroneckerDelta terms, so we sum this expression.
Examples
========
>>> from sympy import oo, symbols
>>> from sympy.abc import k
>>> i, j = symbols('i, j', integer=True, finite=True)
>>> from sympy.concrete.delta import deltasummation
>>> from sympy import KroneckerDelta
>>> deltasummation(KroneckerDelta(i, k), (k, -oo, oo))
1
>>> deltasummation(KroneckerDelta(i, k), (k, 0, oo))
Piecewise((1, i >= 0), (0, True))
>>> deltasummation(KroneckerDelta(i, k), (k, 1, 3))
Piecewise((1, (i >= 1) & (i <= 3)), (0, True))
>>> deltasummation(k*KroneckerDelta(i, j)*KroneckerDelta(j, k), (k, -oo, oo))
j*KroneckerDelta(i, j)
>>> deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo))
i
>>> deltasummation(i*KroneckerDelta(i, j), (i, -oo, oo))
j
See Also
========
deltaproduct
sympy.functions.special.tensor_functions.KroneckerDelta
sympy.concrete.sums.summation
"""
if ((limit[2] - limit[1]) < 0) == True:
return S.Zero
if not f.has(KroneckerDelta):
return summation(f, limit)
x = limit[0]
g = _expand_delta(f, x)
if g.is_Add:
return piecewise_fold(
g.func(*[deltasummation(h, limit, no_piecewise) for h in g.args]))
# try to extract a simple KroneckerDelta term
delta, expr = _extract_delta(g, x)
if (delta is not None) and (delta.delta_range is not None):
dinf, dsup = delta.delta_range
if (limit[1] - dinf <= 0) == True and (limit[2] - dsup >= 0) == True:
no_piecewise = True
if not delta:
return summation(f, limit)
solns = solve(delta.args[0] - delta.args[1], x)
if len(solns) == 0:
return S.Zero
elif len(solns) != 1:
return Sum(f, limit)
value = solns[0]
if no_piecewise:
return expr.subs(x, value)
return Piecewise(
(expr.subs(x, value), Interval(*limit[1:3]).as_relational(value)),
(S.Zero, True)
)
|
79f35275ad1487b1261bdeeb8b9804546b0e538afe9cf792543c3f3668de201e | """Gosper's algorithm for hypergeometric summation. """
from sympy.core import S, Dummy, symbols
from sympy.polys import Poly, parallel_poly_from_expr, factor
from sympy.solvers import solve
from sympy.simplify import hypersimp
from sympy.utilities.iterables import is_sequence
def gosper_normal(f, g, n, polys=True):
r"""
Compute the Gosper's normal form of ``f`` and ``g``.
Explanation
===========
Given relatively prime univariate polynomials ``f`` and ``g``,
rewrite their quotient to a normal form defined as follows:
.. math::
\frac{f(n)}{g(n)} = Z \cdot \frac{A(n) C(n+1)}{B(n) C(n)}
where ``Z`` is an arbitrary constant and ``A``, ``B``, ``C`` are
monic polynomials in ``n`` with the following properties:
1. `\gcd(A(n), B(n+h)) = 1 \forall h \in \mathbb{N}`
2. `\gcd(B(n), C(n+1)) = 1`
3. `\gcd(A(n), C(n)) = 1`
This normal form, or rational factorization in other words, is a
    crucial step in Gosper's algorithm and in solving difference
    equations. It can also be used to decide if two hypergeometric
terms are similar or not.
This procedure will return a tuple containing elements of this
factorization in the form ``(Z*A, B, C)``.
Examples
========
>>> from sympy.concrete.gosper import gosper_normal
>>> from sympy.abc import n
>>> gosper_normal(4*n+5, 2*(4*n+1)*(2*n+3), n, polys=False)
(1/4, n + 3/2, n + 1/4)
"""
(p, q), opt = parallel_poly_from_expr(
(f, g), n, field=True, extension=True)
a, A = p.LC(), p.monic()
b, B = q.LC(), q.monic()
C, Z = A.one, a/b
h = Dummy('h')
D = Poly(n + h, n, h, domain=opt.domain)
R = A.resultant(B.compose(D))
roots = set(R.ground_roots().keys())
for r in set(roots):
if not r.is_Integer or r < 0:
roots.remove(r)
for i in sorted(roots):
d = A.gcd(B.shift(+i))
A = A.quo(d)
B = B.quo(d.shift(-i))
for j in range(1, i + 1):
C *= d.shift(-j)
A = A.mul_ground(Z)
if not polys:
A = A.as_expr()
B = B.as_expr()
C = C.as_expr()
return A, B, C
def gosper_term(f, n):
r"""
Compute Gosper's hypergeometric term for ``f``.
Explanation
===========
Suppose ``f`` is a hypergeometric term such that:
.. math::
s_n = \sum_{k=0}^{n-1} f_k
and `f_k` doesn't depend on `n`. Returns a hypergeometric
term `g_n` such that `g_{n+1} - g_n = f_n`.
Examples
========
>>> from sympy.concrete.gosper import gosper_term
>>> from sympy import factorial
>>> from sympy.abc import n
>>> gosper_term((4*n + 1)*factorial(n)/factorial(2*n + 1), n)
(-n - 1/2)/(n + 1/4)
"""
r = hypersimp(f, n)
if r is None:
return None # 'f' is *not* a hypergeometric term
p, q = r.as_numer_denom()
A, B, C = gosper_normal(p, q, n)
B = B.shift(-1)
N = S(A.degree())
M = S(B.degree())
K = S(C.degree())
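    # candidate degrees for the unknown polynomial x(n) solving
    # A(n)*x(n + 1) - B(n)*x(n) = C(n); these are the classical degree
    # bounds of Gosper's algorithm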
if (N != M) or (A.LC() != B.LC()):
D = {K - max(N, M)}
elif not N:
D = {K - N + 1, S.Zero}
else:
D = {K - N + 1, (B.nth(N - 1) - A.nth(N - 1))/A.LC()}
for d in set(D):
if not d.is_Integer or d < 0:
D.remove(d)
if not D:
return None # 'f(n)' is *not* Gosper-summable
d = max(D)
coeffs = symbols('c:%s' % (d + 1), cls=Dummy)
domain = A.get_domain().inject(*coeffs)
x = Poly(coeffs, n, domain=domain)
H = A*x.shift(1) - B*x - C
solution = solve(H.coeffs(), coeffs)
if solution is None:
return None # 'f(n)' is *not* Gosper-summable
x = x.as_expr().subs(solution)
for coeff in coeffs:
if coeff not in solution:
x = x.subs(coeff, 0)
if x.is_zero:
return None # 'f(n)' is *not* Gosper-summable
else:
return B.as_expr()*x/C.as_expr()
def gosper_sum(f, k):
r"""
Gosper's hypergeometric summation algorithm.
Explanation
===========
Given a hypergeometric term ``f`` such that:
.. math ::
s_n = \sum_{k=0}^{n-1} f_k
    and `f_k` does not depend on `n`, returns `g_n - g_0` where
`g_{n+1} - g_n = f_n`, or ``None`` if `s_n` cannot be expressed
in closed form as a sum of hypergeometric terms.
Examples
========
>>> from sympy.concrete.gosper import gosper_sum
>>> from sympy import factorial
>>> from sympy.abc import n, k
>>> f = (4*k + 1)*factorial(k)/factorial(2*k + 1)
>>> gosper_sum(f, (k, 0, n))
(-factorial(n) + 2*factorial(2*n + 1))/factorial(2*n + 1)
>>> _.subs(n, 2) == sum(f.subs(k, i) for i in [0, 1, 2])
True
>>> gosper_sum(f, (k, 3, n))
(-60*factorial(n) + factorial(2*n + 1))/(60*factorial(2*n + 1))
>>> _.subs(n, 5) == sum(f.subs(k, i) for i in [3, 4, 5])
True
References
==========
.. [1] Marko Petkovsek, Herbert S. Wilf, Doron Zeilberger, A = B,
AK Peters, Ltd., Wellesley, MA, USA, 1997, pp. 73--100
"""
indefinite = False
if is_sequence(k):
k, a, b = k
else:
indefinite = True
g = gosper_term(f, k)
if g is None:
return None
if indefinite:
result = f*g
else:
result = (f*(g + 1)).subs(k, b) - (f*g).subs(k, a)
if result is S.NaN:
try:
result = (f*(g + 1)).limit(k, b) - (f*g).limit(k, a)
except NotImplementedError:
result = None
return factor(result)
|
fd47e5c6553bc9c8464a6782aeb114d2473aeed2dc1aa360ed248330119fec30 | """Various algorithms for helping identifying numbers and sequences."""
from sympy.utilities import public
from sympy.core import Function, Symbol, S
from sympy.core.numbers import Zero
from sympy.concrete.products import (Product, product)
from sympy.core.numbers import (Integer, Rational)
from sympy.core.symbol import symbols
from sympy.core.sympify import sympify
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.integers import floor
from sympy.integrals.integrals import integrate
from sympy.polys.polytools import lcm
from sympy.simplify.radsimp import denom
from sympy.polys.polyfuncs import rational_interpolate as rinterp
@public
def find_simple_recurrence_vector(l):
"""
This function is used internally by other functions from the
sympy.concrete.guess module. While most users may want to rather use the
function find_simple_recurrence when looking for recurrence relations
among rational numbers, the current function may still be useful when
some post-processing has to be done.
Explanation
===========
The function returns a vector of length n when a recurrence relation of
order n is detected in the sequence of rational numbers v.
    If the returned vector has length 1, then the returned value is always
    the list [0], which means that no relation has been found.
    While the function is intended to be used with rational numbers, it should
work for other kinds of real numbers except for some cases involving
quadratic numbers; for that reason it should be used with some caution when
the argument is not a list of rational numbers.
Examples
========
>>> from sympy.concrete.guess import find_simple_recurrence_vector
>>> from sympy import fibonacci
>>> find_simple_recurrence_vector([fibonacci(k) for k in range(12)])
[1, -1, -1]
See Also
========
See the function sympy.concrete.guess.find_simple_recurrence which is more
user-friendly.
"""
q1 = [0]
q2 = [Integer(1)]
b, z = 0, len(l) >> 1
while len(q2) <= z:
while l[b]==0:
b += 1
if b == len(l):
c = 1
for x in q2:
c = lcm(c, denom(x))
if q2[0]*c < 0: c = -c
for k in range(len(q2)):
q2[k] = int(q2[k]*c)
return q2
a = Integer(1)/l[b]
m = [a]
for k in range(b+1, len(l)):
m.append(-sum(l[j+1]*m[b-j-1] for j in range(b, k))*a)
l, m = m, [0] * max(len(q2), b+len(q1))
for k in range(len(q2)):
m[k] = a*q2[k]
for k in range(b, b+len(q1)):
m[k] += q1[k-b]
while m[-1]==0: m.pop() # because trailing zeros can occur
q1, q2, b = q2, m, 1
return [0]
@public
def find_simple_recurrence(v, A=Function('a'), N=Symbol('n')):
"""
Detects and returns a recurrence relation from a sequence of several integer
(or rational) terms. The name of the function in the returned expression is
'a' by default; the main variable is 'n' by default. The smallest index in
the returned expression is always n (and never n-1, n-2, etc.).
Examples
========
>>> from sympy.concrete.guess import find_simple_recurrence
>>> from sympy import fibonacci
>>> find_simple_recurrence([fibonacci(k) for k in range(12)])
-a(n) - a(n + 1) + a(n + 2)
>>> from sympy import Function, Symbol
>>> a = [1, 1, 1]
>>> for k in range(15): a.append(5*a[-1]-3*a[-2]+8*a[-3])
>>> find_simple_recurrence(a, A=Function('f'), N=Symbol('i'))
-8*f(i) + 3*f(i + 1) - 5*f(i + 2) + f(i + 3)
"""
p = find_simple_recurrence_vector(v)
n = len(p)
if n <= 1: return Zero()
rel = Zero()
for k in range(n):
rel += A(N+n-1-k)*p[k]
return rel
@public
def rationalize(x, maxcoeff=10000):
"""
Helps identifying a rational number from a float (or mpmath.mpf) value by
using a continued fraction. The algorithm stops as soon as a large partial
quotient is detected (greater than 10000 by default).
Examples
========
>>> from sympy.concrete.guess import rationalize
>>> from mpmath import cos, pi
>>> rationalize(cos(pi/3))
1/2
>>> from mpmath import mpf
>>> rationalize(mpf("0.333333333333333"))
1/3
While the function is rather intended to help 'identifying' rational
values, it may be used in some cases for approximating real numbers.
(Though other functions may be more relevant in that case.)
>>> rationalize(pi, maxcoeff = 250)
355/113
See Also
========
Several other methods can approximate a real number as a rational, like:
* fractions.Fraction.from_decimal
* fractions.Fraction.from_float
* mpmath.identify
* mpmath.pslq by using the following syntax: mpmath.pslq([x, 1])
* mpmath.findpoly by using the following syntax: mpmath.findpoly(x, 1)
* sympy.simplify.nsimplify (which is a more general function)
The main difference between the current function and all these variants is
that control focuses on magnitude of partial quotients here rather than on
global precision of the approximation. If the real is "known to be" a
rational number, the current function should be able to detect it correctly
with the default settings even when denominator is great (unless its
expansion contains unusually big partial quotients) which may occur
when studying sequences of increasing numbers. If the user cares more
on getting simple fractions, other methods may be more convenient.
"""
p0, p1 = 0, 1
q0, q1 = 1, 0
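    # p0/q0 and p1/q1 are consecutive convergents of the continued fraction
    # expansion of x; they are updated with each new partial quotient a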
a = floor(x)
while a < maxcoeff or q1==0:
p = a*p1 + p0
q = a*q1 + q0
p0, p1 = p1, p
q0, q1 = q1, q
if x==a: break
x = 1/(x-a)
a = floor(x)
return sympify(p) / q
@public
def guess_generating_function_rational(v, X=Symbol('x')):
"""
Tries to "guess" a rational generating function for a sequence of rational
numbers v.
Examples
========
>>> from sympy.concrete.guess import guess_generating_function_rational
>>> from sympy import fibonacci
>>> l = [fibonacci(k) for k in range(5,15)]
>>> guess_generating_function_rational(l)
(3*x + 5)/(-x**2 - x + 1)
See Also
========
sympy.series.approximants
mpmath.pade
"""
# a) compute the denominator as q
q = find_simple_recurrence_vector(v)
n = len(q)
if n <= 1: return None
# b) compute the numerator as p
p = [sum(v[i-k]*q[k] for k in range(min(i+1, n)))
for i in range(len(v)>>1)]
return (sum(p[k]*X**k for k in range(len(p)))
/ sum(q[k]*X**k for k in range(n)))
@public
def guess_generating_function(v, X=Symbol('x'), types=['all'], maxsqrtn=2):
"""
Tries to "guess" a generating function for a sequence of rational numbers v.
Only a few patterns are implemented yet.
Explanation
===========
The function returns a dictionary where keys are the name of a given type of
generating function. Six types are currently implemented:
type | formal definition
-------+----------------------------------------------------------------
ogf | f(x) = Sum( a_k * x^k , k: 0..infinity )
egf | f(x) = Sum( a_k * x^k / k! , k: 0..infinity )
lgf | f(x) = Sum( (-1)^(k+1) a_k * x^k / k , k: 1..infinity )
           |        (with the initial index starting at 1 rather than 0)
    hlgf   | f(x) = Sum( a_k * x^k / k , k: 1..infinity )
           |        (with the initial index starting at 1 rather than 0)
    lgdogf | f(x) = derivative( log(Sum( a_k * x^k, k: 0..infinity )), x)
    lgdegf | f(x) = derivative( log(Sum( a_k * x^k / k!, k: 0..infinity )), x)
In order to spare time, the user can select only some types of generating
functions (default being ['all']). While forgetting to use a list in the
case of a single type may seem to work most of the time as in: types='ogf'
this (convenient) syntax may lead to unexpected extra results in some cases.
Discarding a type when calling the function does not mean that the type will
not be present in the returned dictionary; it only means that no extra
computation will be performed for that type, but the function may still add
it in the result when it can be easily converted from another type.
Two generating functions (lgdogf and lgdegf) are not even computed if the
initial term of the sequence is 0; it may be useful in that case to try
again after having removed the leading zeros.
Examples
========
>>> from sympy.concrete.guess import guess_generating_function as ggf
>>> ggf([k+1 for k in range(12)], types=['ogf', 'lgf', 'hlgf'])
{'hlgf': 1/(1 - x), 'lgf': 1/(x + 1), 'ogf': 1/(x**2 - 2*x + 1)}
>>> from sympy import sympify
>>> l = sympify("[3/2, 11/2, 0, -121/2, -363/2, 121]")
>>> ggf(l)
{'ogf': (x + 3/2)/(11*x**2 - 3*x + 1)}
>>> from sympy import fibonacci
>>> ggf([fibonacci(k) for k in range(5, 15)], types=['ogf'])
{'ogf': (3*x + 5)/(-x**2 - x + 1)}
>>> from sympy import factorial
>>> ggf([factorial(k) for k in range(12)], types=['ogf', 'egf', 'lgf'])
{'egf': 1/(1 - x)}
>>> ggf([k+1 for k in range(12)], types=['egf'])
{'egf': (x + 1)*exp(x), 'lgdegf': (x + 2)/(x + 1)}
An n-th root of a rational function can also be detected (below is an
example coming from the sequence A108626 from https://oeis.org).
The greatest n-th root to be tested is specified as maxsqrtn (default 2).
>>> ggf([1, 2, 5, 14, 41, 124, 383, 1200, 3799, 12122, 38919])['ogf']
sqrt(1/(x**4 + 2*x**2 - 4*x + 1))
References
==========
.. [1] "Concrete Mathematics", R.L. Graham, D.E. Knuth, O. Patashnik
.. [2] https://oeis.org/wiki/Generating_functions
"""
# List of all types of all g.f. known by the algorithm
if 'all' in types:
types = ['ogf', 'egf', 'lgf', 'hlgf', 'lgdogf', 'lgdegf']
result = {}
# Ordinary Generating Function (ogf)
if 'ogf' in types:
# Perform some convolutions of the sequence with itself
t = [1 if k==0 else 0 for k in range(len(v))]
for d in range(max(1, maxsqrtn)):
t = [sum(t[n-i]*v[i] for i in range(n+1)) for n in range(len(v))]
g = guess_generating_function_rational(t, X=X)
if g:
result['ogf'] = g**Rational(1, d+1)
break
# Exponential Generating Function (egf)
if 'egf' in types:
# Transform sequence (division by factorial)
w, f = [], S.One
for i, k in enumerate(v):
f *= i if i else 1
w.append(k/f)
# Perform some convolutions of the sequence with itself
t = [1 if k==0 else 0 for k in range(len(w))]
for d in range(max(1, maxsqrtn)):
t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))]
g = guess_generating_function_rational(t, X=X)
if g:
result['egf'] = g**Rational(1, d+1)
break
# Logarithmic Generating Function (lgf)
if 'lgf' in types:
# Transform sequence (multiplication by (-1)^(n+1) / n)
w, f = [], S.NegativeOne
for i, k in enumerate(v):
f = -f
w.append(f*k/Integer(i+1))
# Perform some convolutions of the sequence with itself
t = [1 if k==0 else 0 for k in range(len(w))]
for d in range(max(1, maxsqrtn)):
t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))]
g = guess_generating_function_rational(t, X=X)
if g:
result['lgf'] = g**Rational(1, d+1)
break
# Hyperbolic logarithmic Generating Function (hlgf)
if 'hlgf' in types:
# Transform sequence (division by n+1)
w = []
for i, k in enumerate(v):
w.append(k/Integer(i+1))
# Perform some convolutions of the sequence with itself
t = [1 if k==0 else 0 for k in range(len(w))]
for d in range(max(1, maxsqrtn)):
t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))]
g = guess_generating_function_rational(t, X=X)
if g:
result['hlgf'] = g**Rational(1, d+1)
break
# Logarithmic derivative of ordinary generating Function (lgdogf)
if v[0] != 0 and ('lgdogf' in types
or ('ogf' in types and 'ogf' not in result)):
# Transform sequence by computing f'(x)/f(x)
# because log(f(x)) = integrate( f'(x)/f(x) )
a, w = sympify(v[0]), []
for n in range(len(v)-1):
w.append(
(v[n+1]*(n+1) - sum(w[-i-1]*v[i+1] for i in range(n)))/a)
# Perform some convolutions of the sequence with itself
t = [1 if k==0 else 0 for k in range(len(w))]
for d in range(max(1, maxsqrtn)):
t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))]
g = guess_generating_function_rational(t, X=X)
if g:
result['lgdogf'] = g**Rational(1, d+1)
if 'ogf' not in result:
result['ogf'] = exp(integrate(result['lgdogf'], X))
break
# Logarithmic derivative of exponential generating Function (lgdegf)
if v[0] != 0 and ('lgdegf' in types
or ('egf' in types and 'egf' not in result)):
# Transform sequence / step 1 (division by factorial)
z, f = [], Integer(1)
for i, k in enumerate(v):
f *= i if i else 1
z.append(k/f)
# Transform sequence / step 2 by computing f'(x)/f(x)
# because log(f(x)) = integrate( f'(x)/f(x) )
a, w = z[0], []
for n in range(len(z)-1):
w.append(
(z[n+1]*(n+1) - sum(w[-i-1]*z[i+1] for i in range(n)))/a)
# Perform some convolutions of the sequence with itself
t = [1 if k==0 else 0 for k in range(len(w))]
for d in range(max(1, maxsqrtn)):
t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))]
g = guess_generating_function_rational(t, X=X)
if g:
result['lgdegf'] = g**Rational(1, d+1)
if 'egf' not in result:
result['egf'] = exp(integrate(result['lgdegf'], X))
break
return result
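# A minimal illustrative check (not part of the library) of the conversion used
# in the two lgd* branches above: when the logarithmic derivative f'(x)/f(x) of
# a generating function is rational, the generating function itself is recovered
# as exp(integrate(...)). For instance, the ogf of 1, 1, 1/2, 1/6, ... (= 1/k!)
# is exp(x), whose logarithmic derivative is the constant 1:
#
#   from sympy import Symbol, exp, integrate
#   x = Symbol('x')
#   lgdogf = 1                                 # d/dx log(exp(x))
#   assert exp(integrate(lgdogf, x)) == exp(x)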
@public
def guess(l, all=False, evaluate=True, niter=2, variables=None):
"""
This function is adapted from the Rate.m package for Mathematica
written by Christian Krattenthaler.
It tries to guess a formula from a given sequence of rational numbers.
Explanation
===========
In order to speed up the process, the 'all' variable is set to False by
default, stopping the computation as soon as some results are returned
during an iteration; the variable can be set to True if more iterations
are needed (other formulas may be found; however, they may be equivalent
to the first ones).
Another option is the 'evaluate' variable (default is True); setting it
to False will leave the involved products unevaluated.
By default, the number of iterations is set to 2 but a greater value (up
to len(l)-1) can be specified with the optional 'niter' variable.
More and more convoluted results are found when the order of the
iteration gets higher:
* first iteration returns polynomial or rational functions;
* second iteration returns products of rising factorials and their
inverses;
* third iteration returns products of products of rising factorials
and their inverses;
* etc.
The returned formulas contain symbols i0, i1, i2, ... where the main
variable is i0 (and the auxiliary variables are i1, i2, ...). A list of
other symbols can be provided in the 'variables' option; the length of
the list should be the value of 'niter' (more is acceptable but only
the first symbols will be used); in this case, the main variable will be
the first symbol in the list.
Examples
========
>>> from sympy.concrete.guess import guess
>>> guess([1,2,6,24,120], evaluate=False)
[Product(i1 + 1, (i1, 1, i0 - 1))]
>>> from sympy import symbols
>>> r = guess([1,2,7,42,429,7436,218348,10850216], niter=4)
>>> i0 = symbols("i0")
>>> [r[0].subs(i0,n).doit() for n in range(1,10)]
[1, 2, 7, 42, 429, 7436, 218348, 10850216, 911835460]
"""
if any(a==0 for a in l[:-1]):
return []
N = len(l)
niter = min(N-1, niter)
myprod = product if evaluate else Product
g = []
res = []
if variables is None:
symb = symbols('i:'+str(niter))
else:
symb = variables
for k, s in enumerate(symb):
g.append(l)
n, r = len(l), []
for i in range(n-2-1, -1, -1):
ri = rinterp(enumerate(g[k][:-1], start=1), i, X=s)
if ((denom(ri).subs({s:n}) != 0)
and (ri.subs({s:n}) - g[k][-1] == 0)
and ri not in r):
r.append(ri)
if r:
for i in range(k-1, -1, -1):
r = list(map(lambda v: g[i][0]
* myprod(v, (symb[i+1], 1, symb[i]-1)), r))
if not all: return r
res += r
l = [Rational(l[i+1], l[i]) for i in range(N-k-1)]
return res
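# A minimal usage sketch (not part of the library) of the 'variables' option
# described above; the symbols n and k below are illustrative names chosen for
# this example, not required ones:
#
#   from sympy import symbols
#   from sympy.concrete.guess import guess
#   n, k = symbols('n k')
#   guess([1, 2, 6, 24, 120], evaluate=False, variables=[n, k])
#   # expected (with the default niter=2): [Product(k + 1, (k, 1, n - 1))]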
|
255bfa0edcd31745a3798a585625e0d2693fde617598661647980a28ddbedb29 | from sympy.core.add import Add
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import AppliedUndef, UndefinedFunction
from sympy.core.mul import Mul
from sympy.core.relational import Equality, Relational
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, Dummy
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import (piecewise_fold,
Piecewise)
from sympy.logic.boolalg import BooleanFunction
from sympy.matrices.matrices import MatrixBase
from sympy.sets.sets import Interval, Set
from sympy.sets.fancysets import Range
from sympy.tensor.indexed import Idx
from sympy.utilities import flatten
from sympy.utilities.iterables import sift, is_sequence
from sympy.utilities.exceptions import SymPyDeprecationWarning
def _common_new(cls, function, *symbols, discrete, **assumptions):
"""Return either a special return value or the tuple,
(function, limits, orientation). This code is common to
both ExprWithLimits and AddWithLimits."""
function = sympify(function)
if isinstance(function, Equality):
# This transforms e.g. Integral(Eq(x, y)) to Eq(Integral(x), Integral(y))
# but that is only valid for definite integrals.
limits, orientation = _process_limits(*symbols, discrete=discrete)
if not (limits and all(len(limit) == 3 for limit in limits)):
SymPyDeprecationWarning(
feature='Integral(Eq(x, y))',
useinstead='Eq(Integral(x, z), Integral(y, z))',
issue=18053,
deprecated_since_version=1.6,
).warn()
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols, discrete=discrete)
for i, li in enumerate(limits):
if len(li) == 4:
function = function.subs(li[0], li[-1])
limits[i] = Tuple(*li[:-1])
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Any embedded piecewise functions need to be brought out to the
# top level. We only fold Piecewise that contain the integration
# variable.
reps = {}
symbols_of_integration = {i[0] for i in limits}
for p in function.atoms(Piecewise):
if not p.has(*symbols_of_integration):
reps[p] = Dummy()
# mask off those that don't
function = function.xreplace(reps)
# do the fold
function = piecewise_fold(function)
# remove the masking
function = function.xreplace({v: k for k, v in reps.items()})
return function, limits, orientation
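# A minimal sketch (not part of the library) of the mask/fold/unmask step above:
# Piecewise pieces that do not involve any integration variable are hidden
# behind Dummy symbols so that piecewise_fold only lifts the relevant ones to
# the top level.
#
#   from sympy import Dummy, Piecewise, piecewise_fold
#   from sympy.abc import x, y
#   expr = Piecewise((x, x > 0), (0, True)) * Piecewise((1, y > 0), (2, True))
#   mask = {p: Dummy() for p in expr.atoms(Piecewise) if not p.has(x)}
#   folded = piecewise_fold(expr.xreplace(mask))
#   result = folded.xreplace({v: k for k, v in mask.items()})
#   # only the x-dependent Piecewise ends up at the top level of `result`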
def _process_limits(*symbols, discrete=None):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
In the case that a limit is specified as (symbol, Range), a list of
length 4 may be returned if a change of variables is needed; the
expression that should replace the symbol in the expression is
the fourth element in the list.
"""
limits = []
orientation = 1
if discrete is None:
err_msg = 'discrete must be True or False'
elif discrete:
err_msg = 'use Range, not Interval or Relational'
else:
err_msg = 'use Interval or Relational, not Range'
for V in symbols:
if isinstance(V, (Relational, BooleanFunction)):
if discrete:
raise TypeError(err_msg)
variable = V.atoms(Symbol).pop()
V = (variable, V.as_set())
elif isinstance(V, Symbol) or getattr(V, '_diff_wrt', False):
if isinstance(V, Idx):
if V.lower is None or V.upper is None:
limits.append(Tuple(V))
else:
limits.append(Tuple(V, V.lower, V.upper))
else:
limits.append(Tuple(V))
continue
if is_sequence(V) and not isinstance(V, Set):
if len(V) == 2 and isinstance(V[1], Set):
V = list(V)
if isinstance(V[1], Interval): # includes Reals
if discrete:
raise TypeError(err_msg)
V[1:] = V[1].inf, V[1].sup
elif isinstance(V[1], Range):
if not discrete:
raise TypeError(err_msg)
lo = V[1].inf
hi = V[1].sup
dx = abs(V[1].step) # direction doesn't matter
if dx == 1:
V[1:] = [lo, hi]
else:
if lo is not S.NegativeInfinity:
V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo]
else:
V = [V[0]] + [0, S.Infinity, -dx*V[0] + hi]
else:
# more complicated sets would require splitting, e.g.
# Union(Interval(1, 3), Interval(6, 10))
raise NotImplementedError(
'expecting Range' if discrete else
'Relational or single Interval' )
V = sympify(flatten(V)) # a list of sympified elements
if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False):
newsymbol = V[0]
if len(V) == 3:
# general case
if V[2] is None and V[1] is not None:
orientation *= -1
V = [newsymbol] + [i for i in V[1:] if i is not None]
lenV = len(V)
if not isinstance(newsymbol, Idx) or lenV == 3:
if lenV == 4:
limits.append(Tuple(*V))
continue
if lenV == 3:
if isinstance(newsymbol, Idx):
# Idx represents an integer which may have
# specified values it can take on; if it is
# given such a value, an error is raised here
# if the summation would try to give it a larger
# or smaller value than permitted. None and Symbolic
# values will not raise an error.
lo, hi = newsymbol.lower, newsymbol.upper
try:
if lo is not None and not bool(V[1] >= lo):
raise ValueError("Summation will set Idx value too low.")
except TypeError:
pass
try:
if hi is not None and not bool(V[2] <= hi):
raise ValueError("Summation will set Idx value too high.")
except TypeError:
pass
limits.append(Tuple(*V))
continue
if lenV == 1 or (lenV == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif lenV == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
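# A minimal sketch (not part of the library) of the canonical forms produced by
# _process_limits for a few typical limit specifications (x is the dummy):
#
#   (x,)                 ->  Tuple(x)            no bounds given
#   (x, 2)               ->  Tuple(x, 2)         a single bound
#   (x, 1, 3)            ->  Tuple(x, 1, 3)      lower and upper bounds
#   (x, Interval(1, 3))  ->  Tuple(x, 1, 3)      continuous (discrete=False) only
#   (x, Range(0, 7, 2))  ->  Tuple(x, 0, 3, 2*x) discrete only; the extra fourth
#                                                entry records the change of
#                                                variables applied by _common_new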
class ExprWithLimits(Expr):
__slots__ = ('is_commutative',)
def __new__(cls, function, *symbols, **assumptions):
from sympy.concrete.products import Product
pre = _common_new(cls, function, *symbols,
discrete=issubclass(cls, Product), **assumptions)
if isinstance(pre, tuple):
function, limits, _ = pre
else:
return pre
# limits must have upper and lower bounds; the indefinite form
# is not supported. This restriction does not apply to AddWithLimits
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def kind(self):
return self.function.kind
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the limit variables.
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def bound_symbols(self):
"""Return only variables that are dummy variables.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i, j, k
>>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols
[i, j]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits if len(l) != 1]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
{y}
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def _eval_interval(self, x, a, b):
limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s, n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x, a
>>> Integral(a*x**2, x).subs(x, 4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
if new._diff_wrt:
xab = (new,)
else:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old, (AppliedUndef, UndefinedFunction)):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution cannot create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
@property
def has_finite_limits(self):
"""
Returns True if the limits are known to be finite, either by the
explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be infinite, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 1, 8)).has_finite_limits
True
>>> Integral(x, (x, 1, oo)).has_finite_limits
False
>>> M = Symbol('M')
>>> Sum(x, (x, 1, M)).has_finite_limits
>>> N = Symbol('N', integer=True)
>>> Product(x, (x, 1, N)).has_finite_limits
True
See Also
========
has_reversed_limits
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
if any(l.is_infinite for l in lim[1:]):
# Any of the bounds are +/-oo
return False
elif any(l.is_infinite is None for l in lim[1:]):
# Maybe there are assumptions on the variable?
if lim[0].is_infinite is None:
ret_None = True
else:
if lim[0].is_infinite is None:
ret_None = True
if ret_None:
return None
return True
@property
def has_reversed_limits(self):
"""
Returns True if the limits are known to be in reversed order, either
by the explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be in normal order, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 8, 1)).has_reversed_limits
True
>>> Sum(x, (x, 1, oo)).has_reversed_limits
False
>>> M = Symbol('M')
>>> Integral(x, (x, 1, M)).has_reversed_limits
>>> N = Symbol('N', integer=True, positive=True)
>>> Sum(x, (x, 1, N)).has_reversed_limits
False
>>> Product(x, (x, 2, N)).has_reversed_limits
>>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits
False
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
var, a, b = lim
dif = b - a
if dif.is_extended_negative:
return True
elif dif.is_extended_nonnegative:
continue
else:
ret_None = True
else:
return None
if ret_None:
return None
return False
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
from sympy.concrete.summations import Sum
pre = _common_new(cls, function, *symbols,
discrete=issubclass(cls, Sum), **assumptions)
if isinstance(pre, tuple):
function, limits, orientation = pre
else:
return pre
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function] # orientation not used in ExprWithLimits
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all(x.is_real for x in flatten(self.limits)):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all(x.is_real for x in flatten(self.limits)):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all(x.is_real for x in flatten(self.limits)):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
if 1 == len(self.limits):
summand = self.function.factor(**hints)
if summand.is_Mul:
out = sift(summand.args, lambda w: w.is_commutative \
and not set(self.variables) & w.free_symbols)
return Mul(*out[True])*self.func(Mul(*out[False]), \
*self.limits)
else:
summand = self.func(self.function, *self.limits[0:-1]).factor()
if not summand.has(self.variables[-1]):
return self.func(1, [self.limits[-1]]).doit()*summand
elif isinstance(summand, Mul):
return self.func(summand, self.limits[-1]).factor()
return self
def _eval_expand_basic(self, **hints):
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return Add(*[self.func(i, *self.limits) for i in summand.args])
elif isinstance(summand, MatrixBase):
return summand.applyfunc(lambda x: self.func(x, *self.limits))
elif summand != self.function:
return self.func(summand, *self.limits)
return self
|
3aea2d5f4b9e756aaf991136cd2e20e0d9d513ad7442f12b096d3877d189495f | from typing import Tuple as tTuple
from sympy.calculus.singularities import is_decreasing
from sympy.calculus.util import AccumulationBounds
from .expr_with_intlimits import ExprWithIntLimits
from .expr_with_limits import AddWithLimits
from .gosper import gosper_sum
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.containers import Tuple
from sympy.core.function import Derivative, expand
from sympy.core.mul import Mul
from sympy.core.numbers import Float
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy, Wild, Symbol, symbols
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cot, csc
from sympy.functions.special.hyper import hyper
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.functions.special.zeta_functions import zeta
from sympy.integrals.integrals import Integral
from sympy.logic.boolalg import And
from sympy.polys.partfrac import apart
from sympy.polys.polyerrors import PolynomialError, PolificationFailed
from sympy.polys.polytools import parallel_poly_from_expr, Poly, factor
from sympy.polys.rationaltools import together
from sympy.series.limitseq import limit_seq
from sympy.series.order import O
from sympy.series.residues import residue
from sympy.sets.sets import FiniteSet, Interval
from sympy.simplify.combsimp import combsimp
from sympy.simplify.hyperexpand import hyperexpand
from sympy.simplify.powsimp import powsimp
from sympy.simplify.radsimp import denom, fraction
from sympy.simplify.simplify import (factor_sum, sum_combine, simplify,
nsimplify, hypersimp)
from sympy.solvers.solvers import solve
from sympy.solvers.solveset import solveset
from sympy.utilities.iterables import sift
import itertools
class Sum(AddWithLimits, ExprWithIntLimits):
r"""
Represents unevaluated summation.
Explanation
===========
``Sum`` represents a finite or infinite series, with the first argument
being the general form of terms in the series, and the second argument
being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking
all integer values from ``start`` through ``end``. In accordance with
long-standing mathematical convention, the end term is included in the
summation.
Finite sums
===========
For finite sums (and sums with symbolic limits assumed to be finite) we
follow the summation convention described by Karr [1], especially
definition 3 of section 1.4. The sum:
.. math::
\sum_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\sum_{m \leq i < n} f(i) = f(m) + f(m+1) + \ldots + f(n-2) + f(n-1)
with the upper limit value `f(n)` excluded. The sum over an empty set is
zero if and only if `m = n`:
.. math::
\sum_{m \leq i < n} f(i) = 0 \quad \mathrm{for} \quad m = n
Finally, for all other sums over empty sets we assume the following
definition:
.. math::
\sum_{m \leq i < n} f(i) = - \sum_{n \leq i < m} f(i) \quad \mathrm{for} \quad m > n
It is important to note that Karr defines all sums with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the summation convention. Indeed we have:
.. math::
\sum_{m \leq i < n} f(i) = \sum_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import i, k, m, n, x
>>> from sympy import Sum, factorial, oo, IndexedBase, Function
>>> Sum(k, (k, 1, m))
Sum(k, (k, 1, m))
>>> Sum(k, (k, 1, m)).doit()
m**2/2 + m/2
>>> Sum(k**2, (k, 1, m))
Sum(k**2, (k, 1, m))
>>> Sum(k**2, (k, 1, m)).doit()
m**3/3 + m**2/2 + m/6
>>> Sum(x**k, (k, 0, oo))
Sum(x**k, (k, 0, oo))
>>> Sum(x**k, (k, 0, oo)).doit()
Piecewise((1/(1 - x), Abs(x) < 1), (Sum(x**k, (k, 0, oo)), True))
>>> Sum(x**k/factorial(k), (k, 0, oo)).doit()
exp(x)
Here are examples of summation with symbolic indices. You
can use either Function or IndexedBase classes:
>>> f = Function('f')
>>> Sum(f(n), (n, 0, 3)).doit()
f(0) + f(1) + f(2) + f(3)
>>> Sum(f(n), (n, 0, oo)).doit()
Sum(f(n), (n, 0, oo))
>>> f = IndexedBase('f')
>>> Sum(f[n]**2, (n, 0, 3)).doit()
f[0]**2 + f[1]**2 + f[2]**2 + f[3]**2
An example showing that the symbolic result of a summation is still
valid for seemingly nonsensical values of the limits. The Karr
convention allows us to give a perfectly valid interpretation to
those sums by interchanging the limits according to the above rules:
>>> S = Sum(i, (i, 1, n)).doit()
>>> S
n**2/2 + n/2
>>> S.subs(n, -4)
6
>>> Sum(i, (i, 1, -4)).doit()
6
>>> Sum(-i, (i, -3, 0)).doit()
6
An explicit example of the Karr summation convention:
>>> S1 = Sum(i**2, (i, m, m+n-1)).doit()
>>> S1
m**2*n + m*n**2 - m*n + n**3/3 - n**2/2 + n/6
>>> S2 = Sum(i**2, (i, m+n, m-1)).doit()
>>> S2
-m**2*n - m*n**2 + m*n - n**3/3 + n**2/2 - n/6
>>> S1 + S2
0
>>> S3 = Sum(i, (i, m, m-1)).doit()
>>> S3
0
See Also
========
summation
Product, sympy.concrete.products.product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] https://en.wikipedia.org/wiki/Summation#Capital-sigma_notation
.. [3] https://en.wikipedia.org/wiki/Empty_sum
"""
__slots__ = ('is_commutative',)
limits: tTuple[tTuple[Symbol, Expr, Expr]]
def __new__(cls, function, *symbols, **assumptions):
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
if not hasattr(obj, 'limits'):
return obj
if any(len(l) != 3 or None in l for l in obj.limits):
raise ValueError('Sum requires values for lower and upper bounds.')
return obj
def _eval_is_zero(self):
# a Sum is only zero if its function is zero or if all terms
# cancel out. This only answers whether the summand is zero; if
# not then None is returned since we don't analyze whether all
# terms cancel out.
if self.function.is_zero or self.has_empty_sequence:
return True
def _eval_is_extended_real(self):
if self.has_empty_sequence:
return True
return self.function.is_extended_real
def _eval_is_positive(self):
if self.has_finite_limits and self.has_reversed_limits is False:
return self.function.is_positive
def _eval_is_negative(self):
if self.has_finite_limits and self.has_reversed_limits is False:
return self.function.is_negative
def _eval_is_finite(self):
if self.has_finite_limits and self.function.is_finite:
return True
def doit(self, **hints):
if hints.get('deep', True):
f = self.function.doit(**hints)
else:
f = self.function
# first make sure any definite limits have summation
# variables with matching assumptions
reps = {}
for xab in self.limits:
d = _dummy_with_inherited_properties_concrete(xab)
if d:
reps[xab[0]] = d
if reps:
undo = {v: k for k, v in reps.items()}
did = self.xreplace(reps).doit(**hints)
if isinstance(did, tuple): # when separate=True
did = tuple([i.xreplace(undo) for i in did])
elif did is not None:
did = did.xreplace(undo)
else:
did = self
return did
if self.function.is_Matrix:
expanded = self.expand()
if self != expanded:
return expanded.doit()
return _eval_matrix_sum(self)
for n, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif == -1:
# Any summation over an empty set is zero
return S.Zero
if dif.is_integer and dif.is_negative:
a, b = b + 1, a - 1
f = -f
newf = eval_sum(f, (i, a, b))
if newf is None:
if f == self.function:
zeta_function = self.eval_zeta_function(f, (i, a, b))
if zeta_function is not None:
return zeta_function
return self
else:
return self.func(f, *self.limits[n:])
f = newf
if hints.get('deep', True):
# eval_sum could return partially unevaluated
# result with Piecewise. In this case we won't
# doit() recursively.
if not isinstance(f, Piecewise):
return f.doit(**hints)
return f
def eval_zeta_function(self, f, limits):
"""
Check whether the function matches with the zeta function.
If it matches, then return a `Piecewise` expression because
zeta function does not converge unless `s > 1` and `q > 0`
"""
i, a, b = limits
w, y, z = Wild('w', exclude=[i]), Wild('y', exclude=[i]), Wild('z', exclude=[i])
result = f.match((w * i + y) ** (-z))
if result is not None and b is S.Infinity:
coeff = 1 / result[w] ** result[z]
s = result[z]
q = result[y] / result[w] + a
return Piecewise((coeff * zeta(s, q), And(q > 0, s > 1)), (self, True))
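# A minimal sketch (not part of the library) of the matching performed above:
# the summand (2*i + 3)**(-2) matches (w*i + y)**(-z) with w = 2, y = 3 and
# z = 2, so the sum from i = 0 to oo becomes coeff*zeta(s, q) with
# coeff = 1/w**z, s = z and q = y/w + a, i.e. zeta(2, 3/2)/4:
#
#   from sympy import Sum, oo
#   from sympy.abc import i
#   s = Sum((2*i + 3)**(-2), (i, 0, oo))
#   s.eval_zeta_function(s.function, s.limits[0])   # expected: zeta(2, 3/2)/4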
def _eval_derivative(self, x):
"""
Differentiate wrt x as long as x is not in the free symbols of any of
the upper or lower limits.
Explanation
===========
Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a`
since the value of the sum is discontinuous in `a`. In a case
involving a limit variable, the unevaluated derivative is returned.
"""
# diff already confirmed that x is in the free symbols of self, but we
# don't want to differentiate wrt any free symbol in the upper or lower
# limits
# XXX remove this test for free_symbols when the default _eval_derivative is in
if isinstance(x, Symbol) and x not in self.free_symbols:
return S.Zero
# get limits and the function
f, limits = self.function, list(self.limits)
limit = limits.pop(-1)
if limits: # f is the argument to a Sum
f = self.func(f, *limits)
_, a, b = limit
if x in a.free_symbols or x in b.free_symbols:
return None
df = Derivative(f, x, evaluate=True)
rv = self.func(df, limit)
return rv
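# A minimal illustrative example (not part of the library) of the rule described
# in the docstring above: differentiation passes through the summand as long as
# the derivative variable does not appear in the limits.
#
#   from sympy import Sum, Symbol
#   a, b, x = Symbol('a'), Symbol('b'), Symbol('x')
#   Sum(a*b*x, (x, 1, a)).diff(b)    # expected: Sum(a*x, (x, 1, a))
#   Sum(a*b*x, (x, 1, a)).diff(a)    # left unevaluated, since `a` is a limit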
def _eval_difference_delta(self, n, step):
k, _, upper = self.args[-1]
new_upper = upper.subs(n, n + step)
if len(self.args) == 2:
f = self.args[0]
else:
f = self.func(*self.args[:-1])
return Sum(f, (k, upper + 1, new_upper)).doit()
def _eval_simplify(self, **kwargs):
# split the function into adds
terms = Add.make_args(expand(self.function))
s_t = [] # Sum Terms
o_t = [] # Other Terms
for term in terms:
if term.has(Sum):
# if there is an embedded sum here
# it is of the form x * (Sum(whatever))
# hence we make a Mul out of it, and simplify all interior sum terms
subterms = Mul.make_args(expand(term))
out_terms = []
for subterm in subterms:
# go through each term
if isinstance(subterm, Sum):
# if it's a sum, simplify it
out_terms.append(subterm._eval_simplify())
else:
# otherwise, add it as is
out_terms.append(subterm)
# turn it back into a Mul
s_t.append(Mul(*out_terms))
else:
o_t.append(term)
# next try to combine any interior sums for further simplification
result = Add(sum_combine(s_t), *o_t)
return factor_sum(result, limits=self.limits)
def is_convergent(self):
r"""
Checks for the convergence of a Sum.
Explanation
===========
We divide the study of convergence of infinite sums and products in
two parts.
First Part:
One part is the question whether all the terms are well defined, i.e.,
they are finite in a sum and also non-zero in a product. Zero
is the analogy of (minus) infinity in products as
:math:`e^{-\infty} = 0`.
Second Part:
The second part is the question of convergence after infinities,
and zeros in products, have been omitted assuming that their number
is finite. This means that we only consider the tail of the sum or
product, starting from some point after which all terms are well
defined.
For example, in a sum of the form:
.. math::
\sum_{1 \leq n < \infty} \frac{1}{n^2 + an + b}
where a and b are numbers. The routine will return true, even if there
are infinities in the term sequence (at most two). An analogous
product would be:
.. math::
\prod_{1 \leq n < \infty} e^{\frac{1}{n^2 + an + b}}
This is how convergence is interpreted. It is concerned with what
happens at the limit. Finding the bad terms is another independent
matter.
Note: It is the responsibility of the user to ensure that the sum or
product is well defined.
There are various tests employed to check the convergence, such as the
divergence test, root test, integral test, alternating series test,
comparison tests and Dirichlet tests. The method returns true if the Sum
is convergent, false if it is divergent, and raises NotImplementedError
if convergence cannot be checked.
References
==========
.. [1] https://en.wikipedia.org/wiki/Convergence_tests
Examples
========
>>> from sympy import factorial, S, Sum, Symbol, oo
>>> n = Symbol('n', integer=True)
>>> Sum(n/(n - 1), (n, 4, 7)).is_convergent()
True
>>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent()
False
>>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent()
False
>>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent()
True
See Also
========
Sum.is_absolutely_convergent()
sympy.concrete.products.Product.is_convergent()
"""
p, q, r = symbols('p q r', cls=Wild)
sym = self.limits[0][0]
lower_limit = self.limits[0][1]
upper_limit = self.limits[0][2]
sequence_term = self.function.simplify()
if len(sequence_term.free_symbols) > 1:
raise NotImplementedError("convergence checking for more than one symbol "
"containing series is not handled")
if lower_limit.is_finite and upper_limit.is_finite:
return S.true
# transform sym -> -sym and rewrite the limits so that
# lower_limit = -upper_limit and upper_limit = S.Infinity
if lower_limit is S.NegativeInfinity:
if upper_limit is S.Infinity:
return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \
Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent()
sequence_term = simplify(sequence_term.xreplace({sym: -sym}))
lower_limit = -upper_limit
upper_limit = S.Infinity
sym_ = Dummy(sym.name, integer=True, positive=True)
sequence_term = sequence_term.xreplace({sym: sym_})
sym = sym_
interval = Interval(lower_limit, upper_limit)
# Piecewise function handle
if sequence_term.is_Piecewise:
for func, cond in sequence_term.args:
# see if it represents something going to oo
if cond == True or cond.as_set().sup is S.Infinity:
s = Sum(func, (sym, lower_limit, upper_limit))
return s.is_convergent()
return S.true
### -------- Divergence test ----------- ###
try:
lim_val = limit_seq(sequence_term, sym)
if lim_val is not None and lim_val.is_zero is False:
return S.false
except NotImplementedError:
pass
try:
lim_val_abs = limit_seq(abs(sequence_term), sym)
if lim_val_abs is not None and lim_val_abs.is_zero is False:
return S.false
except NotImplementedError:
pass
order = O(sequence_term, (sym, S.Infinity))
### --------- p-series test (1/n**p) ---------- ###
p_series_test = order.expr.match(sym**p)
if p_series_test is not None:
if p_series_test[p] < -1:
return S.true
if p_series_test[p] >= -1:
return S.false
### ------------- comparison test ------------- ###
# 1/(n**p*log(n)**q*log(log(n))**r) comparison
n_log_test = order.expr.match(1/(sym**p*log(sym)**q*log(log(sym))**r))
if n_log_test is not None:
if (n_log_test[p] > 1 or
(n_log_test[p] == 1 and n_log_test[q] > 1) or
(n_log_test[p] == n_log_test[q] == 1 and n_log_test[r] > 1)):
return S.true
return S.false
### ------------- Limit comparison test -----------###
# (1/n) comparison
try:
lim_comp = limit_seq(sym*sequence_term, sym)
if lim_comp is not None and lim_comp.is_number and lim_comp > 0:
return S.false
except NotImplementedError:
pass
### ----------- ratio test ---------------- ###
next_sequence_term = sequence_term.xreplace({sym: sym + 1})
ratio = combsimp(powsimp(next_sequence_term/sequence_term))
try:
lim_ratio = limit_seq(ratio, sym)
if lim_ratio is not None and lim_ratio.is_number:
if abs(lim_ratio) > 1:
return S.false
if abs(lim_ratio) < 1:
return S.true
except NotImplementedError:
lim_ratio = None
### ---------- Raabe's test -------------- ###
if lim_ratio == 1: # ratio test inconclusive
test_val = sym*(sequence_term/
sequence_term.subs(sym, sym + 1) - 1)
test_val = test_val.gammasimp()
try:
lim_val = limit_seq(test_val, sym)
if lim_val is not None and lim_val.is_number:
if lim_val > 1:
return S.true
if lim_val < 1:
return S.false
except NotImplementedError:
pass
### ----------- root test ---------------- ###
# lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity)
try:
lim_evaluated = limit_seq(abs(sequence_term)**(1/sym), sym)
if lim_evaluated is not None and lim_evaluated.is_number:
if lim_evaluated < 1:
return S.true
if lim_evaluated > 1:
return S.false
except NotImplementedError:
pass
### ------------- alternating series test ----------- ###
dict_val = sequence_term.match(S.NegativeOne**(sym + p)*q)
if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval):
return S.true
### ------------- integral test -------------- ###
check_interval = None
maxima = solveset(sequence_term.diff(sym), sym, interval)
if not maxima:
check_interval = interval
elif isinstance(maxima, FiniteSet) and maxima.sup.is_number:
check_interval = Interval(maxima.sup, interval.sup)
if (check_interval is not None and
(is_decreasing(sequence_term, check_interval) or
is_decreasing(-sequence_term, check_interval))):
integral_val = Integral(
sequence_term, (sym, lower_limit, upper_limit))
try:
integral_val_evaluated = integral_val.doit()
if integral_val_evaluated.is_number:
return S(integral_val_evaluated.is_finite)
except NotImplementedError:
pass
### ----- Dirichlet and bounded times convergent tests ----- ###
# TODO
#
# Dirichlet_test
# https://en.wikipedia.org/wiki/Dirichlet%27s_test
#
# Bounded times convergent test
# It is based on comparison theorems for series.
# In particular, if the general term of a series can
# be written as a product of two terms a_n and b_n
# and if a_n is bounded and if Sum(b_n) is absolutely
# convergent, then the original series Sum(a_n * b_n)
# is absolutely convergent and so convergent.
#
# The following code can grow like 2**n where n is the
# number of args in order.expr.
# Combined with the potentially slow checks inside the loop,
# this could make this test extremely slow for larger
# summation expressions.
if order.expr.is_Mul:
args = order.expr.args
argset = set(args)
### -------------- Dirichlet tests -------------- ###
m = Dummy('m', integer=True)
def _dirichlet_test(g_n):
try:
ing_val = limit_seq(Sum(g_n, (sym, interval.inf, m)).doit(), m)
if ing_val is not None and ing_val.is_finite:
return S.true
except NotImplementedError:
pass
### -------- bounded times convergent test ---------###
def _bounded_convergent_test(g1_n, g2_n):
try:
lim_val = limit_seq(g1_n, sym)
if lim_val is not None and (lim_val.is_finite or (
isinstance(lim_val, AccumulationBounds)
and (lim_val.max - lim_val.min).is_finite)):
if Sum(g2_n, (sym, lower_limit, upper_limit)).is_absolutely_convergent():
return S.true
except NotImplementedError:
pass
for n in range(1, len(argset)):
for a_tuple in itertools.combinations(args, n):
b_set = argset - set(a_tuple)
a_n = Mul(*a_tuple)
b_n = Mul(*b_set)
if is_decreasing(a_n, interval):
dirich = _dirichlet_test(b_n)
if dirich is not None:
return dirich
bc_test = _bounded_convergent_test(a_n, b_n)
if bc_test is not None:
return bc_test
_sym = self.limits[0][0]
sequence_term = sequence_term.xreplace({sym: _sym})
raise NotImplementedError("The algorithm to find the Sum convergence of %s "
"is not yet implemented" % (sequence_term))
def is_absolutely_convergent(self):
"""
Checks for the absolute convergence of an infinite series.
Same as checking convergence of absolute value of sequence_term of
an infinite series.
References
==========
.. [1] https://en.wikipedia.org/wiki/Absolute_convergence
Examples
========
>>> from sympy import Sum, Symbol, oo
>>> n = Symbol('n', integer=True)
>>> Sum((-1)**n, (n, 1, oo)).is_absolutely_convergent()
False
>>> Sum((-1)**n/n**2, (n, 1, oo)).is_absolutely_convergent()
True
See Also
========
Sum.is_convergent()
"""
return Sum(abs(self.function), self.limits).is_convergent()
def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
"""
Return an Euler-Maclaurin approximation of self, where m is the
number of leading terms to sum directly and n is the number of
terms in the tail.
With m = n = 0, this is simply the corresponding integral
plus a first-order endpoint correction.
Returns (s, e) where s is the Euler-Maclaurin approximation
and e is the estimated error (taken to be the magnitude of
the first omitted term in the tail):
>>> from sympy.abc import k, a, b
>>> from sympy import Sum
>>> Sum(1/k, (k, 2, 5)).doit().evalf()
1.28333333333333
>>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin()
>>> s
-log(2) + 7/20 + log(5)
>>> from sympy import sstr
>>> print(sstr((s.evalf(), e.evalf()), full_prec=True))
(1.26629073187415, 0.0175000000000000)
The endpoints may be symbolic:
>>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin()
>>> s
-log(a) + log(b) + 1/(2*b) + 1/(2*a)
>>> e
Abs(1/(12*b**2) - 1/(12*a**2))
If the function is a polynomial of degree at most 2n+1, the
Euler-Maclaurin formula becomes exact (and e = 0 is returned):
>>> Sum(k, (k, 2, b)).euler_maclaurin()
(b**2/2 + b/2 - 1, 0)
>>> Sum(k, (k, 2, b)).doit()
b**2/2 + b/2 - 1
With a nonzero eps specified, the summation is ended
as soon as the remainder term is less than the epsilon.
"""
m = int(m)
n = int(n)
f = self.function
if len(self.limits) != 1:
raise ValueError("More than 1 limit")
i, a, b = self.limits[0]
if (a > b) == True:
if a - b == 1:
return S.Zero, S.Zero
a, b = b + 1, a - 1
f = -f
s = S.Zero
if m:
if b.is_Integer and a.is_Integer:
m = min(m, b - a + 1)
if not eps or f.is_polynomial(i):
for k in range(m):
s += f.subs(i, a + k)
else:
term = f.subs(i, a)
if term:
test = abs(term.evalf(3)) < eps
if test == True:
return s, abs(term)
elif not (test == False):
# a symbolic Relational class, can't go further
return term, S.Zero
s += term
for k in range(1, m):
term = f.subs(i, a + k)
if abs(term.evalf(3)) < eps and term != 0:
return s, abs(term)
s += term
if b - a + 1 == m:
return s, S.Zero
a += m
x = Dummy('x')
I = Integral(f.subs(i, x), (x, a, b))
if eval_integral:
I = I.doit()
s += I
def fpoint(expr):
if b is S.Infinity:
return expr.subs(i, a), 0
return expr.subs(i, a), expr.subs(i, b)
fa, fb = fpoint(f)
iterm = (fa + fb)/2
g = f.diff(i)
for k in range(1, n + 2):
ga, gb = fpoint(g)
term = bernoulli(2*k)/factorial(2*k)*(gb - ga)
if k > n:
break
if eps and term:
term_evalf = term.evalf(3)
if term_evalf is S.NaN:
return S.NaN, S.NaN
if abs(term_evalf) < eps:
break
s += term
g = g.diff(i, 2, simplify=False)
return s + iterm, abs(term)
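# A compact restatement (not part of the library) of the approximation computed
# above: after the first m terms are summed directly, the tail from a + m to b
# is approximated by
#
#   Integral(f, (i, a + m, b)) + (f(a + m) + f(b))/2
#       + Sum(bernoulli(2*k)/factorial(2*k) * (g_k(b) - g_k(a + m)), (k, 1, n))
#
# where g_k is the (2*k - 1)-th derivative of f, and the magnitude of the first
# omitted term (k = n + 1) is returned as the error estimate.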
def reverse_order(self, *indices):
"""
Reverse the order of a limit in a Sum.
Explanation
===========
``reverse_order(self, *indices)`` reverses some limits in the expression
``self`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> Sum(x, (x, 0, 3)).reverse_order(x)
Sum(-x, (x, 4, -1))
>>> Sum(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(x, y)
Sum(x*y, (x, 6, 0), (y, 7, -1))
>>> Sum(x, (x, a, b)).reverse_order(x)
Sum(-x, (x, b + 1, a - 1))
>>> Sum(x, (x, a, b)).reverse_order(0)
Sum(-x, (x, b + 1, a - 1))
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x**2, (x, a, b), (x, c, d))
>>> S
Sum(x**2, (x, a, b), (x, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x**2, (x, b + 1, a - 1), (x, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x**2, (x, b + 1, a - 1), (x, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index, reorder_limit,
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = self.index(indx)
e = 1
limits = []
for i, limit in enumerate(self.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Sum(e * self.function, *limits)
def summation(f, *symbols, **kwargs):
r"""
Compute the summation of f with respect to symbols.
Explanation
===========
The notation for symbols is similar to the notation used in Integral.
summation(f, (i, a, b)) computes the sum of f with respect to i from a to b,
i.e.,
::
b
____
\ `
summation(f, (i, a, b)) = ) f
/___,
i = a
If it cannot compute the sum, it returns an unevaluated Sum object.
Repeated sums can be computed by introducing additional symbol tuples:
Examples
========
>>> from sympy import summation, oo, symbols, log
>>> i, n, m = symbols('i n m', integer=True)
>>> summation(2*i - 1, (i, 1, n))
n**2
>>> summation(1/2**i, (i, 0, oo))
2
>>> summation(1/log(n)**n, (n, 2, oo))
Sum(log(n)**(-n), (n, 2, oo))
>>> summation(i, (i, 0, n), (n, 0, m))
m**3/6 + m**2/2 + m/3
>>> from sympy.abc import x
>>> from sympy import factorial
>>> summation(x**n/factorial(n), (n, 0, oo))
exp(x)
See Also
========
Sum
Product, sympy.concrete.products.product
"""
return Sum(f, *symbols, **kwargs).doit(deep=False)
def telescopic_direct(L, R, n, limits):
"""
Returns the direct summation of the terms of a telescopic sum
Explanation
===========
L is the term with lower index
R is the term with higher index
n is the difference between the indices of L and R
Examples
========
>>> from sympy.concrete.summations import telescopic_direct
>>> from sympy.abc import k, a, b
>>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
-1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a
"""
(i, a, b) = limits
s = 0
for m in range(n):
s += L.subs(i, a + m) + R.subs(i, b - m)
return s
def telescopic(L, R, limits):
'''
Tries to perform the summation using the telescopic property.
Return None if not possible.
'''
(i, a, b) = limits
if L.is_Add or R.is_Add:
return None
# We want to solve(L.subs(i, i + m) + R, m)
# First we try a simple match since this does things that
# solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails
k = Wild("k")
sol = (-R).match(L.subs(i, i + k))
s = None
if sol and k in sol:
s = sol[k]
if not (s.is_Integer and L.subs(i, i + s) == -R):
# sometimes match fails, e.g. f(x+2).match(-f(x+k)) -> {k: -2 - 2*x}
s = None
# But there are things that match doesn't do that solve
# can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1
if s is None:
m = Dummy('m')
try:
sol = solve(L.subs(i, i + m) + R, m) or []
except NotImplementedError:
return None
sol = [si for si in sol if si.is_Integer and
(L.subs(i, i + si) + R).expand().is_zero]
if len(sol) != 1:
return None
s = sol[0]
if s < 0:
return telescopic_direct(R, L, abs(s), (i, a, b))
elif s > 0:
return telescopic_direct(L, R, s, (i, a, b))
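# A minimal illustrative example (not part of the library) of the telescoping
# handled above: with L = 1/k and R = -1/(k + 1), the shift solving
# L(k + m) + R(k) = 0 is m = 1, so the sum collapses to boundary terms:
#
#   from sympy.concrete.summations import telescopic
#   from sympy.abc import k, a, b
#   telescopic(1/k, -1/(k + 1), (k, a, b))   # expected: 1/a - 1/(b + 1)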
def eval_sum(f, limits):
(i, a, b) = limits
if f.is_zero:
return S.Zero
if i not in f.free_symbols:
return f*(b - a + 1)
if a == b:
return f.subs(i, a)
if isinstance(f, Piecewise):
if not any(i in arg.args[1].free_symbols for arg in f.args):
# Piecewise conditions do not depend on the dummy summation variable,
# therefore we can fold: Sum(Piecewise((e, c), ...), limits)
# --> Piecewise((Sum(e, limits), c), ...)
newargs = []
for arg in f.args:
newexpr = eval_sum(arg.expr, limits)
if newexpr is None:
return None
newargs.append((newexpr, arg.cond))
return f.func(*newargs)
if f.has(KroneckerDelta):
from .delta import deltasummation, _has_simple_delta
f = f.replace(
lambda x: isinstance(x, Sum),
lambda x: x.factor()
)
if _has_simple_delta(f, limits[0]):
return deltasummation(f, limits)
dif = b - a
definite = dif.is_Integer
# Doing it directly may be faster if there are very few terms.
if definite and (dif < 100):
return eval_sum_direct(f, (i, a, b))
if isinstance(f, Piecewise):
return None
# Try to do it symbolically. Even when the number of terms is known,
# this can save time when b-a is big.
# We should try to transform to partial fractions
value = eval_sum_symbolic(f.expand(), (i, a, b))
if value is not None:
return value
# Do it directly
if definite:
return eval_sum_direct(f, (i, a, b))
def eval_sum_direct(expr, limits):
"""
Evaluate expression directly, but perform some simple checks first
to possibly result in a smaller expression and faster execution.
"""
(i, a, b) = limits
dif = b - a
# Linearity
if expr.is_Mul:
# Try factor out everything not including i
without_i, with_i = expr.as_independent(i)
if without_i != 1:
s = eval_sum_direct(with_i, (i, a, b))
if s:
r = without_i*s
if r is not S.NaN:
return r
else:
# Try term by term
L, R = expr.as_two_terms()
if not L.has(i):
sR = eval_sum_direct(R, (i, a, b))
if sR:
return L*sR
if not R.has(i):
sL = eval_sum_direct(L, (i, a, b))
if sL:
return sL*R
try:
expr = apart(expr, i) # see if it becomes an Add
except PolynomialError:
pass
if expr.is_Add:
# Try factor out everything not including i
without_i, with_i = expr.as_independent(i)
if without_i != 0:
s = eval_sum_direct(with_i, (i, a, b))
if s:
r = without_i*(dif + 1) + s
if r is not S.NaN:
return r
else:
# Try term by term
L, R = expr.as_two_terms()
lsum = eval_sum_direct(L, (i, a, b))
rsum = eval_sum_direct(R, (i, a, b))
if None not in (lsum, rsum):
r = lsum + rsum
if r is not S.NaN:
return r
return Add(*[expr.subs(i, a + j) for j in range(dif + 1)])
def eval_sum_symbolic(f, limits):
f_orig = f
(i, a, b) = limits
if not f.has(i):
return f*(b - a + 1)
# Linearity
if f.is_Mul:
# Try factor out everything not including i
without_i, with_i = f.as_independent(i)
if without_i != 1:
s = eval_sum_symbolic(with_i, (i, a, b))
if s:
r = without_i*s
if r is not S.NaN:
return r
else:
# Try term by term
L, R = f.as_two_terms()
if not L.has(i):
sR = eval_sum_symbolic(R, (i, a, b))
if sR:
return L*sR
if not R.has(i):
sL = eval_sum_symbolic(L, (i, a, b))
if sL:
return sL*R
try:
f = apart(f, i) # see if it becomes an Add
except PolynomialError:
pass
if f.is_Add:
L, R = f.as_two_terms()
lrsum = telescopic(L, R, (i, a, b))
if lrsum:
return lrsum
# Try factor out everything not including i
without_i, with_i = f.as_independent(i)
if without_i != 0:
s = eval_sum_symbolic(with_i, (i, a, b))
if s:
r = without_i*(b - a + 1) + s
if r is not S.NaN:
return r
else:
# Try term by term
lsum = eval_sum_symbolic(L, (i, a, b))
rsum = eval_sum_symbolic(R, (i, a, b))
if None not in (lsum, rsum):
r = lsum + rsum
if r is not S.NaN:
return r
# Polynomial terms with Faulhaber's formula
n = Wild('n')
result = f.match(i**n)
if result is not None:
n = result[n]
if n.is_Integer:
if n >= 0:
if (b is S.Infinity and a is not S.NegativeInfinity) or \
(a is S.NegativeInfinity and b is not S.Infinity):
return S.Infinity
return ((bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)).expand()
elif a.is_Integer and a >= 1:
if n == -1:
return harmonic(b) - harmonic(a - 1)
else:
return harmonic(b, abs(n)) - harmonic(a - 1, abs(n))
if not (a.has(S.Infinity, S.NegativeInfinity) or
b.has(S.Infinity, S.NegativeInfinity)):
# Geometric terms
c1 = Wild('c1', exclude=[i])
c2 = Wild('c2', exclude=[i])
c3 = Wild('c3', exclude=[i])
wexp = Wild('wexp')
# Here we first attempt powsimp on f for easier matching with the
# exponential pattern, and attempt expansion on the exponent for easier
# matching with the linear pattern.
e = f.powsimp().match(c1 ** wexp)
if e is not None:
e_exp = e.pop(wexp).expand().match(c2*i + c3)
if e_exp is not None:
e.update(e_exp)
p = (c1**c3).subs(e)
q = (c1**c2).subs(e)
r = p*(q**a - q**(b + 1))/(1 - q)
l = p*(b - a + 1)
return Piecewise((l, Eq(q, S.One)), (r, True))
r = gosper_sum(f, (i, a, b))
if isinstance(r, (Mul,Add)):
non_limit = r.free_symbols - Tuple(*limits[1:]).free_symbols
den = denom(together(r))
den_sym = non_limit & den.free_symbols
args = []
for v in ordered(den_sym):
try:
s = solve(den, v)
m = Eq(v, s[0]) if s else S.false
if m != False:
args.append((Sum(f_orig.subs(*m.args), limits).doit(), m))
break
except NotImplementedError:
continue
args.append((r, True))
return Piecewise(*args)
if r not in (None, S.NaN):
return r
h = eval_sum_hyper(f_orig, (i, a, b))
if h is not None:
return h
r = eval_sum_residue(f_orig, (i, a, b))
if r is not None:
return r
factored = f_orig.factor()
if factored != f_orig:
return eval_sum_symbolic(factored, (i, a, b))
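# A minimal illustrative check (not part of the library) of the geometric closed
# form used above: for a summand of the form p*q**i the branch returns
# p*(q**a - q**(b + 1))/(1 - q) when q != 1 and p*(b - a + 1) when q == 1.
# With p = 1, q = 2, a = 0 and b = n this gives
#
#   (1 - 2**(n + 1))/(1 - 2) == 2**(n + 1) - 1
#
# which is the familiar value of Sum(2**i, (i, 0, n)).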
def _eval_sum_hyper(f, i, a):
""" Returns (res, cond). Sums from a to oo. """
if a != 0:
return _eval_sum_hyper(f.subs(i, i + a), i, 0)
if f.subs(i, 0) == 0:
if simplify(f.subs(i, Dummy('i', integer=True, positive=True))) == 0:
return S.Zero, True
return _eval_sum_hyper(f.subs(i, i + 1), i, 0)
hs = hypersimp(f, i)
if hs is None:
return None
if isinstance(hs, Float):
hs = nsimplify(hs)
numer, denom = fraction(factor(hs))
top, topl = numer.as_coeff_mul(i)
bot, botl = denom.as_coeff_mul(i)
ab = [top, bot]
factors = [topl, botl]
params = [[], []]
for k in range(2):
for fac in factors[k]:
mul = 1
if fac.is_Pow:
mul = fac.exp
fac = fac.base
if not mul.is_Integer:
return None
p = Poly(fac, i)
if p.degree() != 1:
return None
m, n = p.all_coeffs()
ab[k] *= m**mul
params[k] += [n/m]*mul
# Add "1" to numerator parameters, to account for implicit n! in
# hypergeometric series.
ap = params[0] + [1]
bq = params[1]
x = ab[0]/ab[1]
h = hyper(ap, bq, x)
f = combsimp(f)
return f.subs(i, 0)*hyperexpand(h), h.convergence_statement
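# A minimal sketch (not part of the library) of the reduction performed above for
# f = 1/factorial(i) summed from 0 to oo: the term ratio hypersimp(f, i) equals
# 1/(i + 1), which yields parameters ap = [1], bq = [1] and argument x = 1, so
# the sum is f(0)*hyperexpand(hyper([1], [1], 1)), i.e. E:
#
#   from sympy import Symbol, factorial, hyper, hyperexpand, hypersimp
#   i = Symbol('i', integer=True)
#   hypersimp(1/factorial(i), i)        # expected: 1/(i + 1)
#   hyperexpand(hyper([1], [1], 1))     # expected: E, the value of the sum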
def eval_sum_hyper(f, i_a_b):
i, a, b = i_a_b
if (b - a).is_Integer:
# We are never going to do better than doing the sum in the obvious way
return None
old_sum = Sum(f, (i, a, b))
if b != S.Infinity:
if a is S.NegativeInfinity:
res = _eval_sum_hyper(f.subs(i, -i), i, -b)
if res is not None:
return Piecewise(res, (old_sum, True))
else:
res1 = _eval_sum_hyper(f, i, a)
res2 = _eval_sum_hyper(f, i, b + 1)
if res1 is None or res2 is None:
return None
(res1, cond1), (res2, cond2) = res1, res2
cond = And(cond1, cond2)
if cond == False:
return None
return Piecewise((res1 - res2, cond), (old_sum, True))
if a is S.NegativeInfinity:
res1 = _eval_sum_hyper(f.subs(i, -i), i, 1)
res2 = _eval_sum_hyper(f, i, 0)
if res1 is None or res2 is None:
return None
res1, cond1 = res1
res2, cond2 = res2
cond = And(cond1, cond2)
if cond == False or cond.as_set() == S.EmptySet:
return None
return Piecewise((res1 + res2, cond), (old_sum, True))
# Now b == oo, a != -oo
res = _eval_sum_hyper(f, i, a)
if res is not None:
r, c = res
if c == False:
if r.is_number:
f = f.subs(i, Dummy('i', integer=True, positive=True) + a)
if f.is_positive or f.is_zero:
return S.Infinity
elif f.is_negative:
return S.NegativeInfinity
return None
return Piecewise(res, (old_sum, True))
def eval_sum_residue(f, i_a_b):
r"""Compute the infinite summation with residues
Notes
=====
If $f(n), g(n)$ are polynomials with $\deg(g(n)) - \deg(f(n)) \ge 2$,
some infinite summations can be computed by the following residue
evaluations.
.. math::
\sum_{n=-\infty, g(n) \ne 0}^{\infty} \frac{f(n)}{g(n)} =
-\pi \sum_{\alpha|g(\alpha)=0}
\text{Res}(\cot(\pi x) \frac{f(x)}{g(x)}, \alpha)
.. math::
\sum_{n=-\infty, g(n) \ne 0}^{\infty} (-1)^n \frac{f(n)}{g(n)} =
-\pi \sum_{\alpha|g(\alpha)=0}
\text{Res}(\csc(\pi x) \frac{f(x)}{g(x)}, \alpha)
Examples
========
>>> from sympy import Sum, oo, Symbol
>>> x = Symbol('x')
Doubly infinite series of rational functions.
>>> Sum(1 / (x**2 + 1), (x, -oo, oo)).doit()
pi/tanh(pi)
Doubly infinite alternating series of rational functions.
>>> Sum((-1)**x / (x**2 + 1), (x, -oo, oo)).doit()
pi/sinh(pi)
Infinite series of even rational functions.
>>> Sum(1 / (x**2 + 1), (x, 0, oo)).doit()
1/2 + pi/(2*tanh(pi))
Infinite series of alternating even rational functions.
>>> Sum((-1)**x / (x**2 + 1), (x, 0, oo)).doit()
pi/(2*sinh(pi)) + 1/2
    This also has heuristics to transform an arbitrarily shifted summand or
    an arbitrarily shifted summation range into the canonical problem that
    the formula can handle.
>>> Sum(1 / (x**2 + 2*x + 2), (x, -1, oo)).doit()
1/2 + pi/(2*tanh(pi))
>>> Sum(1 / (x**2 + 4*x + 5), (x, -2, oo)).doit()
1/2 + pi/(2*tanh(pi))
>>> Sum(1 / (x**2 + 1), (x, 1, oo)).doit()
-1/2 + pi/(2*tanh(pi))
>>> Sum(1 / (x**2 + 1), (x, 2, oo)).doit()
-1 + pi/(2*tanh(pi))
References
==========
.. [#] http://www.supermath.info/InfiniteSeriesandtheResidueTheorem.pdf
.. [#] Asmar N.H., Grafakos L. (2018) Residue Theory.
In: Complex Analysis with Applications.
Undergraduate Texts in Mathematics. Springer, Cham.
https://doi.org/10.1007/978-3-319-94063-2_5
"""
i, a, b = i_a_b
def is_even_function(numer, denom):
"""Test if the rational function is an even function"""
numer_even = all(i % 2 == 0 for (i,) in numer.monoms())
denom_even = all(i % 2 == 0 for (i,) in denom.monoms())
numer_odd = all(i % 2 == 1 for (i,) in numer.monoms())
denom_odd = all(i % 2 == 1 for (i,) in denom.monoms())
return (numer_even and denom_even) or (numer_odd and denom_odd)
def match_rational(f, i):
numer, denom = f.as_numer_denom()
try:
(numer, denom), opt = parallel_poly_from_expr((numer, denom), i)
except (PolificationFailed, PolynomialError):
return None
return numer, denom
def get_poles(denom):
roots = denom.sqf_part().all_roots()
roots = sift(roots, lambda x: x.is_integer)
if None in roots:
return None
int_roots, nonint_roots = roots[True], roots[False]
return int_roots, nonint_roots
def get_shift(denom):
n = denom.degree(i)
a = denom.coeff_monomial(i**n)
b = denom.coeff_monomial(i**(n-1))
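        # By Vieta's formulas the sum of the roots of denom is -b/a, so the
        # mean root is -b/(a*n); shifting the variable by this amount centres
        # the roots about the origin, a necessary condition for the shifted
        # rational function to be even.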
shift = - b / a / n
return shift
def get_residue_factor(numer, denom, alternating):
if not alternating:
residue_factor = (numer.as_expr() / denom.as_expr()) * cot(S.Pi * i)
else:
residue_factor = (numer.as_expr() / denom.as_expr()) * csc(S.Pi * i)
return residue_factor
# We don't know how to deal with symbolic constants in summand
if f.free_symbols - set([i]):
return None
if not (a.is_Integer or a in (S.Infinity, S.NegativeInfinity)):
return None
if not (b.is_Integer or b in (S.Infinity, S.NegativeInfinity)):
return None
    # Quick exit heuristic for sums that do not have an infinite range
if a != S.NegativeInfinity and b != S.Infinity:
return None
match = match_rational(f, i)
if match:
alternating = False
numer, denom = match
else:
match = match_rational(f / S.NegativeOne**i, i)
if match:
alternating = True
numer, denom = match
else:
return None
if denom.degree(i) - numer.degree(i) < 2:
return None
if (a, b) == (S.NegativeInfinity, S.Infinity):
poles = get_poles(denom)
if poles is None:
return None
int_roots, nonint_roots = poles
if int_roots:
return None
residue_factor = get_residue_factor(numer, denom, alternating)
residues = [residue(residue_factor, i, root) for root in nonint_roots]
return -S.Pi * sum(residues)
if not (a.is_finite and b is S.Infinity):
return None
if not is_even_function(numer, denom):
        # Try shifting the summation and check whether the summand can be
        # made an even function about the origin.
# Sum(f(n), (n, a, b)) => Sum(f(n + s), (n, a - s, b - s))
shift = get_shift(denom)
if not shift.is_Integer:
return None
if shift == 0:
return None
numer = numer.shift(shift)
denom = denom.shift(shift)
if not is_even_function(numer, denom):
return None
if alternating:
f = S.NegativeOne**i * (S.NegativeOne**shift * numer.as_expr() / denom.as_expr())
else:
f = numer.as_expr() / denom.as_expr()
return eval_sum_residue(f, (i, a-shift, b-shift))
poles = get_poles(denom)
if poles is None:
return None
int_roots, nonint_roots = poles
if int_roots:
int_roots = [int(root) for root in int_roots]
int_roots_max = max(int_roots)
int_roots_min = min(int_roots)
        # Integer-valued poles must be consecutive and also symmetric about
        # the origin (because the function is even)
if not len(int_roots) == int_roots_max - int_roots_min + 1:
return None
# Check whether the summation indices contain poles
if a <= max(int_roots):
return None
residue_factor = get_residue_factor(numer, denom, alternating)
residues = [residue(residue_factor, i, root) for root in int_roots + nonint_roots]
full_sum = -S.Pi * sum(residues)
if not int_roots:
        # Compute Sum(f, (i, 0, oo)) by adding an extraneous evaluation
        # at the origin.
half_sum = (full_sum + f.xreplace({i: 0})) / 2
# Add and subtract extraneous evaluations
extraneous_neg = [f.xreplace({i: i0}) for i0 in range(int(a), 0)]
extraneous_pos = [f.xreplace({i: i0}) for i0 in range(0, int(a))]
result = half_sum + sum(extraneous_neg) - sum(extraneous_pos)
return result
# Compute Sum(f, (i, min(poles) + 1, oo))
half_sum = full_sum / 2
# Subtract extraneous evaluations
extraneous = [f.xreplace({i: i0}) for i0 in range(max(int_roots) + 1, int(a))]
result = half_sum - sum(extraneous)
return result
def _eval_matrix_sum(expression):
f = expression.function
for n, limit in enumerate(expression.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer:
if (dif < 0) == True:
a, b = b + 1, a - 1
f = -f
newf = eval_sum_direct(f, (i, a, b))
if newf is not None:
return newf.doit()
def _dummy_with_inherited_properties_concrete(limits):
"""
Return a Dummy symbol that inherits as many assumptions as possible
from the provided symbol and limits.
    If the symbol already has all of the True assumptions shared by the limits
    then return None.
"""
x, a, b = limits
l = [a, b]
assumptions_to_consider = ['extended_nonnegative', 'nonnegative',
'extended_nonpositive', 'nonpositive',
'extended_positive', 'positive',
'extended_negative', 'negative',
'integer', 'rational', 'finite',
'zero', 'real', 'extended_real']
assumptions_to_keep = {}
assumptions_to_add = {}
for assum in assumptions_to_consider:
assum_true = x._assumptions.get(assum, None)
if assum_true:
assumptions_to_keep[assum] = True
elif all(getattr(i, 'is_' + assum) for i in l):
assumptions_to_add[assum] = True
if assumptions_to_add:
assumptions_to_keep.update(assumptions_to_add)
return Dummy('d', **assumptions_to_keep)
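# A minimal illustration (comment only) of how the helper above behaves for a
# plain Symbol and positive integer limits; the exact assumptions inherited
# depend on the limits supplied:
#
#     >>> from sympy import Symbol, Integer
#     >>> d = _dummy_with_inherited_properties_concrete(
#     ...         (Symbol('n'), Integer(1), Integer(5)))
#     >>> d.is_positive, d.is_integer
#     (True, True)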
|
95651bbec7bdde2d90a92c743fdf22781f50d6fd004ec1ea1d8201152747986c | """Tools to assist importing optional external modules."""
import sys
import re
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of SymPy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
_component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def version_tuple(vstring):
# Parse a version string to a tuple e.g. '1.2' -> (1, 2)
# Simplified from distutils.version.LooseVersion which was deprecated in
# Python 3.10.
components = []
for x in _component_re.split(vstring):
if x and x != '.':
try:
x = int(x)
except ValueError:
pass
components.append(x)
return tuple(components)
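# For example, the parsing above gives:
#     version_tuple('1.2')     -> (1, 2)
#     version_tuple('1.10.3')  -> (1, 10, 3)
#     version_tuple('2.0.0b1') -> (2, 0, 0, 'b', 1)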
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
import_kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the import_kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... import_kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... import_kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
    warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
        else (warn_old_version if warn_old_version is not None else True))
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning, stacklevel=2)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **import_kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = import_kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning,
stacklevel=2)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)),
stacklevel=2)
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if version_tuple(modversion) < version_tuple(min_module_version):
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, str):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                    # We don't know what this is; hopefully it's something
                    # that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning, stacklevel=2)
return
return mod
|
f7a350281254baa594372c35fa0805b3007bde9cfba5e75da04f445b31af203f | """
PythonMPQ: Rational number type based on Python integers.
This class is intended as a pure Python fallback for when gmpy2 is not
installed. If gmpy2 is installed then its mpq type will be used instead. The
mpq type is around 20x faster. We could just use the stdlib Fraction class
here but that is slower:
from fractions import Fraction
from sympy.external.pythonmpq import PythonMPQ
nums = range(1000)
dens = range(5, 1005)
rats = [Fraction(n, d) for n, d in zip(nums, dens)]
sum(rats) # <--- 24 milliseconds
rats = [PythonMPQ(n, d) for n, d in zip(nums, dens)]
sum(rats) # <--- 7 milliseconds
Both mpq and Fraction have some awkward features like the behaviour of
division with // and %:
>>> from fractions import Fraction
>>> Fraction(2, 3) % Fraction(1, 4)
1/6
For the QQ domain we do not want this behaviour because there should be no
remainder when dividing rational numbers. SymPy does not make use of this
aspect of mpq when gmpy2 is installed. Since this class is a fallback for that
case we do not bother implementing e.g. __mod__ so that we can be sure we
are not using it when gmpy2 is installed either.
"""
import operator
from math import gcd
from decimal import Decimal
from fractions import Fraction
import sys
from typing import Tuple as tTuple, Type
# Used for __hash__
_PyHASH_MODULUS = sys.hash_info.modulus
_PyHASH_INF = sys.hash_info.inf
class PythonMPQ:
"""Rational number implementation that is intended to be compatible with
gmpy2's mpq.
Also slightly faster than fractions.Fraction.
PythonMPQ should be treated as immutable although no effort is made to
prevent mutation (since that might slow down calculations).
"""
__slots__ = ('numerator', 'denominator')
def __new__(cls, numerator, denominator=None):
"""Construct PythonMPQ with gcd computation and checks"""
if denominator is not None:
#
# PythonMPQ(n, d): require n and d to be int and d != 0
#
if isinstance(numerator, int) and isinstance(denominator, int):
# This is the slow part:
divisor = gcd(numerator, denominator)
numerator //= divisor
denominator //= divisor
return cls._new_check(numerator, denominator)
else:
#
# PythonMPQ(q)
#
# Here q can be PythonMPQ, int, Decimal, float, Fraction or str
#
if isinstance(numerator, int):
return cls._new(numerator, 1)
elif isinstance(numerator, PythonMPQ):
return cls._new(numerator.numerator, numerator.denominator)
# Let Fraction handle Decimal/float conversion and str parsing
if isinstance(numerator, (Decimal, float, str)):
numerator = Fraction(numerator)
if isinstance(numerator, Fraction):
return cls._new(numerator.numerator, numerator.denominator)
#
# Reject everything else. This is more strict than mpq which allows
# things like mpq(Fraction, Fraction) or mpq(Decimal, any). The mpq
# behaviour is somewhat inconsistent so we choose to accept only a
# more strict subset of what mpq allows.
#
raise TypeError("PythonMPQ() requires numeric or string argument")
@classmethod
def _new_check(cls, numerator, denominator):
"""Construct PythonMPQ, check divide by zero and canonicalize signs"""
if not denominator:
raise ZeroDivisionError(f'Zero divisor {numerator}/{denominator}')
elif denominator < 0:
numerator = -numerator
denominator = -denominator
return cls._new(numerator, denominator)
@classmethod
def _new(cls, numerator, denominator):
"""Construct PythonMPQ efficiently (no checks)"""
obj = super().__new__(cls)
obj.numerator = numerator
obj.denominator = denominator
return obj
def __int__(self):
"""Convert to int (truncates towards zero)"""
p, q = self.numerator, self.denominator
if p < 0:
return -(-p//q)
return p//q
def __float__(self):
"""Convert to float (approximately)"""
return self.numerator / self.denominator
def __bool__(self):
"""True/False if nonzero/zero"""
return bool(self.numerator)
def __eq__(self, other):
"""Compare equal with PythonMPQ, int, float, Decimal or Fraction"""
if isinstance(other, PythonMPQ):
return (self.numerator == other.numerator
and self.denominator == other.denominator)
elif isinstance(other, self._compatible_types):
return self.__eq__(PythonMPQ(other))
else:
return NotImplemented
# The hashing algorithm for Fraction changed in Python 3.8
if sys.version_info >= (3, 8):
#
# Hash for Python 3.8 onwards
#
def __hash__(self):
"""hash - same as mpq/Fraction"""
try:
dinv = pow(self.denominator, -1, _PyHASH_MODULUS)
except ValueError:
hash_ = _PyHASH_INF
else:
hash_ = hash(hash(abs(self.numerator)) * dinv)
result = hash_ if self.numerator >= 0 else -hash_
return -2 if result == -1 else result
else:
#
        # Hash for Python < 3.8
#
def __hash__(self):
"""hash - same as mpq/Fraction"""
# This is from fractions.py in the stdlib.
dinv = pow(self.denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
if not dinv:
hash_ = _PyHASH_INF
else:
hash_ = abs(self.numerator) * dinv % _PyHASH_MODULUS
result = hash_ if self >= 0 else -hash_
return -2 if result == -1 else result
def __reduce__(self):
"""Deconstruct for pickling"""
return type(self), (self.numerator, self.denominator)
def __str__(self):
"""Convert to string"""
if self.denominator != 1:
return f"{self.numerator}/{self.denominator}"
else:
return f"{self.numerator}"
def __repr__(self):
"""Convert to string"""
return f"MPQ({self.numerator},{self.denominator})"
def _cmp(self, other, op):
"""Helper for lt/le/gt/ge"""
if not isinstance(other, self._compatible_types):
return NotImplemented
lhs = self.numerator * other.denominator
rhs = other.numerator * self.denominator
return op(lhs, rhs)
def __lt__(self, other):
"""self < other"""
return self._cmp(other, operator.lt)
def __le__(self, other):
"""self <= other"""
return self._cmp(other, operator.le)
def __gt__(self, other):
"""self > other"""
return self._cmp(other, operator.gt)
def __ge__(self, other):
"""self >= other"""
return self._cmp(other, operator.ge)
def __abs__(self):
"""abs(q)"""
return self._new(abs(self.numerator), self.denominator)
def __pos__(self):
"""+q"""
return self
def __neg__(self):
"""-q"""
return self._new(-self.numerator, self.denominator)
def __add__(self, other):
"""q1 + q2"""
if isinstance(other, PythonMPQ):
#
# This is much faster than the naive method used in the stdlib
# fractions module. Not sure where this method comes from
# though...
#
# Compare timings for something like:
# nums = range(1000)
# rats = [PythonMPQ(n, d) for n, d in zip(nums[:-5], nums[5:])]
# sum(rats) # <-- time this
#
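            # Concretely, with g = gcd(aq, bq) the sum is
            #     (ap*(bq//g) + bp*(aq//g)) / ((aq//g)*(bq//g)*g)
            # and only gcd(p, g) is needed to reduce it to lowest terms.
            # (This looks like the gcd-based scheme from Knuth, TAOCP Vol. 2,
            # section 4.5.1.)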
ap, aq = self.numerator, self.denominator
bp, bq = other.numerator, other.denominator
g = gcd(aq, bq)
if g == 1:
p = ap*bq + aq*bp
q = bq*aq
else:
q1, q2 = aq//g, bq//g
p, q = ap*q2 + bp*q1, q1*q2
g2 = gcd(p, g)
p, q = (p // g2), q * (g // g2)
elif isinstance(other, int):
p = self.numerator + self.denominator * other
q = self.denominator
else:
return NotImplemented
return self._new(p, q)
def __radd__(self, other):
"""z1 + q2"""
if isinstance(other, int):
p = self.numerator + self.denominator * other
q = self.denominator
return self._new(p, q)
else:
return NotImplemented
    def __sub__(self, other):
"""q1 - q2"""
if isinstance(other, PythonMPQ):
ap, aq = self.numerator, self.denominator
bp, bq = other.numerator, other.denominator
g = gcd(aq, bq)
if g == 1:
p = ap*bq - aq*bp
q = bq*aq
else:
q1, q2 = aq//g, bq//g
p, q = ap*q2 - bp*q1, q1*q2
g2 = gcd(p, g)
p, q = (p // g2), q * (g // g2)
elif isinstance(other, int):
p = self.numerator - self.denominator*other
q = self.denominator
else:
return NotImplemented
return self._new(p, q)
def __rsub__(self, other):
"""z1 - q2"""
if isinstance(other, int):
p = self.denominator * other - self.numerator
q = self.denominator
return self._new(p, q)
else:
return NotImplemented
def __mul__(self, other):
"""q1 * q2"""
if isinstance(other, PythonMPQ):
ap, aq = self.numerator, self.denominator
bp, bq = other.numerator, other.denominator
x1 = gcd(ap, bq)
x2 = gcd(bp, aq)
p, q = ((ap//x1)*(bp//x2), (aq//x2)*(bq//x1))
elif isinstance(other, int):
x = gcd(other, self.denominator)
p = self.numerator*(other//x)
q = self.denominator//x
else:
return NotImplemented
return self._new(p, q)
def __rmul__(self, other):
"""z1 * q2"""
if isinstance(other, int):
x = gcd(self.denominator, other)
p = self.numerator*(other//x)
q = self.denominator//x
return self._new(p, q)
else:
return NotImplemented
def __pow__(self, exp):
"""q ** z"""
p, q = self.numerator, self.denominator
if exp < 0:
p, q, exp = q, p, -exp
return self._new_check(p**exp, q**exp)
def __truediv__(self, other):
"""q1 / q2"""
if isinstance(other, PythonMPQ):
ap, aq = self.numerator, self.denominator
bp, bq = other.numerator, other.denominator
x1 = gcd(ap, bp)
x2 = gcd(bq, aq)
p, q = ((ap//x1)*(bq//x2), (aq//x2)*(bp//x1))
elif isinstance(other, int):
x = gcd(other, self.numerator)
p = self.numerator//x
q = self.denominator*(other//x)
else:
return NotImplemented
return self._new_check(p, q)
def __rtruediv__(self, other):
"""z / q"""
if isinstance(other, int):
x = gcd(self.numerator, other)
p = self.denominator*(other//x)
q = self.numerator//x
return self._new_check(p, q)
else:
return NotImplemented
_compatible_types: tTuple[Type, ...] = ()
#
# These are the types that PythonMPQ will interoperate with for operations
# and comparisons such as ==, + etc. We define this down here so that we can
# include PythonMPQ in the list as well.
#
PythonMPQ._compatible_types = (PythonMPQ, int, Decimal, Fraction)
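# A short usage sketch (comments only), mirroring the gmpy2 mpq behaviour
# described in the module docstring:
#
#     >>> PythonMPQ(2, 4)
#     MPQ(1,2)
#     >>> PythonMPQ(1, 2) + PythonMPQ(1, 3)
#     MPQ(5,6)
#     >>> PythonMPQ(1, 2) ** -2
#     MPQ(4,1)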
|
5776741d0ca0ed23a0be2c0ebc18a4f1d52d4b13baca236a16a8c04678d33b72 | import os
from typing import Tuple as tTuple, Type
import mpmath.libmp as mlib
from sympy.external import import_module
__all__ = [
# GROUND_TYPES is either 'gmpy' or 'python' depending on which is used. If
# gmpy is installed then it will be used unless the environment variable
# SYMPY_GROUND_TYPES is set to something other than 'auto', 'gmpy', or
# 'gmpy2'.
'GROUND_TYPES',
# If HAS_GMPY is 0, no supported version of gmpy is available. Otherwise,
# HAS_GMPY will be 2 for gmpy2 if GROUND_TYPES is 'gmpy'. It used to be
# possible for HAS_GMPY to be 1 for gmpy but gmpy is no longer supported.
'HAS_GMPY',
# SYMPY_INTS is a tuple containing the base types for valid integer types.
# This is either (int,) or (int, type(mpz(0))) depending on GROUND_TYPES.
'SYMPY_INTS',
# MPQ is either gmpy.mpq or the Python equivalent from
# sympy.external.pythonmpq
'MPQ',
# MPZ is either gmpy.mpz or int.
'MPZ',
# Either the gmpy or the mpmath function
'factorial',
# isqrt from gmpy or mpmath
'sqrt',
]
#
# SYMPY_GROUND_TYPES can be gmpy, gmpy2, python or auto
#
GROUND_TYPES = os.environ.get('SYMPY_GROUND_TYPES', 'auto').lower()
#
# Try to import gmpy2 by default. If gmpy or gmpy2 is specified in
# SYMPY_GROUND_TYPES then warn if gmpy2 is not found. In all cases there is a
# fallback based on pure Python int and PythonMPQ that should still work fine.
#
if GROUND_TYPES in ('auto', 'gmpy', 'gmpy2'):
# Actually import gmpy2
gmpy = import_module('gmpy2', min_module_version='2.0.0',
module_version_attr='version', module_version_attr_call_args=())
# Warn if user explicitly asked for gmpy but it isn't available.
if gmpy is None and GROUND_TYPES in ('gmpy', 'gmpy2'):
from warnings import warn
warn("gmpy library is not installed, switching to 'python' ground types")
elif GROUND_TYPES == 'python':
# The user asked for Python so ignore gmpy2 module.
gmpy = None
else:
# Invalid value for SYMPY_GROUND_TYPES. Ignore the gmpy2 module.
from warnings import warn
warn("SYMPY_GROUND_TYPES environment variable unrecognised. "
"Should be 'python', 'auto', 'gmpy', or 'gmpy2'")
gmpy = None
#
# At this point gmpy will be None if gmpy2 was not successfully imported or if
# the environment variable SYMPY_GROUND_TYPES was set to 'python' (or some
# unrecognised value). The two blocks below define the values exported by this
# module in each case.
#
SYMPY_INTS: tTuple[Type, ...]
if gmpy is not None:
HAS_GMPY = 2
GROUND_TYPES = 'gmpy'
SYMPY_INTS = (int, type(gmpy.mpz(0)))
MPZ = gmpy.mpz
MPQ = gmpy.mpq
factorial = gmpy.fac
sqrt = gmpy.isqrt
else:
from .pythonmpq import PythonMPQ
HAS_GMPY = 0
GROUND_TYPES = 'python'
SYMPY_INTS = (int,)
MPZ = int
MPQ = PythonMPQ
factorial = lambda x: int(mlib.ifac(x))
sqrt = lambda x: int(mlib.isqrt(x))
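# Illustrative sketch (comments only): downstream code imports these names and
# behaves the same under either ground type, e.g.
#
#     from sympy.external.gmpy import MPZ, MPQ
#     MPZ(10)**20              # arbitrary precision integer (gmpy2.mpz or int)
#     MPQ(1, 3) + MPQ(1, 6)    # exact rational arithmetic, equal to 1/2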
|
19cfeff0e96595f96c087cdd387a4d5dd6bf79bd2474a55b523207b796e74d00 | from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.utilities import public
@public
def approximants(l, X=Symbol('x'), simplify=False):
"""
Return a generator for consecutive Pade approximants for a series.
It can also be used for computing the rational generating function of a
series when possible, since the last approximant returned by the generator
will be the generating function (if any).
Explanation
===========
The input list can contain more complex expressions than integer or rational
numbers; symbols may also be involved in the computation. An example below
    shows how to compute the generating function of the whole Pascal triangle.
The generator can be asked to apply the sympy.simplify function on each
generated term, which will make the computation slower; however it may be
useful when symbols are involved in the expressions.
Examples
========
>>> from sympy.series import approximants
>>> from sympy import lucas, fibonacci, symbols, binomial
>>> g = [lucas(k) for k in range(16)]
>>> [e for e in approximants(g)]
[2, -4/(x - 2), (5*x - 2)/(3*x - 1), (x - 2)/(x**2 + x - 1)]
>>> h = [fibonacci(k) for k in range(16)]
>>> [e for e in approximants(h)]
[x, -x/(x - 1), (x**2 - x)/(2*x - 1), -x/(x**2 + x - 1)]
>>> x, t = symbols("x,t")
>>> p=[sum(binomial(k,i)*x**i for i in range(k+1)) for k in range(16)]
>>> y = approximants(p, t)
>>> for k in range(3): print(next(y))
1
(x + 1)/((-x - 1)*(t*(x + 1) + (x + 1)/(-x - 1)))
nan
>>> y = approximants(p, t, simplify=True)
>>> for k in range(3): print(next(y))
1
-1/(t*(x + 1) - 1)
nan
See Also
========
See function sympy.concrete.guess.guess_generating_function_rational and
function mpmath.pade
"""
p1, q1 = [S.One], [S.Zero]
p2, q2 = [S.Zero], [S.One]
while len(l):
b = 0
while l[b]==0:
b += 1
if b == len(l):
return
m = [S.One/l[b]]
for k in range(b+1, len(l)):
s = 0
for j in range(b, k):
s -= l[j+1] * m[b-j-1]
m.append(s/l[b])
l = m
a, l[0] = l[0], 0
p = [0] * max(len(p2), b+len(p1))
q = [0] * max(len(q2), b+len(q1))
for k in range(len(p2)):
p[k] = a*p2[k]
for k in range(b, b+len(p1)):
p[k] += p1[k-b]
for k in range(len(q2)):
q[k] = a*q2[k]
for k in range(b, b+len(q1)):
q[k] += q1[k-b]
while p[-1]==0: p.pop()
while q[-1]==0: q.pop()
p1, p2 = p2, p
q1, q2 = q2, q
# yield result
from sympy.polys.polytools import lcm
from sympy.simplify import simplify as simp
from sympy.simplify.radsimp import denom
c = 1
for x in p:
c = lcm(c, denom(x))
for x in q:
c = lcm(c, denom(x))
out = ( sum(c*e*X**k for k, e in enumerate(p))
/ sum(c*e*X**k for k, e in enumerate(q)) )
if simplify:
yield(simp(out))
else:
yield out
return
|
f554c23843a02323af7308d8e594ff73891613e7ff6cbefd8b3fd3dd61a2b3de | """
Convergence acceleration / extrapolation methods for series and
sequences.
References:
Carl M. Bender & Steven A. Orszag, "Advanced Mathematical Methods for
Scientists and Engineers: Asymptotic Methods and Perturbation Theory",
Springer 1999. (Shanks transformation: pp. 368-375, Richardson
extrapolation: pp. 375-377.)
"""
from sympy.core.numbers import Integer
from sympy.core.singleton import S
from sympy.functions.combinatorial.factorials import factorial
def richardson(A, k, n, N):
"""
Calculate an approximation for lim k->oo A(k) using Richardson
    extrapolation with the N + 1 terms A(n), A(n+1), ..., A(n+N).
Choosing N ~= 2*n often gives good results.
Examples
========
A simple example is to calculate exp(1) using the limit definition.
This limit converges slowly; n = 100 only produces two accurate
digits:
>>> from sympy.abc import n
>>> e = (1 + 1/n)**n
>>> print(round(e.subs(n, 100).evalf(), 10))
2.7048138294
Richardson extrapolation with 11 appropriately chosen terms gives
a value that is accurate to the indicated precision:
>>> from sympy import E
>>> from sympy.series.acceleration import richardson
>>> print(round(richardson(e, n, 10, 20).evalf(), 10))
2.7182818285
>>> print(round(E.evalf(), 10))
2.7182818285
Another useful application is to speed up convergence of series.
Computing 100 terms of the zeta(2) series 1/k**2 yields only
two accurate digits:
>>> from sympy.abc import k, n
>>> from sympy import Sum
>>> A = Sum(k**-2, (k, 1, n))
>>> print(round(A.subs(n, 100).evalf(), 10))
1.6349839002
Richardson extrapolation performs much better:
>>> from sympy import pi
>>> print(round(richardson(A, n, 10, 20).evalf(), 10))
1.6449340668
>>> print(round(((pi**2)/6).evalf(), 10)) # Exact value
1.6449340668
"""
s = S.Zero
for j in range(0, N + 1):
s += (A.subs(k, Integer(n + j)).doit() * (n + j)**N *
S.NegativeOne**(j + N) / (factorial(j) * factorial(N - j)))
return s
def shanks(A, k, n, m=1):
"""
Calculate an approximation for lim k->oo A(k) using the n-term Shanks
transformation S(A)(n). With m > 1, calculate the m-fold recursive
Shanks transformation S(S(...S(A)...))(n).
The Shanks transformation is useful for summing Taylor series that
converge slowly near a pole or singularity, e.g. for log(2):
>>> from sympy.abc import k, n
>>> from sympy import Sum, Integer
>>> from sympy.series.acceleration import shanks
>>> A = Sum(Integer(-1)**(k+1) / k, (k, 1, n))
>>> print(round(A.subs(n, 100).doit().evalf(), 10))
0.6881721793
>>> print(round(shanks(A, n, 25).evalf(), 10))
0.6931396564
>>> print(round(shanks(A, n, 25, 5).evalf(), 10))
0.6931471806
The correct value is 0.6931471805599453094172321215.
"""
table = [A.subs(k, Integer(j)).doit() for j in range(n + m + 2)]
table2 = table[:]
for i in range(1, m + 1):
for j in range(i, n + m + 1):
x, y, z = table[j - 1], table[j], table[j + 1]
table2[j] = (z*x - y**2) / (z + x - 2*y)
table = table2[:]
return table[n]
|
b5a7c724f8e8fb8fc48ad71c319bceb3b584a176b770ce064e48f43206d0deac | """
Limits
======
Implemented according to the PhD thesis
http://www.cybertester.com/data/gruntz.pdf, which contains very thorough
descriptions of the algorithm including many examples. We summarize here
the gist of it.
All functions are sorted according to how rapidly varying they are at
infinity using the following rules. Any two functions f and g can be
compared using the properties of L:
L=lim log|f(x)| / log|g(x)| (for x -> oo)
We define >, < ~ according to::
1. f > g .... L=+-oo
we say that:
- f is greater than any power of g
- f is more rapidly varying than g
- f goes to infinity/zero faster than g
2. f < g .... L=0
we say that:
- f is lower than any power of g
3. f ~ g .... L!=0, +-oo
we say that:
- both f and g are bounded from above and below by suitable integral
powers of the other
Examples
========
::
2 < x < exp(x) < exp(x**2) < exp(exp(x))
2 ~ 3 ~ -5
x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x
exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))
f ~ 1/f
So we can divide all the functions into comparability classes (x and x^2
belong to one class, exp(x) and exp(-x) belong to some other class). In
principle, we could compare any two functions, but in our algorithm, we
do not compare anything below the class 2~3~-5 (for example log(x) is
below this), so we set 2~3~-5 as the lowest comparability class.
Given the function f, we find the list of most rapidly varying (mrv set)
subexpressions of it. This list belongs to the same comparability class.
Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an
element "w" (either from the list or a new one) from the same
comparability class which goes to zero at infinity. In our example we
set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We
rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it
into f. Then we expand f into a series in w::
f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0
but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero,
because w goes to zero faster than the ci and ei. So::
for e0>0, lim f = 0
for e0<0, lim f = +-oo (the sign depends on the sign of c0)
for e0=0, lim f = lim c0
We need to recursively compute limits at several places of the algorithm, but
as is shown in the PhD thesis, it always finishes.
Important functions from the implementation:
compare(a, b, x) compares "a" and "b" by computing the limit L.
mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e"
rewrite(e, Omega, x, wsym) rewrites "e" in terms of w
leadterm(f, x) returns the lowest power term in the series of f
mrv_leadterm(e, x) returns the lead term (c0, e0) for e
limitinf(e, x) computes lim e (for x->oo)
limit(e, z, z0) computes any limit by converting it to the case x->oo
All the functions are really simple and straightforward except
rewrite(), which is the most difficult/complex part of the algorithm.
When the algorithm fails, the bugs are usually in the series expansion
(i.e. in SymPy) or in rewrite.
This code is an almost exact rewrite of the Maple code from the Gruntz
thesis.
Debugging
---------
Because the gruntz algorithm is highly recursive, it's difficult to
figure out what went wrong inside a debugger. Instead, turn on nice
debug prints by defining the environment variable SYMPY_DEBUG. For
example:
[user@localhost]: SYMPY_DEBUG=True ./bin/isympy
In [1]: limit(sin(x)/x, x, 0)
limitinf(_x*sin(1/_x), _x) = 1
+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)
| +-mrv(_x*sin(1/_x), _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| | +-mrv(sin(1/_x), _x) = set([_x])
| | +-mrv(1/_x, _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)
| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)
| +-sign(_x, _x) = 1
| +-mrv_leadterm(1, _x) = (1, 0)
+-sign(0, _x) = 0
+-limitinf(1, _x) = 1
And check manually which line is wrong. Then go to the source code and
debug this function to figure out the exact problem.
"""
from functools import reduce
from sympy.core import Basic, S, Mul, PoleError
from sympy.core.cache import cacheit
from sympy.core.numbers import ilcm, I, oo
from sympy.core.symbol import Dummy, Wild
from sympy.core.traversal import bottom_up
from sympy.functions import log, exp, sign as _sign
from sympy.series.order import Order
from sympy.simplify import logcombine
from sympy.simplify.powsimp import powsimp, powdenest
from sympy.utilities.misc import debug_decorator as debug
from sympy.utilities.timeutils import timethis
timeit = timethis('gruntz')
def compare(a, b, x):
"""Returns "<" if a<b, "=" for a == b, ">" for a>b"""
# log(exp(...)) must always be simplified here for termination
la, lb = log(a), log(b)
if isinstance(a, Basic) and (isinstance(a, exp) or (a.is_Pow and a.base == S.Exp1)):
la = a.exp
if isinstance(b, Basic) and (isinstance(b, exp) or (b.is_Pow and b.base == S.Exp1)):
lb = b.exp
c = limitinf(la/lb, x)
if c == 0:
return "<"
elif c.is_infinite:
return ">"
else:
return "="
class SubsSet(dict):
"""
Stores (expr, dummy) pairs, and how to rewrite expr-s.
Explanation
===========
The gruntz algorithm needs to rewrite certain expressions in term of a new
variable w. We cannot use subs, because it is just too smart for us. For
example::
> Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]
> O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]
> e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))
> e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])
-1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))
is really not what we want!
So we do it the hard way and keep track of all the things we potentially
want to substitute by dummy variables. Consider the expression::
exp(x - exp(-x)) + exp(x) + x.
The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.
We introduce corresponding dummy variables d1, d2, d3 and rewrite::
d3 + d1 + x.
This class first of all keeps track of the mapping expr->variable, i.e.
will at this stage be a dictionary::
{exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.
[It turns out to be more convenient this way round.]
But sometimes expressions in the mrv set have other expressions from the
mrv set as subexpressions, and we need to keep track of that as well. In
this case, d3 is really exp(x - d2), so rewrites at this stage is::
{d3: exp(x-d2)}.
The function rewrite uses all this information to correctly rewrite our
expression in terms of w. In this case w can be chosen to be exp(-x),
i.e. d2. The correct rewriting then is::
exp(-w)/w + 1/w + x.
"""
def __init__(self):
self.rewrites = {}
def __repr__(self):
return super().__repr__() + ', ' + self.rewrites.__repr__()
def __getitem__(self, key):
        if key not in self:
self[key] = Dummy()
return dict.__getitem__(self, key)
def do_subs(self, e):
"""Substitute the variables with expressions"""
for expr, var in self.items():
e = e.xreplace({var: expr})
return e
def meets(self, s2):
"""Tell whether or not self and s2 have non-empty intersection"""
return set(self.keys()).intersection(list(s2.keys())) != set()
def union(self, s2, exps=None):
"""Compute the union of self and s2, adjusting exps"""
res = self.copy()
tr = {}
for expr, var in s2.items():
if expr in self:
if exps:
exps = exps.xreplace({var: res[expr]})
tr[var] = res[expr]
else:
res[expr] = var
for var, rewr in s2.rewrites.items():
res.rewrites[var] = rewr.xreplace(tr)
return res, exps
def copy(self):
"""Create a shallow copy of SubsSet"""
r = SubsSet()
r.rewrites = self.rewrites.copy()
for expr, var in self.items():
r[expr] = var
return r
@debug
def mrv(e, x):
"""Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',
and e rewritten in terms of these"""
e = powsimp(e, deep=True, combine='exp')
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if not e.has(x):
return SubsSet(), e
elif e == x:
s = SubsSet()
return s, s[x]
elif e.is_Mul or e.is_Add:
i, d = e.as_independent(x) # throw away x-independent terms
if d.func != e.func:
s, expr = mrv(d, x)
return s, e.func(i, expr)
a, b = d.as_two_terms()
s1, e1 = mrv(a, x)
s2, e2 = mrv(b, x)
return mrv_max1(s1, s2, e.func(i, e1, e2), x)
elif e.is_Pow and e.base != S.Exp1:
e1 = S.One
while e.is_Pow:
b1 = e.base
e1 *= e.exp
e = b1
if b1 == 1:
return SubsSet(), b1
if e1.has(x):
base_lim = limitinf(b1, x)
if base_lim is S.One:
return mrv(exp(e1 * (b1 - 1)), x)
return mrv(exp(e1 * log(b1)), x)
else:
s, expr = mrv(b1, x)
return s, expr**e1
elif isinstance(e, log):
s, expr = mrv(e.args[0], x)
return s, log(expr)
elif isinstance(e, exp) or (e.is_Pow and e.base == S.Exp1):
# We know from the theory of this algorithm that exp(log(...)) may always
# be simplified here, and doing so is vital for termination.
if isinstance(e.exp, log):
return mrv(e.exp.args[0], x)
# if a product has an infinite factor the result will be
# infinite if there is no zero, otherwise NaN; here, we
# consider the result infinite if any factor is infinite
li = limitinf(e.exp, x)
if any(_.is_infinite for _ in Mul.make_args(li)):
s1 = SubsSet()
e1 = s1[e]
s2, e2 = mrv(e.exp, x)
su = s1.union(s2)[0]
su.rewrites[e1] = exp(e2)
return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)
else:
s, expr = mrv(e.exp, x)
return s, exp(expr)
elif e.is_Function:
l = [mrv(a, x) for a in e.args]
l2 = [s for (s, _) in l if s != SubsSet()]
if len(l2) != 1:
# e.g. something like BesselJ(x, x)
raise NotImplementedError("MRV set computation for functions in"
" several variables not implemented.")
s, ss = l2[0], SubsSet()
args = [ss.do_subs(x[1]) for x in l]
return s, e.func(*args)
elif e.is_Derivative:
raise NotImplementedError("MRV set computation for derviatives"
" not implemented yet.")
raise NotImplementedError(
"Don't know how to calculate the mrv of '%s'" % e)
def mrv_max3(f, expsf, g, expsg, union, expsboth, x):
"""
Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. max() compares (two elements of)
f and g and returns either (f, expsf) [if f is larger], (g, expsg)
[if g is larger] or (union, expsboth) [if f, g are of the same class].
"""
if not isinstance(f, SubsSet):
raise TypeError("f should be an instance of SubsSet")
if not isinstance(g, SubsSet):
raise TypeError("g should be an instance of SubsSet")
if f == SubsSet():
return g, expsg
elif g == SubsSet():
return f, expsf
elif f.meets(g):
return union, expsboth
c = compare(list(f.keys())[0], list(g.keys())[0], x)
if c == ">":
return f, expsf
elif c == "<":
return g, expsg
else:
if c != "=":
raise ValueError("c should be =")
return union, expsboth
def mrv_max1(f, g, exps, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max1() compares (two elements of)
f and g and returns the set, which is in the higher comparability class
of the union of both, if they have the same order of variation.
Also returns exps, with the appropriate substitutions made.
"""
u, b = f.union(g, exps)
return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),
u, b, x)
@debug
@cacheit
@timeit
def sign(e, x):
"""
Returns a sign of an expression e(x) for x->oo.
::
e > 0 for x sufficiently large ... 1
e == 0 for x sufficiently large ... 0
e < 0 for x sufficiently large ... -1
The result of this function is currently undefined if e changes sign
arbitrarily often for arbitrarily large x (e.g. sin(x)).
Note that this returns zero only if e is *constantly* zero
for x sufficiently large. [If e is constant, of course, this is just
the same thing as the sign of e.]
"""
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
e = logcombine(e)
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif isinstance(e, exp):
return 1
elif e.is_Pow:
if e.base == S.Exp1:
return 1
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif isinstance(e, log):
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
@debug
@timeit
@cacheit
def limitinf(e, x, leadsimp=False):
"""Limit e(x) for x-> oo.
Explanation
===========
If ``leadsimp`` is True, an attempt is made to simplify the leading
term of the series expansion of ``e``. That may succeed even if
``e`` cannot be simplified.
"""
# rewrite e in terms of tractable functions only
if not e.has(x):
return e # e is a constant
if e.has(Order):
e = e.expand().removeO()
if not x.is_positive or x.is_integer:
# We make sure that x.is_positive is True and x.is_integer is None
# so we get all the correct mathematical behavior from the expression.
# We need a fresh variable.
p = Dummy('p', positive=True)
e = e.subs(x, p)
x = p
e = e.rewrite('tractable', deep=True, limitvar=x)
e = powdenest(e)
c0, e0 = mrv_leadterm(e, x)
sig = sign(e0, x)
if sig == 1:
return S.Zero # e0>0: lim f = 0
elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)
if c0.match(I*Wild("a", exclude=[I])):
return c0*oo
s = sign(c0, x)
# the leading term shouldn't be 0:
if s == 0:
raise ValueError("Leading term should not be 0")
return s*oo
elif sig == 0:
if leadsimp:
c0 = c0.simplify()
return limitinf(c0, x, leadsimp) # e0=0: lim f = lim c0
else:
raise ValueError("{} could not be evaluated".format(sig))
def moveup2(s, x):
r = SubsSet()
for expr, var in s.items():
r[expr.xreplace({x: exp(x)})] = var
for var, expr in s.rewrites.items():
r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)})
return r
def moveup(l, x):
return [e.xreplace({x: exp(x)}) for e in l]
@debug
@timeit
def calculate_series(e, x, logx=None):
""" Calculates at least one term of the series of ``e`` in ``x``.
This is a place that fails most often, so it is in its own function.
"""
from sympy.polys import cancel
for t in e.lseries(x, logx=logx):
# bottom_up function is required for a specific case - when e is
# -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p). No current simplification
# methods reduce this to 0 while not expanding polynomials.
t = bottom_up(t, lambda w: getattr(w, 'normal', lambda: w)())
t = cancel(t, expand=False).factor()
if t.has(exp) and t.has(log):
t = powdenest(t)
if not t.is_zero:
break
return t
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
"""Returns (c0, e0) for e."""
Omega = SubsSet()
if not e.has(x):
return (e, S.Zero)
if Omega == SubsSet():
Omega, exps = mrv(e, x)
if not Omega:
# e really does not depend on x after simplification
return exps, S.Zero
if x in Omega:
# move the whole omega up (exponentiate each term):
Omega_up = moveup2(Omega, x)
exps_up = moveup([exps], x)[0]
# NOTE: there is no need to move this down!
Omega = Omega_up
exps = exps_up
#
# The positive dummy, w, is used here so log(w*2) etc. will expand;
# a unique dummy is needed in this algorithm
#
# For limits of complex functions, the algorithm would have to be
# improved, or just find limits of Re and Im components separately.
#
w = Dummy("w", real=True, positive=True)
f, logw = rewrite(exps, Omega, x, w)
series = calculate_series(f, w, logx=logw)
try:
lt = series.leadterm(w, logx=logw)
except (ValueError, PoleError):
lt = f.as_coeff_exponent(w)
# as_coeff_exponent won't always split in required form. It may simply
# return (f, 0) when a better form may be obtained. Example (-x)**(-pi)
    # can be written as ((-1)**(-pi), -pi), which as_coeff_exponent does not return
if lt[0].has(w):
base = f.as_base_exp()[0].as_coeff_exponent(w)
ex = f.as_base_exp()[1]
lt = (base[0]**ex, base[1]*ex)
return (lt[0].subs(log(w), logw), lt[1])
def build_expression_tree(Omega, rewrites):
r""" Helper function for rewrite.
We need to sort Omega (mrv set) so that we replace an expression before
we replace any expression in terms of which it has to be rewritten::
e1 ---> e2 ---> e3
\
-> e4
Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.
To do this we assemble the nodes into a tree, and sort them by height.
This function builds the tree, rewrites then sorts the nodes.
"""
class Node:
def __init__(self):
self.before = []
self.expr = None
self.var = None
def ht(self):
return reduce(lambda x, y: x + y,
[x.ht() for x in self.before], 1)
nodes = {}
for expr, v in Omega:
n = Node()
n.var = v
n.expr = expr
nodes[v] = n
for _, v in Omega:
if v in rewrites:
n = nodes[v]
r = rewrites[v]
for _, v2 in Omega:
if r.has(v2):
n.before.append(nodes[v2])
return nodes
@debug
@timeit
def rewrite(e, Omega, x, wsym):
"""e(x) ... the function
Omega ... the mrv set
wsym ... the symbol which is going to be used for w
Returns the rewritten e in terms of w and log(w). See test_rewrite1()
for examples and correct results.
"""
if not isinstance(Omega, SubsSet):
raise TypeError("Omega should be an instance of SubsSet")
if len(Omega) == 0:
raise ValueError("Length cannot be 0")
# all items in Omega must be exponentials
for t in Omega.keys():
if not isinstance(t, exp):
raise ValueError("Value should be exp")
rewrites = Omega.rewrites
Omega = list(Omega.items())
nodes = build_expression_tree(Omega, rewrites)
Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)
# make sure we know the sign of each exp() term; after the loop,
# g is going to be the "w" - the simplest one in the mrv set
for g, _ in Omega:
sig = sign(g.exp, x)
if sig != 1 and sig != -1:
raise NotImplementedError('Result depends on the sign of %s' % sig)
if sig == 1:
wsym = 1/wsym # if g goes to oo, substitute 1/w
# O2 is a list, which results by rewriting each item in Omega using "w"
O2 = []
denominators = []
for f, var in Omega:
c = limitinf(f.exp/g.exp, x)
if c.is_Rational:
denominators.append(c.q)
arg = f.exp
if var in rewrites:
if not isinstance(rewrites[var], exp):
raise ValueError("Value should be exp")
arg = rewrites[var].args[0]
O2.append((var, exp((arg - c*g.exp).expand())*wsym**c))
# Remember that Omega contains subexpressions of "e". So now we find
# them in "e" and substitute them for our rewriting, stored in O2
# the following powsimp is necessary to automatically combine exponentials,
# so that the .xreplace() below succeeds:
# TODO this should not be necessary
f = powsimp(e, deep=True, combine='exp')
for a, b in O2:
f = f.xreplace({a: b})
for _, var in Omega:
assert not f.has(var)
# finally compute the logarithm of w (logw).
logw = g.exp
if sig == 1:
logw = -logw # log(w)->log(1/w)=-log(w)
# Some parts of SymPy have difficulty computing series expansions with
# non-integral exponents. The following heuristic improves the situation:
exponent = reduce(ilcm, denominators, 1)
f = f.subs({wsym: wsym**exponent})
logw /= exponent
return f, logw
def gruntz(e, z, z0, dir="+"):
"""
Compute the limit of e(z) at the point z0 using the Gruntz algorithm.
Explanation
===========
``z0`` can be any expression, including oo and -oo.
For ``dir="+"`` (default) it calculates the limit from the right
(z->z0+) and for ``dir="-"`` the limit from the left (z->z0-). For infinite z0
(oo or -oo), the dir argument doesn't matter.
This algorithm is fully described in the module docstring in the gruntz.py
file. It relies heavily on the series expansion. Most frequently, gruntz()
is only used if the faster limit() function (which uses heuristics) fails.
"""
if not z.is_symbol:
raise NotImplementedError("Second argument must be a Symbol")
# convert all limits to the limit z->oo; sign of z is handled in limitinf
r = None
if z0 == oo:
e0 = e
elif z0 == -oo:
e0 = e.subs(z, -z)
else:
if str(dir) == "-":
e0 = e.subs(z, z0 - 1/z)
elif str(dir) == "+":
e0 = e.subs(z, z0 + 1/z)
else:
raise NotImplementedError("dir must be '+' or '-'")
try:
r = limitinf(e0, z)
except ValueError:
r = limitinf(e0, z, leadsimp=True)
# This is a bit of a heuristic for nice results... we always rewrite
# tractable functions in terms of familiar intractable ones.
# It might be nicer to rewrite the exactly to what they were initially,
# but that would take some work to implement.
return r.rewrite('intractable', deep=True)
|
443a5147ab74cd0fbfa9d57dc597ed6cd1d6a75ed0ef91e3169e7bd4b90848fd | from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.containers import Tuple
from sympy.core.decorators import call_highest_priority
from sympy.core.parameters import global_parameters
from sympy.core.function import AppliedUndef
from sympy.core.mul import Mul
from sympy.core.numbers import Integer
from sympy.core.relational import Eq
from sympy.core.singleton import S, Singleton
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy, Symbol, Wild
from sympy.core.sympify import sympify
from sympy.polys import lcm, factor
from sympy.sets.sets import Interval, Intersection
from sympy.simplify import simplify
from sympy.tensor.indexed import Idx
from sympy.utilities.iterables import flatten, is_sequence, iterable
from sympy.core.function import expand
###############################################################################
# SEQUENCES #
###############################################################################
class SeqBase(Basic):
"""Base class for sequences"""
is_commutative = True
_op_priority = 15
@staticmethod
def _start_key(expr):
"""Return start (if possible) else S.Infinity.
adapted from Set._infimum_key
"""
try:
start = expr.start
except (NotImplementedError,
AttributeError, ValueError):
start = S.Infinity
return start
def _intersect_interval(self, other):
"""Returns start and stop.
Takes intersection over the two intervals.
"""
interval = Intersection(self.interval, other.interval)
return interval.inf, interval.sup
@property
def gen(self):
"""Returns the generator for the sequence"""
raise NotImplementedError("(%s).gen" % self)
@property
def interval(self):
"""The interval on which the sequence is defined"""
raise NotImplementedError("(%s).interval" % self)
@property
def start(self):
"""The starting point of the sequence. This point is included"""
raise NotImplementedError("(%s).start" % self)
@property
def stop(self):
"""The ending point of the sequence. This point is included"""
raise NotImplementedError("(%s).stop" % self)
@property
def length(self):
"""Length of the sequence"""
raise NotImplementedError("(%s).length" % self)
@property
def variables(self):
"""Returns a tuple of variables that are bounded"""
return ()
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n, m
>>> SeqFormula(m*n**2, (n, 0, 5)).free_symbols
{m}
"""
return ({j for i in self.args for j in i.free_symbols
.difference(self.variables)})
@cacheit
def coeff(self, pt):
"""Returns the coefficient at point pt"""
if pt < self.start or pt > self.stop:
raise IndexError("Index %s out of bounds %s" % (pt, self.interval))
return self._eval_coeff(pt)
def _eval_coeff(self, pt):
raise NotImplementedError("The _eval_coeff method should be added to"
"%s to return coefficient so it is available"
"when coeff calls it."
% self.func)
def _ith_point(self, i):
"""Returns the i'th point of a sequence.
Explanation
===========
If start point is negative infinity, point is returned from the end.
Assumes the first point to be indexed zero.
Examples
=========
>>> from sympy import oo
>>> from sympy.series.sequences import SeqPer
bounded
>>> SeqPer((1, 2, 3), (-10, 10))._ith_point(0)
-10
>>> SeqPer((1, 2, 3), (-10, 10))._ith_point(5)
-5
End is at infinity
>>> SeqPer((1, 2, 3), (0, oo))._ith_point(5)
5
Starts at negative infinity
>>> SeqPer((1, 2, 3), (-oo, 0))._ith_point(5)
-5
"""
if self.start is S.NegativeInfinity:
initial = self.stop
else:
initial = self.start
if self.start is S.NegativeInfinity:
step = -1
else:
step = 1
return initial + i*step
def _add(self, other):
"""
Should only be used internally.
Explanation
===========
self._add(other) returns a new, term-wise added sequence if self
knows how to add with other, otherwise it returns ``None``.
``other`` should only be a sequence object.
Used within :class:`SeqAdd` class.
"""
return None
def _mul(self, other):
"""
Should only be used internally.
Explanation
===========
self._mul(other) returns a new, term-wise multiplied sequence if self
knows how to multiply with other, otherwise it returns ``None``.
``other`` should only be a sequence object.
Used within :class:`SeqMul` class.
"""
return None
def coeff_mul(self, other):
"""
Should be used when ``other`` is not a sequence. Should be
defined to define custom behaviour.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2).coeff_mul(2)
SeqFormula(2*n**2, (n, 0, oo))
Notes
=====
'*' defines multiplication of sequences with sequences only.
"""
return Mul(self, other)
def __add__(self, other):
"""Returns the term-wise addition of 'self' and 'other'.
``other`` should be a sequence.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2) + SeqFormula(n**3)
SeqFormula(n**3 + n**2, (n, 0, oo))
"""
if not isinstance(other, SeqBase):
raise TypeError('cannot add sequence and %s' % type(other))
return SeqAdd(self, other)
@call_highest_priority('__add__')
def __radd__(self, other):
return self + other
def __sub__(self, other):
"""Returns the term-wise subtraction of ``self`` and ``other``.
``other`` should be a sequence.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2) - (SeqFormula(n))
SeqFormula(n**2 - n, (n, 0, oo))
"""
if not isinstance(other, SeqBase):
raise TypeError('cannot subtract sequence and %s' % type(other))
return SeqAdd(self, -other)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return (-self) + other
def __neg__(self):
"""Negates the sequence.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> -SeqFormula(n**2)
SeqFormula(-n**2, (n, 0, oo))
"""
return self.coeff_mul(-1)
def __mul__(self, other):
"""Returns the term-wise multiplication of 'self' and 'other'.
``other`` should be a sequence. For ``other`` not being a
sequence see :func:`coeff_mul` method.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2) * (SeqFormula(n))
SeqFormula(n**3, (n, 0, oo))
"""
if not isinstance(other, SeqBase):
raise TypeError('cannot multiply sequence and %s' % type(other))
return SeqMul(self, other)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return self * other
def __iter__(self):
for i in range(self.length):
pt = self._ith_point(i)
yield self.coeff(pt)
def __getitem__(self, index):
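# Integer indices are routed through _ith_point so that sequences whose
# interval starts at an arbitrary (possibly -oo) point can still be indexed
# from 0; slices reuse the same mapping element by element.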
if isinstance(index, int):
index = self._ith_point(index)
return self.coeff(index)
elif isinstance(index, slice):
start, stop = index.start, index.stop
if start is None:
start = 0
if stop is None:
stop = self.length
return [self.coeff(self._ith_point(i)) for i in
range(start, stop, index.step or 1)]
def find_linear_recurrence(self,n,d=None,gfvar=None):
r"""
Finds the shortest linear recurrence of order `\leq` ``n/2`` that the
first ``n`` terms of the sequence satisfy, if one exists.
If ``d`` is specified, finds the shortest linear recurrence of order
`\leq` min(d, n/2), if one exists.
Returns list of coefficients ``[b(1), b(2), ...]`` corresponding to the
recurrence relation ``x(n) = b(1)*x(n-1) + b(2)*x(n-2) + ...``
Returns ``[]`` if no recurrence is found.
If gfvar is specified, also returns ordinary generating function as a
function of gfvar.
Examples
========
>>> from sympy import sequence, sqrt, oo, lucas
>>> from sympy.abc import n, x, y
>>> sequence(n**2).find_linear_recurrence(10, 2)
[]
>>> sequence(n**2).find_linear_recurrence(10)
[3, -3, 1]
>>> sequence(2**n).find_linear_recurrence(10)
[2]
>>> sequence(23*n**4+91*n**2).find_linear_recurrence(10)
[5, -10, 10, -5, 1]
>>> sequence(sqrt(5)*(((1 + sqrt(5))/2)**n - (-(1 + sqrt(5))/2)**(-n))/5).find_linear_recurrence(10)
[1, 1]
>>> sequence(x+y*(-2)**(-n), (n, 0, oo)).find_linear_recurrence(30)
[1/2, 1/2]
>>> sequence(3*5**n + 12).find_linear_recurrence(20,gfvar=x)
([6, -5], 3*(5 - 21*x)/((x - 1)*(5*x - 1)))
>>> sequence(lucas(n)).find_linear_recurrence(15,gfvar=x)
([1, 1], (x - 2)/(x**2 + x - 1))
"""
from sympy.matrices import Matrix
x = [simplify(expand(t)) for t in self[:n]]
lx = len(x)
if d is None:
r = lx//2
else:
r = min(d,lx//2)
coeffs = []
for l in range(1, r+1):
l2 = 2*l
mlist = []
for k in range(l):
mlist.append(x[k:k+l])
m = Matrix(mlist)
if m.det() != 0:
y = simplify(m.LUsolve(Matrix(x[l:l2])))
if lx == l2:
coeffs = flatten(y[::-1])
break
mlist = []
for k in range(l,lx-l):
mlist.append(x[k:k+l])
m = Matrix(mlist)
if m*y == Matrix(x[l2:]):
coeffs = flatten(y[::-1])
break
if gfvar is None:
return coeffs
else:
l = len(coeffs)
if l == 0:
return [], None
else:
n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l
for i in range(l-1):
n += x[i]*gfvar**i
for j in range(l-i-1):
n -= coeffs[i]*x[j]*gfvar**(i+j+1)
d -= coeffs[i]*gfvar**(i+1)
return coeffs, simplify(factor(n)/factor(d))
class EmptySequence(SeqBase, metaclass=Singleton):
"""Represents an empty sequence.
The empty sequence is also available as a singleton as
``S.EmptySequence``.
Examples
========
>>> from sympy import EmptySequence, SeqPer
>>> from sympy.abc import x
>>> EmptySequence
EmptySequence
>>> SeqPer((1, 2), (x, 0, 10)) + EmptySequence
SeqPer((1, 2), (x, 0, 10))
>>> SeqPer((1, 2)) * EmptySequence
EmptySequence
>>> EmptySequence.coeff_mul(-1)
EmptySequence
"""
@property
def interval(self):
return S.EmptySet
@property
def length(self):
return S.Zero
def coeff_mul(self, coeff):
"""See docstring of SeqBase.coeff_mul"""
return self
def __iter__(self):
return iter([])
class SeqExpr(SeqBase):
"""Sequence expression class.
Various sequences should inherit from this class.
Examples
========
>>> from sympy.series.sequences import SeqExpr
>>> from sympy.abc import x
>>> s = SeqExpr((1, 2, 3), (x, 0, 10))
>>> s.gen
(1, 2, 3)
>>> s.interval
Interval(0, 10)
>>> s.length
11
See Also
========
sympy.series.sequences.SeqPer
sympy.series.sequences.SeqFormula
"""
@property
def gen(self):
return self.args[0]
@property
def interval(self):
return Interval(self.args[1][1], self.args[1][2])
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return self.stop - self.start + 1
@property
def variables(self):
return (self.args[1][0],)
class SeqPer(SeqExpr):
"""
Represents a periodic sequence.
The elements are repeated after a given period.
Examples
========
>>> from sympy import SeqPer, oo
>>> from sympy.abc import k
>>> s = SeqPer((1, 2, 3), (0, 5))
>>> s.periodical
(1, 2, 3)
>>> s.period
3
For value at a particular point
>>> s.coeff(3)
1
supports slicing
>>> s[:]
[1, 2, 3, 1, 2, 3]
iterable
>>> list(s)
[1, 2, 3, 1, 2, 3]
sequence starts from negative infinity
>>> SeqPer((1, 2, 3), (-oo, 0))[0:6]
[1, 2, 3, 1, 2, 3]
Periodic formulas
>>> SeqPer((k, k**2, k**3), (k, 0, oo))[0:6]
[0, 1, 8, 3, 16, 125]
See Also
========
sympy.series.sequences.SeqFormula
"""
def __new__(cls, periodical, limits=None):
periodical = sympify(periodical)
def _find_x(periodical):
free = periodical.free_symbols
if len(free) == 1:
return free.pop()
else:
return Dummy('k')
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(periodical), 0, S.Infinity
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(periodical)
start, stop = limits
if not isinstance(x, (Symbol, Idx)) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
if start is S.NegativeInfinity and stop is S.Infinity:
raise ValueError("Both the start and end value"
"cannot be unbounded")
limits = sympify((x, start, stop))
if is_sequence(periodical, Tuple):
periodical = sympify(tuple(flatten(periodical)))
else:
raise ValueError("invalid period %s should be something "
"like e.g (1, 2) " % periodical)
if Interval(limits[1], limits[2]) is S.EmptySet:
return S.EmptySequence
return Basic.__new__(cls, periodical, limits)
@property
def period(self):
return len(self.gen)
@property
def periodical(self):
return self.gen
def _eval_coeff(self, pt):
if self.start is S.NegativeInfinity:
idx = (self.stop - pt) % self.period
else:
idx = (pt - self.start) % self.period
return self.periodical[idx].subs(self.variables[0], pt)
def _add(self, other):
"""See docstring of SeqBase._add"""
if isinstance(other, SeqPer):
per1, lper1 = self.periodical, self.period
per2, lper2 = other.periodical, other.period
per_length = lcm(lper1, lper2)
new_per = []
for x in range(per_length):
ele1 = per1[x % lper1]
ele2 = per2[x % lper2]
new_per.append(ele1 + ele2)
start, stop = self._intersect_interval(other)
return SeqPer(new_per, (self.variables[0], start, stop))
def _mul(self, other):
"""See docstring of SeqBase._mul"""
if isinstance(other, SeqPer):
per1, lper1 = self.periodical, self.period
per2, lper2 = other.periodical, other.period
per_length = lcm(lper1, lper2)
new_per = []
for x in range(per_length):
ele1 = per1[x % lper1]
ele2 = per2[x % lper2]
new_per.append(ele1 * ele2)
start, stop = self._intersect_interval(other)
return SeqPer(new_per, (self.variables[0], start, stop))
def coeff_mul(self, coeff):
"""See docstring of SeqBase.coeff_mul"""
coeff = sympify(coeff)
per = [x * coeff for x in self.periodical]
return SeqPer(per, self.args[1])
class SeqFormula(SeqExpr):
"""
Represents sequence based on a formula.
Elements are generated using a formula.
Examples
========
>>> from sympy import SeqFormula, oo, Symbol
>>> n = Symbol('n')
>>> s = SeqFormula(n**2, (n, 0, 5))
>>> s.formula
n**2
For value at a particular point
>>> s.coeff(3)
9
supports slicing
>>> s[:]
[0, 1, 4, 9, 16, 25]
iterable
>>> list(s)
[0, 1, 4, 9, 16, 25]
sequence starts from negative infinity
>>> SeqFormula(n**2, (-oo, 0))[0:6]
[0, 1, 4, 9, 16, 25]
See Also
========
sympy.series.sequences.SeqPer
"""
def __new__(cls, formula, limits=None):
formula = sympify(formula)
def _find_x(formula):
free = formula.free_symbols
if len(free) == 1:
return free.pop()
elif not free:
return Dummy('k')
else:
raise ValueError(
" specify dummy variables for %s. If the formula contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g., SeqFormula(m*n**2, (n, 0, 5))"
% formula)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(formula), 0, S.Infinity
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(formula)
start, stop = limits
if not isinstance(x, (Symbol, Idx)) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
if start is S.NegativeInfinity and stop is S.Infinity:
raise ValueError("Both the start and end value "
"cannot be unbounded")
limits = sympify((x, start, stop))
if Interval(limits[1], limits[2]) is S.EmptySet:
return S.EmptySequence
return Basic.__new__(cls, formula, limits)
@property
def formula(self):
return self.gen
def _eval_coeff(self, pt):
d = self.variables[0]
return self.formula.subs(d, pt)
def _add(self, other):
"""See docstring of SeqBase._add"""
if isinstance(other, SeqFormula):
form1, v1 = self.formula, self.variables[0]
form2, v2 = other.formula, other.variables[0]
formula = form1 + form2.subs(v2, v1)
start, stop = self._intersect_interval(other)
return SeqFormula(formula, (v1, start, stop))
def _mul(self, other):
"""See docstring of SeqBase._mul"""
if isinstance(other, SeqFormula):
form1, v1 = self.formula, self.variables[0]
form2, v2 = other.formula, other.variables[0]
formula = form1 * form2.subs(v2, v1)
start, stop = self._intersect_interval(other)
return SeqFormula(formula, (v1, start, stop))
def coeff_mul(self, coeff):
"""See docstring of SeqBase.coeff_mul"""
coeff = sympify(coeff)
formula = self.formula * coeff
return SeqFormula(formula, self.args[1])
def expand(self, *args, **kwargs):
return SeqFormula(expand(self.formula, *args, **kwargs), self.args[1])
class RecursiveSeq(SeqBase):
"""
A finite degree recursive sequence.
Explanation
===========
That is, a sequence a(n) that depends on a fixed, finite number of its
previous values. The general form is
a(n) = f(a(n - 1), a(n - 2), ..., a(n - d))
for some fixed, positive integer d, where f is some function defined by a
SymPy expression.
Parameters
==========
recurrence : SymPy expression defining recurrence
This is *not* an equality, only the expression that the nth term is
equal to. For example, if :code:`a(n) = f(a(n - 1), ..., a(n - d))`,
then the expression should be :code:`f(a(n - 1), ..., a(n - d))`.
yn : applied undefined function
Represents the nth term of the sequence as e.g. :code:`y(n)` where
:code:`y` is an undefined function and `n` is the sequence index.
n : symbolic argument
The name of the variable that the recurrence is in, e.g., :code:`n` if
the recurrence function is :code:`y(n)`.
initial : iterable with length equal to the degree of the recurrence
The initial values of the recurrence.
start : start value of sequence (inclusive)
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.series.sequences import RecursiveSeq
>>> y = Function("y")
>>> n = symbols("n")
>>> fib = RecursiveSeq(y(n - 1) + y(n - 2), y(n), n, [0, 1])
>>> fib.coeff(3) # Value at a particular point
2
>>> fib[:6] # supports slicing
[0, 1, 1, 2, 3, 5]
>>> fib.recurrence # inspect recurrence
Eq(y(n), y(n - 2) + y(n - 1))
>>> fib.degree # automatically determine degree
2
>>> for x in zip(range(10), fib): # supports iteration
... print(x)
(0, 0)
(1, 1)
(2, 1)
(3, 2)
(4, 3)
(5, 5)
(6, 8)
(7, 13)
(8, 21)
(9, 34)
See Also
========
sympy.series.sequences.SeqFormula
"""
def __new__(cls, recurrence, yn, n, initial=None, start=0):
if not isinstance(yn, AppliedUndef):
raise TypeError("recurrence sequence must be an applied undefined function"
", found `{}`".format(yn))
if not isinstance(n, Basic) or not n.is_symbol:
raise TypeError("recurrence variable must be a symbol"
", found `{}`".format(n))
if yn.args != (n,):
raise TypeError("recurrence sequence does not match symbol")
y = yn.func
k = Wild("k", exclude=(n,))
degree = 0
# Find all applications of y in the recurrence and check that:
# 1. The function y is only being used with a single argument; and
# 2. All arguments are n + k for constant negative integers k.
prev_ys = recurrence.find(y)
for prev_y in prev_ys:
if len(prev_y.args) != 1:
raise TypeError("Recurrence should be in a single variable")
shift = prev_y.args[0].match(n + k)[k]
if not (shift.is_constant() and shift.is_integer and shift < 0):
raise TypeError("Recurrence should have constant,"
" negative, integer shifts"
" (found {})".format(prev_y))
if -shift > degree:
degree = -shift
if not initial:
initial = [Dummy("c_{}".format(k)) for k in range(degree)]
if len(initial) != degree:
raise ValueError("Number of initial terms must equal degree")
degree = Integer(degree)
start = sympify(start)
initial = Tuple(*(sympify(x) for x in initial))
seq = Basic.__new__(cls, recurrence, yn, n, initial, start)
seq.cache = {y(start + k): init for k, init in enumerate(initial)}
seq.degree = degree
return seq
@property
def _recurrence(self):
"""Equation defining recurrence."""
return self.args[0]
@property
def recurrence(self):
"""Equation defining recurrence."""
return Eq(self.yn, self.args[0])
@property
def yn(self):
"""Applied function representing the nth term"""
return self.args[1]
@property
def y(self):
"""Undefined function for the nth term of the sequence"""
return self.yn.func
@property
def n(self):
"""Sequence index symbol"""
return self.args[2]
@property
def initial(self):
"""The initial values of the sequence"""
return self.args[3]
@property
def start(self):
"""The starting point of the sequence. This point is included"""
return self.args[4]
@property
def stop(self):
"""The ending point of the sequence. (oo)"""
return S.Infinity
@property
def interval(self):
"""Interval on which sequence is defined."""
return (self.start, S.Infinity)
def _eval_coeff(self, index):
if index - self.start < len(self.cache):
return self.cache[self.y(index)]
for current in range(len(self.cache), index + 1):
# Use xreplace over subs for performance.
# See issue #10697.
seq_index = self.start + current
current_recurrence = self._recurrence.xreplace({self.n: seq_index})
new_term = current_recurrence.xreplace(self.cache)
self.cache[self.y(seq_index)] = new_term
return self.cache[self.y(self.start + current)]
def __iter__(self):
index = self.start
while True:
yield self._eval_coeff(index)
index += 1
def sequence(seq, limits=None):
"""
Returns appropriate sequence object.
Explanation
===========
If ``seq`` is a SymPy sequence, returns :class:`SeqPer` object
otherwise returns :class:`SeqFormula` object.
Examples
========
>>> from sympy import sequence
>>> from sympy.abc import n
>>> sequence(n**2, (n, 0, 5))
SeqFormula(n**2, (n, 0, 5))
>>> sequence((1, 2, 3), (n, 0, 5))
SeqPer((1, 2, 3), (n, 0, 5))
See Also
========
sympy.series.sequences.SeqPer
sympy.series.sequences.SeqFormula
"""
seq = sympify(seq)
if is_sequence(seq, Tuple):
return SeqPer(seq, limits)
else:
return SeqFormula(seq, limits)
###############################################################################
# OPERATIONS #
###############################################################################
class SeqExprOp(SeqBase):
"""
Base class for operations on sequences.
Examples
========
>>> from sympy.series.sequences import SeqExprOp, sequence
>>> from sympy.abc import n
>>> s1 = sequence(n**2, (n, 0, 10))
>>> s2 = sequence((1, 2, 3), (n, 5, 10))
>>> s = SeqExprOp(s1, s2)
>>> s.gen
(n**2, (1, 2, 3))
>>> s.interval
Interval(5, 10)
>>> s.length
6
See Also
========
sympy.series.sequences.SeqAdd
sympy.series.sequences.SeqMul
"""
@property
def gen(self):
"""Generator for the sequence.
Returns a tuple of the generators of all the argument sequences.
"""
return tuple(a.gen for a in self.args)
@property
def interval(self):
"""Sequence is defined on the intersection
of all the intervals of respective sequences
"""
return Intersection(*(a.interval for a in self.args))
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def variables(self):
"""Cumulative of all the bound variables"""
return tuple(flatten([a.variables for a in self.args]))
@property
def length(self):
return self.stop - self.start + 1
class SeqAdd(SeqExprOp):
"""Represents term-wise addition of sequences.
Rules:
* The interval on which sequence is defined is the intersection
of respective intervals of sequences.
* Anything + :class:`EmptySequence` remains unchanged.
* Other rules are defined in ``_add`` methods of sequence classes.
Examples
========
>>> from sympy import EmptySequence, oo, SeqAdd, SeqPer, SeqFormula
>>> from sympy.abc import n
>>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), EmptySequence)
SeqPer((1, 2), (n, 0, oo))
>>> SeqAdd(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10)))
EmptySequence
>>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2, (n, 0, oo)))
SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
>>> SeqAdd(SeqFormula(n**3), SeqFormula(n**2))
SeqFormula(n**3 + n**2, (n, 0, oo))
See Also
========
sympy.series.sequences.SeqMul
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs
args = list(args)
# adapted from sympy.sets.sets.Union
def _flatten(arg):
if isinstance(arg, SeqBase):
if isinstance(arg, SeqAdd):
return sum(map(_flatten, arg.args), [])
else:
return [arg]
if iterable(arg):
return sum(map(_flatten, arg), [])
raise TypeError("Input must be Sequences or "
" iterables of Sequences")
args = _flatten(args)
args = [a for a in args if a is not S.EmptySequence]
# Addition of no sequences is EmptySequence
if not args:
return S.EmptySequence
if Intersection(*(a.interval for a in args)) is S.EmptySet:
return S.EmptySequence
# reduce using known rules
if evaluate:
return SeqAdd.reduce(args)
args = list(ordered(args, SeqBase._start_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""Simplify :class:`SeqAdd` using known rules.
Iterates through all pairs and ask the constituent
sequences if they can simplify themselves with any other constituent.
Notes
=====
adapted from ``Union.reduce``
"""
new_args = True
while new_args:
for id1, s in enumerate(args):
new_args = False
for id2, t in enumerate(args):
if id1 == id2:
continue
new_seq = s._add(t)
# This returns None if s does not know how to add
# with t. Returns the newly added sequence otherwise
if new_seq is not None:
new_args = [a for a in args if a not in (s, t)]
new_args.append(new_seq)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return SeqAdd(args, evaluate=False)
def _eval_coeff(self, pt):
"""adds up the coefficients of all the sequences at point pt"""
return sum(a.coeff(pt) for a in self.args)
class SeqMul(SeqExprOp):
r"""Represents term-wise multiplication of sequences.
Explanation
===========
Handles multiplication of sequences only. For multiplication
with other objects see :func:`SeqBase.coeff_mul`.
Rules:
* The interval on which sequence is defined is the intersection
of respective intervals of sequences.
* Anything \* :class:`EmptySequence` returns :class:`EmptySequence`.
* Other rules are defined in ``_mul`` methods of sequence classes.
Examples
========
>>> from sympy import EmptySequence, oo, SeqMul, SeqPer, SeqFormula
>>> from sympy.abc import n
>>> SeqMul(SeqPer((1, 2), (n, 0, oo)), EmptySequence)
EmptySequence
>>> SeqMul(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10)))
EmptySequence
>>> SeqMul(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2))
SeqMul(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
>>> SeqMul(SeqFormula(n**3), SeqFormula(n**2))
SeqFormula(n**5, (n, 0, oo))
See Also
========
sympy.series.sequences.SeqAdd
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs
args = list(args)
# adapted from sympy.sets.sets.Union
def _flatten(arg):
if isinstance(arg, SeqBase):
if isinstance(arg, SeqMul):
return sum(map(_flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(_flatten, arg), [])
raise TypeError("Input must be Sequences or "
" iterables of Sequences")
args = _flatten(args)
# Multiplication of no sequences is EmptySequence
if not args:
return S.EmptySequence
if Intersection(*(a.interval for a in args)) is S.EmptySet:
return S.EmptySequence
# reduce using known rules
if evaluate:
return SeqMul.reduce(args)
args = list(ordered(args, SeqBase._start_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""Simplify a :class:`SeqMul` using known rules.
Explanation
===========
Iterates through all pairs and ask the constituent
sequences if they can simplify themselves with any other constituent.
Notes
=====
adapted from ``Union.reduce``
"""
new_args = True
while new_args:
for id1, s in enumerate(args):
new_args = False
for id2, t in enumerate(args):
if id1 == id2:
continue
new_seq = s._mul(t)
# This returns None if s does not know how to multiply
# with t. Returns the newly multiplied sequence otherwise
if new_seq is not None:
new_args = [a for a in args if a not in (s, t)]
new_args.append(new_seq)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return SeqMul(args, evaluate=False)
def _eval_coeff(self, pt):
"""multiplies the coefficients of all the sequences at point pt"""
val = 1
for a in self.args:
val *= a.coeff(pt)
return val
|
bdf3bf5ef4e9253cb77d9a79b0e7de66d827c03de0de1010e371bf5672a3a4d1 | from sympy.core import S, Symbol, Add, sympify, Expr, PoleError, Mul
from sympy.core.exprtools import factor_terms
from sympy.core.numbers import Float
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import (Abs, sign)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.special.gamma_functions import gamma
from sympy.polys import PolynomialError, factor
from sympy.series.order import Order
from sympy.simplify.powsimp import powsimp
from sympy.simplify.ratsimp import ratsimp
from sympy.simplify.simplify import nsimplify, together
from .gruntz import gruntz
def limit(e, z, z0, dir="+"):
"""Computes the limit of ``e(z)`` at the point ``z0``.
Parameters
==========
e : expression, the limit of which is to be taken
z : symbol representing the variable in the limit.
Other symbols are treated as constants. Multivariate limits
are not supported.
z0 : the value toward which ``z`` tends. Can be any expression,
including ``oo`` and ``-oo``.
dir : string, optional (default: "+")
The limit is bi-directional if ``dir="+-"``, from the right
(z->z0+) if ``dir="+"``, and from the left (z->z0-) if
``dir="-"``. For infinite ``z0`` (``oo`` or ``-oo``), the ``dir``
argument is determined from the direction of the infinity
(i.e., ``dir="-"`` for ``oo``).
Examples
========
>>> from sympy import limit, sin, oo
>>> from sympy.abc import x
>>> limit(sin(x)/x, x, 0)
1
>>> limit(1/x, x, 0) # default dir='+'
oo
>>> limit(1/x, x, 0, dir="-")
-oo
>>> limit(1/x, x, 0, dir='+-')
zoo
>>> limit(1/x, x, oo)
0
Notes
=====
First we try some heuristics for easy and frequent cases like "x", "1/x",
"x**2" and similar, so that it's fast. For all other cases, we use the
Gruntz algorithm (see the gruntz() function).
See Also
========
limit_seq : returns the limit of a sequence.
"""
return Limit(e, z, z0, dir).doit(deep=False)
def heuristics(e, z, z0, dir):
"""Computes the limit of an expression term-wise.
Parameters are the same as for the ``limit`` function.
Works with the arguments of expression ``e`` one by one, computing
the limit of each and then combining the results. This approach
works only for simple limits, but it is fast.
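For example, a sum whose terms each have a finite limit can be combined
term by term (shown here through the public ``limit`` interface, which
falls back to ``heuristics`` when the Gruntz algorithm fails):
>>> from sympy import limit, exp, oo
>>> from sympy.abc import x
>>> limit(exp(-x) + 2, x, oo)
2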
"""
from sympy.calculus.util import AccumBounds
rv = None
if abs(z0) is S.Infinity:
rv = limit(e.subs(z, 1/z), z, S.Zero, "+" if z0 is S.Infinity else "-")
if isinstance(rv, Limit):
return
elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function:
r = []
for a in e.args:
l = limit(a, z, z0, dir)
if l.has(S.Infinity) and l.is_finite is None:
if isinstance(e, Add):
m = factor_terms(e)
if not isinstance(m, Mul): # try together
m = together(m)
if not isinstance(m, Mul): # try factor if the previous methods failed
m = factor(e)
if isinstance(m, Mul):
return heuristics(m, z, z0, dir)
return
return
elif isinstance(l, Limit):
return
elif l is S.NaN:
return
else:
r.append(l)
if r:
rv = e.func(*r)
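# If recombining the term-wise limits of a product gives nan because some
# factor evaluated to an AccumBounds, take the limit of the remaining
# factors together and multiply the AccumBounds results back in.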
if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r):
r2 = []
e2 = []
for ii in range(len(r)):
if isinstance(r[ii], AccumBounds):
r2.append(r[ii])
else:
e2.append(e.args[ii])
if len(e2) > 0:
e3 = Mul(*e2).simplify()
l = limit(e3, z, z0, dir)
rv = l * Mul(*r2)
if rv is S.NaN:
try:
rat_e = ratsimp(e)
except PolynomialError:
return
if rat_e is S.NaN or rat_e == e:
return
return limit(rat_e, z, z0, dir)
return rv
class Limit(Expr):
"""Represents an unevaluated limit.
Examples
========
>>> from sympy import Limit, sin
>>> from sympy.abc import x
>>> Limit(sin(x)/x, x, 0)
Limit(sin(x)/x, x, 0)
>>> Limit(1/x, x, 0, dir="-")
Limit(1/x, x, 0, dir='-')
"""
def __new__(cls, e, z, z0, dir="+"):
e = sympify(e)
z = sympify(z)
z0 = sympify(z0)
if z0 is S.Infinity:
dir = "-"
elif z0 is S.NegativeInfinity:
dir = "+"
if(z0.has(z)):
raise NotImplementedError("Limits approaching a variable point are"
" not supported (%s -> %s)" % (z, z0))
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("direction must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-', '+-'):
raise ValueError("direction must be one of '+', '-' "
"or '+-', not %s" % dir)
obj = Expr.__new__(cls)
obj._args = (e, z, z0, dir)
return obj
@property
def free_symbols(self):
e = self.args[0]
isyms = e.free_symbols
isyms.difference_update(self.args[1].free_symbols)
isyms.update(self.args[2].free_symbols)
return isyms
def pow_heuristics(self, e):
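"""
Handles limits of powers ``b**e`` in a few special cases: if the base is
free of ``z`` the result is ``exp(limit(e*log(b)))``; if the base tends
to 1 while the exponent tends to ``oo`` or ``-oo`` the standard rewrite
``exp(limit(e*(b - 1)))`` is used; a base tending to ``-oo`` with an
exponent tending to ``oo`` gives ``zoo``. Returns ``None`` if no case
applies.
"""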
_, z, z0, _ = self.args
b1, e1 = e.base, e.exp
if not b1.has(z):
res = limit(e1*log(b1), z, z0)
return exp(res)
ex_lim = limit(e1, z, z0)
base_lim = limit(b1, z, z0)
if base_lim is S.One:
if ex_lim in (S.Infinity, S.NegativeInfinity):
res = limit(e1*(b1 - 1), z, z0)
return exp(res)
if base_lim is S.NegativeInfinity and ex_lim is S.Infinity:
return S.ComplexInfinity
def doit(self, **hints):
"""Evaluates the limit.
Parameters
==========
deep : bool, optional (default: True)
Invoke the ``doit`` method of the expressions involved before
taking the limit.
hints : optional keyword arguments
To be passed to ``doit`` methods; only used if deep is True.
"""
e, z, z0, dir = self.args
if z0 is S.ComplexInfinity:
raise NotImplementedError("Limits at complex "
"infinity are not implemented")
if hints.get('deep', True):
e = e.doit(**hints)
z = z.doit(**hints)
z0 = z0.doit(**hints)
if e == z:
return z0
if not e.has(z):
return e
if z0 is S.NaN:
return S.NaN
if e.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN):
return self
if e.is_Order:
return Order(limit(e.expr, z, z0), *e.args[1:])
cdir = 0
if str(dir) == "+":
cdir = 1
elif str(dir) == "-":
cdir = -1
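# set_signs rewrites Abs(arg) as +-arg and sign(arg) as +-1 whenever the
# sign of arg can be decided in the limit, so that the leading-term
# analysis below does not have to treat them symbolically.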
def set_signs(expr):
if not expr.args:
return expr
newargs = tuple(set_signs(arg) for arg in expr.args)
if newargs != expr.args:
expr = expr.func(*newargs)
abs_flag = isinstance(expr, Abs)
sign_flag = isinstance(expr, sign)
if abs_flag or sign_flag:
sig = limit(expr.args[0], z, z0, dir)
if sig.is_zero:
sig = limit(1/expr.args[0], z, z0, dir)
if sig.is_extended_real:
if (sig < 0) == True:
return -expr.args[0] if abs_flag else S.NegativeOne
elif (sig > 0) == True:
return expr.args[0] if abs_flag else S.One
return expr
if e.has(Float):
# Convert floats like 0.5 to exact SymPy numbers like S.Half, to
# prevent rounding errors which can lead to unexpected execution
# of conditional blocks that work on comparisons
# Also see comments in https://github.com/sympy/sympy/issues/19453
e = nsimplify(e)
e = set_signs(e)
if e.is_meromorphic(z, z0):
if abs(z0) is S.Infinity:
newe = e.subs(z, -1/z)
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir=cdir)
except ValueError:
pass
else:
if ex > 0:
return S.Zero
elif ex == 0:
return coeff
if str(dir) == "+" or not(int(ex) & 1):
return S.Infinity*sign(coeff)
elif str(dir) == "-":
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
if abs(z0) is S.Infinity:
if e.is_Mul:
e = factor_terms(e)
newe = e.subs(z, 1/z)
# cdir changes sign as oo- should become 0+
cdir = -cdir
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir=cdir)
except (ValueError, NotImplementedError, PoleError):
# The NotImplementedError catching is for custom functions
e = powsimp(e)
if e.is_Pow:
r = self.pow_heuristics(e)
if r is not None:
return r
else:
if coeff.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
return self
if not coeff.has(z):
if ex.is_positive:
return S.Zero
elif ex == 0:
return coeff
elif ex.is_negative:
if ex.is_integer:
if str(dir) == "-" or ex.is_even:
return S.Infinity*sign(coeff)
elif str(dir) == "+":
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
else:
if str(dir) == "+":
return S.Infinity*sign(coeff)
elif str(dir) == "-":
return S.NegativeInfinity*sign(coeff)*S.NegativeOne**(S.One + ex)
else:
return S.ComplexInfinity
# gruntz fails on factorials but works with the gamma function
# If no factorial term is present, e should remain unchanged.
# factorial is defined to be zero for negative inputs (which
# differs from gamma) so only rewrite for positive z0.
if z0.is_extended_positive:
e = e.rewrite(factorial, gamma)
l = None
try:
if str(dir) == '+-':
r = gruntz(e, z, z0, '+')
l = gruntz(e, z, z0, '-')
if l != r:
raise ValueError("The limit does not exist since "
"left hand limit = %s and right hand limit = %s"
% (l, r))
else:
r = gruntz(e, z, z0, dir)
if r is S.NaN or l is S.NaN:
raise PoleError()
except (PoleError, ValueError):
if l is not None:
raise
r = heuristics(e, z, z0, dir)
if r is None:
return self
return r
|
0babad48ae2f130d106e17c436f3f80f7a8ba0a856d1334618bfb43104a07b51 | """Fourier Series"""
from sympy.core.numbers import (oo, pi)
from sympy.core.symbol import Wild
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import sin, cos, sinc
from sympy.series.series_class import SeriesBase
from sympy.series.sequences import SeqFormula
from sympy.sets.sets import Interval
from sympy.simplify.fu import TR2, TR1, TR10, sincos_to_sum
from sympy.utilities.iterables import is_sequence
def fourier_cos_seq(func, limits, n):
"""Returns the cos sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
cos_term = cos(2*n*pi*x / L)
formula = 2 * cos_term * integrate(func * cos_term, limits) / L
a0 = formula.subs(n, S.Zero) / 2
return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits)
/ L, (n, 1, oo))
def fourier_sin_seq(func, limits, n):
"""Returns the sin sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
sin_term = sin(2*n*pi*x / L)
return SeqFormula(2 * sin_term * integrate(func * sin_term, limits)
/ L, (n, 1, oo))
def _process_limits(func, limits):
"""
Limits should be of the form (x, start, stop).
x should be a symbol. Both start and stop should be bounded.
Explanation
===========
* If x is not given, x is determined from func.
* If limits is None. Limit of the form (x, -pi, pi) is returned.
Examples
========
>>> from sympy.series.fourier import _process_limits as pari
>>> from sympy.abc import x
>>> pari(x**2, (x, -2, 2))
(x, -2, 2)
>>> pari(x**2, (-2, 2))
(x, -2, 2)
>>> pari(x**2, None)
(x, -pi, pi)
"""
def _find_x(func):
free = func.free_symbols
if len(free) == 1:
return free.pop()
elif not free:
return Dummy('k')
else:
raise ValueError(
" specify dummy variables for %s. If the function contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))"
% func)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(func), -pi, pi
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(func)
start, stop = limits
if not isinstance(x, Symbol) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
unbounded = [S.NegativeInfinity, S.Infinity]
if start in unbounded or stop in unbounded:
raise ValueError("Both the start and end value should be bounded")
return sympify((x, start, stop))
def finite_check(f, x, L):
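"""
Checks whether ``f`` is already a finite trigonometric sum on the given
interval: every multiplicative factor of every additive term must either
be free of ``x`` or be a sin/cos whose argument matches ``a*(pi/L)*x + b``
with ``a`` a nonzero integer and ``b`` free of ``x``. Returns
``(True, rewritten_expr)`` if so and ``(False, f)`` otherwise.
"""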
def check_fx(exprs, x):
return x not in exprs.free_symbols
def check_sincos(_expr, x, L):
if isinstance(_expr, (sin, cos)):
sincos_args = _expr.args[0]
if sincos_args.match(a*(pi/L)*x + b) is not None:
return True
else:
return False
_expr = sincos_to_sum(TR2(TR1(f)))
add_coeff = _expr.as_coeff_add()
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols, ])
for s in add_coeff[1]:
mul_coeffs = s.as_coeff_mul()[1]
for t in mul_coeffs:
if not (check_fx(t, x) or check_sincos(t, x, L)):
return False, f
return True, _expr
class FourierSeries(SeriesBase):
r"""Represents Fourier sine/cosine series.
Explanation
===========
This class only represents a Fourier series.
No computation is performed.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
See Also
========
sympy.series.fourier.fourier_series
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1][0]
@property
def period(self):
return (self.args[1][1], self.args[1][2])
@property
def a0(self):
return self.args[2][0]
@property
def an(self):
return self.args[2][1]
@property
def bn(self):
return self.args[2][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def L(self):
return abs(self.period[1] - self.period[0]) / 2
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def truncate(self, n=3):
"""
Return the first n nonzero terms of the series.
If ``n`` is None return an iterator.
Parameters
==========
n : int or None
Amount of non-zero terms in approximation or None.
Returns
=======
Expr or iterator :
Approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.truncate(4)
2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2
See Also
========
sympy.series.fourier.FourierSeries.sigma_approximation
"""
if n is None:
return iter(self)
terms = []
for t in self:
if len(terms) == n:
break
if t is not S.Zero:
terms.append(t)
return Add(*terms)
def sigma_approximation(self, n=3):
r"""
Return :math:`\sigma`-approximation of Fourier series with respect
to order n.
Explanation
===========
Sigma approximation adjusts a Fourier summation to eliminate the Gibbs
phenomenon which would otherwise occur at discontinuities.
A sigma-approximated summation for a Fourier series of a T-periodic
function can be written as
.. math::
s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1}
\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot
\left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr)
+ b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right],
where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier
series coefficients and
:math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos
:math:`\sigma` factor (expressed in terms of normalized
:math:`\operatorname{sinc}` function).
Parameters
==========
n : int
Highest order of the terms taken into account in approximation.
Returns
=======
Expr :
Sigma approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.sigma_approximation(4)
2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3
See Also
========
sympy.series.fourier.FourierSeries.truncate
Notes
=====
The behaviour of
:meth:`~sympy.series.fourier.FourierSeries.sigma_approximation`
is different from :meth:`~sympy.series.fourier.FourierSeries.truncate`:
it takes all nonzero terms of degree smaller than n, rather than
the first n nonzero ones.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon
.. [2] https://en.wikipedia.org/wiki/Sigma_approximation
"""
terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n])
if t is not S.Zero]
return Add(*terms)
def shift(self, s):
"""
Shift the function by a term independent of x.
Explanation
===========
f(x) -> f(x) + s
This is fast if the Fourier series of f(x) has already been
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
a0 = self.a0 + s
sfunc = self.function + s
return self.func(sfunc, self.args[1], (a0, self.an, self.bn))
def shiftx(self, s):
"""
Shift x by a term independent of x.
Explanation
===========
f(x) -> f(x + s)
This is fast if the Fourier series of f(x) has already been
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x + s)
bn = self.bn.subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def scale(self, s):
"""
Scale the function by a term independent of x.
Explanation
===========
f(x) -> s * f(x)
This is fast if the Fourier series of f(x) has already been
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.coeff_mul(s)
bn = self.bn.coeff_mul(s)
a0 = self.a0 * s
sfunc = self.args[0] * s
return self.func(sfunc, self.args[1], (a0, an, bn))
def scalex(self, s):
"""
Scale x by a term independent of x.
Explanation
===========
f(x) -> f(s*x)
This is fast if the Fourier series of f(x) has already been
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x * s)
bn = self.bn.subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def _eval_as_leading_term(self, x, logx=None, cdir=0):
for t in self:
if t is not S.Zero:
return t
def _eval_term(self, pt):
if pt == 0:
return self.a0
return self.an.coeff(pt) + self.bn.coeff(pt)
def __neg__(self):
return self.scale(-1)
def __add__(self, other):
if isinstance(other, FourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
an = self.an + other.an
bn = self.bn + other.bn
a0 = self.a0 + other.a0
return self.func(function, self.args[1], (a0, an, bn))
return Add(self, other)
def __sub__(self, other):
return self.__add__(-other)
class FiniteFourierSeries(FourierSeries):
r"""Represents Finite Fourier sine/cosine series.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
Parameters
==========
f : Expr
Expression for finding fourier_series
limits : (x, start, stop)
x is the independent variable for the expression f
(start, stop) is the interval spanning one period of the Fourier series
exprs: (a0, an, bn) or Expr
a0 is the constant term a0 of the fourier series
an is a dictionary of coefficients of cos terms
an[k] = coefficient of cos(pi*(k/L)*x)
bn is a dictionary of coefficients of sin terms
bn[k] = coefficient of sin(pi*(k/L)*x)
or exprs can be an expression to be converted to fourier form
Methods
=======
This class is an extension of FourierSeries class.
Please refer to sympy.series.fourier.FourierSeries for
further information.
See Also
========
sympy.series.fourier.FourierSeries
sympy.series.fourier.fourier_series
"""
def __new__(cls, f, limits, exprs):
f = sympify(f)
limits = sympify(limits)
exprs = sympify(exprs)
if not (type(exprs) == Tuple and len(exprs) == 3): # exprs is not of form (a0, an, bn)
# Converts the expression to fourier form
c, e = exprs.as_coeff_add()
rexpr = c + Add(*[TR10(i) for i in e])
a0, exp_ls = rexpr.expand(trig=False, power_base=False, power_exp=False, log=False).as_coeff_add()
x = limits[0]
L = abs(limits[2] - limits[1]) / 2
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k is not S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols, ])
an = dict()
bn = dict()
# separates the coefficients of sin and cos terms in dictionaries an, and bn
for p in exp_ls:
t = p.match(b * cos(a * (pi / L) * x))
q = p.match(b * sin(a * (pi / L) * x))
if t:
an[t[a]] = t[b] + an.get(t[a], S.Zero)
elif q:
bn[q[a]] = q[b] + bn.get(q[a], S.Zero)
else:
a0 += p
exprs = Tuple(a0, an, bn)
return Expr.__new__(cls, f, limits, exprs)
@property
def interval(self):
_length = 1 if self.a0 else 0
_length += max(set(self.an.keys()).union(set(self.bn.keys()))) + 1
return Interval(0, _length)
@property
def length(self):
return self.stop - self.start
def shiftx(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate().subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], _expr)
def scale(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate() * s
sfunc = self.function * s
return self.func(sfunc, self.args[1], _expr)
def scalex(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate().subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], _expr)
def _eval_term(self, pt):
if pt == 0:
return self.a0
_term = self.an.get(pt, S.Zero) * cos(pt * (pi / self.L) * self.x) \
+ self.bn.get(pt, S.Zero) * sin(pt * (pi / self.L) * self.x)
return _term
def __add__(self, other):
if isinstance(other, FourierSeries):
return other.__add__(fourier_series(self.function, self.args[1],\
finite=False))
elif isinstance(other, FiniteFourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
return fourier_series(function, limits=self.args[1])
def fourier_series(f, limits=None, finite=True):
r"""Computes the Fourier trigonometric series expansion.
Explanation
===========
Fourier trigonometric series of $f(x)$ over the interval $(a, b)$
is defined as:
.. math::
\frac{a_0}{2} + \sum_{n=1}^{\infty}
(a_n \cos(\frac{2n \pi x}{L}) + b_n \sin(\frac{2n \pi x}{L}))
where the coefficients are:
.. math::
L = b - a
.. math::
a_0 = \frac{2}{L} \int_{a}^{b}{f(x) dx}
.. math::
a_n = \frac{2}{L} \int_{a}^{b}{f(x) \cos(\frac{2n \pi x}{L}) dx}
.. math::
b_n = \frac{2}{L} \int_{a}^{b}{f(x) \sin(\frac{2n \pi x}{L}) dx}
The requirement that $f(x)$ be periodic is stronger than necessary:
it is sufficient for the series to converge to $f(x)$ on the given
interval, not on the whole real line.
This also eases the computation, because you do not have to make
$f(x)$ artificially periodic by wrapping it in piecewise or modulo
operations; it is enough to shape the function like the desired
periodic function on the interval $(a, b)$, and the computed series
automatically becomes the series of the periodic extension of $f(x)$.
This property is illustrated in the examples section below.
Parameters
==========
limits : (sym, start, end), optional
*sym* denotes the symbol the series is computed with respect to.
*start* and *end* denotes the start and the end of the interval
where the fourier series converges to the given function.
Default range is specified as $-\pi$ and $\pi$.
Returns
=======
FourierSeries
A symbolic object representing the Fourier trigonometric series.
Examples
========
Computing the Fourier series of $f(x) = x^2$:
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> f = x**2
>>> s = fourier_series(f, (x, -pi, pi))
>>> s1 = s.truncate(n=3)
>>> s1
-4*cos(x) + cos(2*x) + pi**2/3
Shifting of the Fourier series:
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
Scaling of the Fourier series:
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
Computing the Fourier series of $f(x) = x$:
This illustrates how truncating to the higher order gives better
convergence.
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import fourier_series, pi, plot
>>> from sympy.abc import x
>>> f = x
>>> s = fourier_series(f, (x, -pi, pi))
>>> s1 = s.truncate(n = 3)
>>> s2 = s.truncate(n = 5)
>>> s3 = s.truncate(n = 7)
>>> p = plot(f, s1, s2, s3, (x, -pi, pi), show=False, legend=True)
>>> p[0].line_color = (0, 0, 0)
>>> p[0].label = 'x'
>>> p[1].line_color = (0.7, 0.7, 0.7)
>>> p[1].label = 'n=3'
>>> p[2].line_color = (0.5, 0.5, 0.5)
>>> p[2].label = 'n=5'
>>> p[3].line_color = (0.3, 0.3, 0.3)
>>> p[3].label = 'n=7'
>>> p.show()
This illustrates how the series converges to different sawtooth
waves if the different ranges are specified.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> s1 = fourier_series(x, (x, -1, 1)).truncate(10)
>>> s2 = fourier_series(x, (x, -pi, pi)).truncate(10)
>>> s3 = fourier_series(x, (x, 0, 1)).truncate(10)
>>> p = plot(x, s1, s2, s3, (x, -5, 5), show=False, legend=True)
>>> p[0].line_color = (0, 0, 0)
>>> p[0].label = 'x'
>>> p[1].line_color = (0.7, 0.7, 0.7)
>>> p[1].label = '[-1, 1]'
>>> p[2].line_color = (0.5, 0.5, 0.5)
>>> p[2].label = '[-pi, pi]'
>>> p[3].line_color = (0.3, 0.3, 0.3)
>>> p[3].label = '[0, 1]'
>>> p.show()
Notes
=====
Computing Fourier series can be slow
due to the integration required in computing
an, bn.
It is faster to compute the Fourier series of a function
by applying shifting and scaling to an already
computed Fourier series rather than computing it
again.
For example, if the Fourier series of ``x**2`` is known,
the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``.
See Also
========
sympy.series.fourier.FourierSeries
References
==========
.. [1] https://mathworld.wolfram.com/FourierSeries.html
"""
f = sympify(f)
limits = _process_limits(f, limits)
x = limits[0]
if x not in f.free_symbols:
return f
if finite:
L = abs(limits[2] - limits[1]) / 2
is_finite, res_f = finite_check(f, x, L)
if is_finite:
return FiniteFourierSeries(f, limits, res_f)
n = Dummy('n')
center = (limits[1] + limits[2]) / 2
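# For an interval symmetric about 0, exploit parity: an even function has
# a pure cosine series and an odd function a pure sine series.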
if center.is_zero:
neg_f = f.subs(x, -x)
if f == neg_f:
a0, an = fourier_cos_seq(f, limits, n)
bn = SeqFormula(0, (1, oo))
return FourierSeries(f, limits, (a0, an, bn))
elif f == -neg_f:
a0 = S.Zero
an = SeqFormula(0, (1, oo))
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
a0, an = fourier_cos_seq(f, limits, n)
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
|
938eacfdf15f20774966a7ecb7e07ff97d51f92016b3fad1f4c407edaf492ed8 | """
This module implements the Residue function and related tools for working
with residues.
"""
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.utilities.timeutils import timethis
@timethis('residue')
def residue(expr, x, x0):
"""
Finds the residue of ``expr`` at the point x=x0.
The residue is defined as the coefficient of ``1/(x-x0)`` in the power series
expansion about ``x=x0``.
Examples
========
>>> from sympy import Symbol, residue, sin
>>> x = Symbol("x")
>>> residue(1/x, x, 0)
1
>>> residue(1/x**2, x, 0)
0
>>> residue(2/sin(x), x, 0)
2
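The pole need not be simple; the coefficient of ``1/x`` in the Laurent
expansion is returned (here ``exp(x)/x**2 = 1/x**2 + 1/x + 1/2 + ...``):
>>> from sympy import exp
>>> residue(exp(x)/x**2, x, 0)
1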
This function is essential for the Residue Theorem [1].
References
==========
.. [1] https://en.wikipedia.org/wiki/Residue_theorem
"""
# The current implementation uses series expansion to
# calculate it. A more general implementation is explained in
# the section 5.6 of the Bronstein's book {M. Bronstein:
# Symbolic Integration I, Springer Verlag (2005)}. For purely
# rational functions, the algorithm is much easier. See
# sections 2.4, 2.5, and 2.7 (this section actually gives an
# algorithm for computing any Laurent series coefficient for
# a rational function). The theory in section 2.4 will help to
# understand why the resultant works in the general algorithm.
# For the definition of a resultant, see section 1.4 (and any
# previous sections for more review).
from sympy.series.order import Order
from sympy.simplify.radsimp import collect
expr = sympify(expr)
if x0 != 0:
expr = expr.subs(x, x + x0)
for n in (0, 1, 2, 4, 8, 16, 32):
s = expr.nseries(x, n=n)
if not s.has(Order) or s.getn() >= 0:
break
s = collect(s.removeO(), x)
if s.is_Add:
args = s.args
else:
args = [s]
res = S.Zero
for arg in args:
c, m = arg.as_coeff_mul(x)
m = Mul(*m)
if not (m in (S.One, x) or (m.is_Pow and m.exp.is_Integer)):
raise NotImplementedError('term of unexpected form: %s' % m)
if m == 1/x:
res += c
return res
|
a5df82bb8538e9e5486838d65330d046a8b105c72c3667dd419a0b0afa6e43f8 | """Formal Power Series"""
from collections import defaultdict
from sympy.core.numbers import (nan, oo, zoo)
from sympy.core.add import Add
from sympy.core.expr import Expr
from sympy.core.function import Derivative, Function, expand
from sympy.core.mul import Mul
from sympy.core.numbers import Rational
from sympy.core.relational import Eq
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import Wild, Dummy, symbols, Symbol
from sympy.core.sympify import sympify
from sympy.discrete.convolutions import convolution
from sympy.functions.combinatorial.factorials import binomial, factorial, rf
from sympy.functions.combinatorial.numbers import bell
from sympy.functions.elementary.integers import floor, frac, ceiling
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.functions.elementary.piecewise import Piecewise
from sympy.series.limits import Limit
from sympy.series.order import Order
from sympy.simplify.powsimp import powsimp
from sympy.series.sequences import sequence
from sympy.series.series_class import SeriesBase
from sympy.utilities.iterables import iterable
def rational_algorithm(f, x, k, order=4, full=False):
"""
Rational algorithm for computing
formula of coefficients of Formal Power Series
of a function.
Explanation
===========
Applicable when f(x) or some derivative of f(x)
is a rational function in x.
:func:`rational_algorithm` uses :func:`~.apart` function for partial fraction
decomposition. :func:`~.apart` by default uses 'undetermined coefficients
method'. By setting ``full=True``, 'Bronstein's algorithm' can be used
instead.
Looks for derivatives of the function up to 4th order (by default).
This can be overridden using the ``order`` option.
Parameters
==========
x : Symbol
order : int, optional
Order of the derivative of ``f``, Default is 4.
full : bool
Returns
=======
formula : Expr
ind : Expr
Independent terms.
order : int
full : bool
Examples
========
>>> from sympy import log, atan
>>> from sympy.series.formal import rational_algorithm as ra
>>> from sympy.abc import x, k
>>> ra(1 / (1 - x), x, k)
(1, 0, 0)
>>> ra(log(1 + x), x, k)
(-1/((-1)**k*k), 0, 1)
>>> ra(atan(x), x, k, full=True)
((-I/(2*(-I)**k) + I/(2*I**k))/k, 0, 1)
Notes
=====
By setting ``full=True``, range of admissible functions to be solved using
``rational_algorithm`` can be increased. This option should be used
carefully as it can significantly slow down the computation as ``doit`` is
performed on the :class:`~.RootSum` object returned by the :func:`~.apart`
function. Use ``full=False`` whenever possible.
See Also
========
sympy.polys.partfrac.apart
References
==========
.. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf
.. [2] Power Series in Computer Algebra - Wolfram Koepf
"""
from sympy.polys import RootSum, apart
from sympy.integrals import integrate
diff = f
ds = [] # list of diff
for i in range(order + 1):
if i:
diff = diff.diff(x)
if diff.is_rational_function(x):
coeff, sep = S.Zero, S.Zero
terms = apart(diff, x, full=full)
if terms.has(RootSum):
terms = terms.doit()
for t in Add.make_args(terms):
num, den = t.as_numer_denom()
if not den.has(x):
sep += t
else:
if isinstance(den, Mul):
# m*(n*x - a)**j -> (n*x - a)**j
ind = den.as_independent(x)
den = ind[1]
num /= ind[0]
# (n*x - a)**j -> (x - b)
den, j = den.as_base_exp()
a, xterm = den.as_coeff_add(x)
# term -> m/x**n
if not a:
sep += t
continue
xc = xterm[0].coeff(x)
a /= -xc
num /= xc**j
ak = ((-1)**j * num *
binomial(j + k - 1, k).rewrite(factorial) /
a**(j + k))
coeff += ak
# Hacky, better way?
if coeff.is_zero:
return None
if (coeff.has(x) or coeff.has(zoo) or coeff.has(oo) or
coeff.has(nan)):
return None
for j in range(i):
coeff = (coeff / (k + j + 1))
sep = integrate(sep, x)
sep += (ds.pop() - sep).limit(x, 0) # constant of integration
return (coeff.subs(k, k - i), sep, i)
else:
ds.append(diff)
return None
def rational_independent(terms, x):
"""
Returns a list of all the rationally independent terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.series.formal import rational_independent
>>> from sympy.abc import x
>>> rational_independent([cos(x), sin(x)], x)
[cos(x), sin(x)]
>>> rational_independent([x**2, sin(x), x*sin(x), x**3], x)
[x**3 + x**2, x*sin(x) + sin(x)]
"""
if not terms:
return []
ind = terms[0:1]
for t in terms[1:]:
n = t.as_independent(x)[1]
for i, term in enumerate(ind):
d = term.as_independent(x)[1]
q = (n / d).cancel()
if q.is_rational_function(x):
ind[i] += t
break
else:
ind.append(t)
return ind
def simpleDE(f, x, g, order=4):
r"""
Generates simple DE.
Explanation
===========
DE is of the form
.. math::
f^k(x) + \sum\limits_{j=0}^{k-1} A_j f^j(x) = 0
where :math:`A_j` should be a rational function in x.
Generates DEs up to order 4 (default). DEs can also have free parameters.
By increasing ``order``, higher order DEs can be found.
Yields a tuple of (DE, order).
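For instance, for ``f = exp(x)`` the lowest-order DE found corresponds to
``f'(x) - f(x) = 0`` and is yielded with order 1 (illustrative; the exact
printed form of the DE depends on SymPy's ordering of terms).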
"""
from sympy.solvers.solveset import linsolve
a = symbols('a:%d' % (order))
def _makeDE(k):
eq = f.diff(x, k) + Add(*[a[i]*f.diff(x, i) for i in range(0, k)])
DE = g(x).diff(x, k) + Add(*[a[i]*g(x).diff(x, i) for i in range(0, k)])
return eq, DE
found = False
for k in range(1, order + 1):
eq, DE = _makeDE(k)
eq = eq.expand()
terms = eq.as_ordered_terms()
ind = rational_independent(terms, x)
if found or len(ind) == k:
sol = dict(zip(a, (i for s in linsolve(ind, a[:k]) for i in s)))
if sol:
found = True
DE = DE.subs(sol)
DE = DE.as_numer_denom()[0]
DE = DE.factor().as_coeff_mul(Derivative)[1][0]
yield DE.collect(Derivative(g(x))), k
def exp_re(DE, r, k):
"""Converts a DE with constant coefficients (explike) into a RE.
Explanation
===========
Performs the substitution:
.. math::
f^j(x) \\to r(k + j)
Normalises the terms so that lowest order of a term is always r(k).
Examples
========
>>> from sympy import Function, Derivative
>>> from sympy.series.formal import exp_re
>>> from sympy.abc import x, k
>>> f, r = Function('f'), Function('r')
>>> exp_re(-f(x) + Derivative(f(x)), r, k)
-r(k) + r(k + 1)
>>> exp_re(Derivative(f(x), x) + Derivative(f(x), (x, 2)), r, k)
r(k) + r(k + 1)
See Also
========
sympy.series.formal.hyper_re
"""
RE = S.Zero
g = DE.atoms(Function).pop()
mini = None
for t in Add.make_args(DE):
coeff, d = t.as_independent(g)
if isinstance(d, Derivative):
j = d.derivative_count
else:
j = 0
if mini is None or j < mini:
mini = j
RE += coeff * r(k + j)
if mini:
RE = RE.subs(k, k - mini)
return RE
def hyper_re(DE, r, k):
"""
Converts a DE into a RE.
Explanation
===========
Performs the substitution:
.. math::
        x^l f^j(x) \\to (k + 1 - l)_j \\cdot a_{k + j - l}
Normalises the terms so that lowest order of a term is always r(k).
Examples
========
>>> from sympy import Function, Derivative
>>> from sympy.series.formal import hyper_re
>>> from sympy.abc import x, k
>>> f, r = Function('f'), Function('r')
>>> hyper_re(-f(x) + Derivative(f(x)), r, k)
(k + 1)*r(k + 1) - r(k)
>>> hyper_re(-x*f(x) + Derivative(f(x), (x, 2)), r, k)
(k + 2)*(k + 3)*r(k + 3) - r(k)
See Also
========
sympy.series.formal.exp_re
"""
RE = S.Zero
g = DE.atoms(Function).pop()
x = g.atoms(Symbol).pop()
mini = None
for t in Add.make_args(DE.expand()):
coeff, d = t.as_independent(g)
c, v = coeff.as_independent(x)
l = v.as_coeff_exponent(x)[1]
if isinstance(d, Derivative):
j = d.derivative_count
else:
j = 0
RE += c * rf(k + 1 - l, j) * r(k + j - l)
if mini is None or j - l < mini:
mini = j - l
RE = RE.subs(k, k - mini)
m = Wild('m')
return RE.collect(r(k + m))
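# The helpers below implement the transformations (a), (c) and (e) listed in
# the docstring of rsolve_hypergeometric.  Each rewrites the function f
# together with the polynomials P, Q of the recurrence
# Q(k)*a(k + m) - P(k)*a(k) (and, for (c), the step m):
#   - _transformation_a works on x**(-shift)*f(x) and shifts k accordingly,
#   - _transformation_c substitutes x -> x**scale and rescales k and m,
#   - _transformation_e differentiates f, shifts k by one and multiplies P
#     and Q by (k + m + 1) and (k + 1) respectively.
# The _apply_* helpers perform the matching bookkeeping on an already computed
# solution, i.e. a list of (formula, k-term) pairs.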
def _transformation_a(f, x, P, Q, k, m, shift):
f *= x**(-shift)
P = P.subs(k, k + shift)
Q = Q.subs(k, k + shift)
return f, P, Q, m
def _transformation_c(f, x, P, Q, k, m, scale):
f = f.subs(x, x**scale)
P = P.subs(k, k / scale)
Q = Q.subs(k, k / scale)
m *= scale
return f, P, Q, m
def _transformation_e(f, x, P, Q, k, m):
f = f.diff(x)
P = P.subs(k, k + 1) * (k + m + 1)
Q = Q.subs(k, k + 1) * (k + 1)
return f, P, Q, m
def _apply_shift(sol, shift):
return [(res, cond + shift) for res, cond in sol]
def _apply_scale(sol, scale):
return [(res, cond / scale) for res, cond in sol]
def _apply_integrate(sol, x, k):
return [(res / ((cond + 1)*(cond.as_coeff_Add()[1].coeff(k))), cond + 1)
for res, cond in sol]
def _compute_formula(f, x, P, Q, k, m, k_max):
"""Computes the formula for f."""
from sympy.polys import roots
sol = []
for i in range(k_max + 1, k_max + m + 1):
if (i < 0) == True:
continue
r = f.diff(x, i).limit(x, 0) / factorial(i)
if r.is_zero:
continue
kterm = m*k + i
res = r
p = P.subs(k, kterm)
q = Q.subs(k, kterm)
c1 = p.subs(k, 1/k).leadterm(k)[0]
c2 = q.subs(k, 1/k).leadterm(k)[0]
res *= (-c1 / c2)**k
for r, mul in roots(p, k).items():
res *= rf(-r, k)**mul
for r, mul in roots(q, k).items():
res /= rf(-r, k)**mul
sol.append((res, kterm))
return sol
def _rsolve_hypergeometric(f, x, P, Q, k, m):
"""
Recursive wrapper to rsolve_hypergeometric.
Explanation
===========
Returns a Tuple of (formula, series independent terms,
maximum power of x in independent terms) if successful
otherwise ``None``.
See :func:`rsolve_hypergeometric` for details.
"""
from sympy.polys import lcm, roots
from sympy.integrals import integrate
# transformation - c
proots, qroots = roots(P, k), roots(Q, k)
all_roots = dict(proots)
all_roots.update(qroots)
scale = lcm([r.as_numer_denom()[1] for r, t in all_roots.items()
if r.is_rational])
f, P, Q, m = _transformation_c(f, x, P, Q, k, m, scale)
# transformation - a
qroots = roots(Q, k)
if qroots:
k_min = Min(*qroots.keys())
else:
k_min = S.Zero
shift = k_min + m
f, P, Q, m = _transformation_a(f, x, P, Q, k, m, shift)
l = (x*f).limit(x, 0)
if not isinstance(l, Limit) and l != 0: # Ideally should only be l != 0
return None
qroots = roots(Q, k)
if qroots:
k_max = Max(*qroots.keys())
else:
k_max = S.Zero
ind, mp = S.Zero, -oo
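    # Handle the Taylor coefficients below the start of the formula (indices
    # 0 .. k_max + m) separately:  finite ones are collected into the
    # independent part ``ind``; an infinite one signals a pole-like initial
    # term, in which case transformations (a) and (e) (shift, then
    # differentiate) are applied, the problem is solved recursively and the
    # differentiation is undone by integrating, with the constant of
    # integration fixed from the original function.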
for i in range(k_max + m + 1):
r = f.diff(x, i).limit(x, 0) / factorial(i)
if r.is_finite is False:
old_f = f
f, P, Q, m = _transformation_a(f, x, P, Q, k, m, i)
f, P, Q, m = _transformation_e(f, x, P, Q, k, m)
sol, ind, mp = _rsolve_hypergeometric(f, x, P, Q, k, m)
sol = _apply_integrate(sol, x, k)
sol = _apply_shift(sol, i)
ind = integrate(ind, x)
ind += (old_f - ind).limit(x, 0) # constant of integration
mp += 1
return sol, ind, mp
elif r:
ind += r*x**(i + shift)
pow_x = Rational((i + shift), scale)
if pow_x > mp:
mp = pow_x # maximum power of x
ind = ind.subs(x, x**(1/scale))
sol = _compute_formula(f, x, P, Q, k, m, k_max)
sol = _apply_shift(sol, shift)
sol = _apply_scale(sol, scale)
return sol, ind, mp
def rsolve_hypergeometric(f, x, P, Q, k, m):
"""
Solves RE of hypergeometric type.
Explanation
===========
Attempts to solve RE of the form
Q(k)*a(k + m) - P(k)*a(k)
Transformations that preserve Hypergeometric type:
a. x**n*f(x): b(k + m) = R(k - n)*b(k)
b. f(A*x): b(k + m) = A**m*R(k)*b(k)
c. f(x**n): b(k + n*m) = R(k/n)*b(k)
d. f(x**(1/m)): b(k + 1) = R(k*m)*b(k)
e. f'(x): b(k + m) = ((k + m + 1)/(k + 1))*R(k + 1)*b(k)
Some of these transformations have been used to solve the RE.
Returns
=======
formula : Expr
ind : Expr
Independent terms.
order : int
Examples
========
>>> from sympy import exp, ln, S
>>> from sympy.series.formal import rsolve_hypergeometric as rh
>>> from sympy.abc import x, k
>>> rh(exp(x), x, -S.One, (k + 1), k, 1)
(Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1)
>>> rh(ln(1 + x), x, k**2, k*(k + 1), k, 1)
(Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Eq(Mod(k, 1), 0)), (0, True)), x, 2)
References
==========
.. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf
.. [2] Power Series in Computer Algebra - Wolfram Koepf
"""
result = _rsolve_hypergeometric(f, x, P, Q, k, m)
if result is None:
return None
sol_list, ind, mp = result
sol_dict = defaultdict(lambda: S.Zero)
for res, cond in sol_list:
j, mk = cond.as_coeff_Add()
c = mk.coeff(k)
if j.is_integer is False:
res *= x**frac(j)
j = floor(j)
res = res.subs(k, (k - j) / c)
cond = Eq(k % c, j % c)
sol_dict[cond] += res # Group together formula for same conditions
sol = []
for cond, res in sol_dict.items():
sol.append((res, cond))
sol.append((S.Zero, True))
sol = Piecewise(*sol)
if mp is -oo:
s = S.Zero
elif mp.is_integer is False:
s = ceiling(mp)
else:
s = mp + 1
# save all the terms of
# form 1/x**k in ind
if s < 0:
ind += sum(sequence(sol * x**k, (k, s, -1)))
s = S.Zero
return (sol, ind, s)
def _solve_hyper_RE(f, x, RE, g, k):
"""See docstring of :func:`rsolve_hypergeometric` for details."""
terms = Add.make_args(RE)
if len(terms) == 2:
gs = list(RE.atoms(Function))
P, Q = map(RE.coeff, gs)
m = gs[1].args[0] - gs[0].args[0]
if m < 0:
P, Q = Q, P
m = abs(m)
return rsolve_hypergeometric(f, x, P, Q, k, m)
def _solve_explike_DE(f, x, DE, g, k):
"""Solves DE with constant coefficients."""
from sympy.solvers import rsolve
for t in Add.make_args(DE):
coeff, d = t.as_independent(g)
if coeff.free_symbols:
return
RE = exp_re(DE, g, k)
init = {}
for i in range(len(Add.make_args(RE))):
if i:
f = f.diff(x)
init[g(k).subs(k, i)] = f.limit(x, 0)
sol = rsolve(RE, g(k), init)
if sol:
return (sol / factorial(k), S.Zero, S.Zero)
def _solve_simple(f, x, DE, g, k):
"""Converts DE into RE and solves using :func:`rsolve`."""
from sympy.solvers import rsolve
RE = hyper_re(DE, g, k)
init = {}
for i in range(len(Add.make_args(RE))):
if i:
f = f.diff(x)
init[g(k).subs(k, i)] = f.limit(x, 0) / factorial(i)
sol = rsolve(RE, g(k), init)
if sol:
return (sol, S.Zero, S.Zero)
def _transform_explike_DE(DE, g, x, order, syms):
"""Converts DE with free parameters into DE with constant coefficients."""
from sympy.solvers.solveset import linsolve
eq = []
highest_coeff = DE.coeff(Derivative(g(x), x, order))
for i in range(order):
coeff = DE.coeff(Derivative(g(x), x, i))
coeff = (coeff / highest_coeff).expand().collect(x)
for t in Add.make_args(coeff):
eq.append(t)
temp = []
for e in eq:
if e.has(x):
break
elif e.has(Symbol):
temp.append(e)
else:
eq = temp
if eq:
sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s)))
if sol:
DE = DE.subs(sol)
DE = DE.factor().as_coeff_mul(Derivative)[1][0]
DE = DE.collect(Derivative(g(x)))
return DE
def _transform_DE_RE(DE, g, k, order, syms):
"""Converts DE with free parameters into RE of hypergeometric type."""
from sympy.solvers.solveset import linsolve
RE = hyper_re(DE, g, k)
eq = []
for i in range(1, order):
coeff = RE.coeff(g(k + i))
eq.append(coeff)
sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s)))
if sol:
m = Wild('m')
RE = RE.subs(sol)
RE = RE.factor().as_numer_denom()[0].collect(g(k + m))
RE = RE.as_coeff_mul(g)[1][0]
for i in range(order): # smallest order should be g(k)
if RE.coeff(g(k + i)) and i:
RE = RE.subs(k, k - i)
break
return RE
def solve_de(f, x, DE, order, g, k):
"""
Solves the DE.
Explanation
===========
Tries to solve DE by either converting into a RE containing two terms or
converting into a DE having constant coefficients.
Returns
=======
formula : Expr
ind : Expr
Independent terms.
order : int
Examples
========
>>> from sympy import Derivative as D, Function
>>> from sympy import exp, ln
>>> from sympy.series.formal import solve_de
>>> from sympy.abc import x, k
>>> f = Function('f')
>>> solve_de(exp(x), x, D(f(x), x) - f(x), 1, f, k)
(Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1)
>>> solve_de(ln(1 + x), x, (x + 1)*D(f(x), x, 2) + D(f(x)), 2, f, k)
(Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Eq(Mod(k, 1), 0)), (0, True)), x, 2)
"""
sol = None
syms = DE.free_symbols.difference({g, x})
if syms:
RE = _transform_DE_RE(DE, g, k, order, syms)
else:
RE = hyper_re(DE, g, k)
if not RE.free_symbols.difference({k}):
sol = _solve_hyper_RE(f, x, RE, g, k)
if sol:
return sol
if syms:
DE = _transform_explike_DE(DE, g, x, order, syms)
if not DE.free_symbols.difference({x}):
sol = _solve_explike_DE(f, x, DE, g, k)
if sol:
return sol
def hyper_algorithm(f, x, k, order=4):
"""
Hypergeometric algorithm for computing Formal Power Series.
Explanation
===========
Steps:
* Generates DE
    * Converts the DE into a RE
* Solves the RE
Examples
========
>>> from sympy import exp, ln
>>> from sympy.series.formal import hyper_algorithm
>>> from sympy.abc import x, k
>>> hyper_algorithm(exp(x), x, k)
(Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1)
>>> hyper_algorithm(ln(1 + x), x, k)
(Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Eq(Mod(k, 1), 0)), (0, True)), x, 2)
See Also
========
sympy.series.formal.simpleDE
sympy.series.formal.solve_de
"""
g = Function('g')
des = [] # list of DE's
sol = None
for DE, i in simpleDE(f, x, g, order):
if DE is not None:
sol = solve_de(f, x, DE, i, g, k)
if sol:
return sol
if not DE.free_symbols.difference({x}):
des.append(DE)
# If nothing works
# Try plain rsolve
for DE in des:
sol = _solve_simple(f, x, DE, g, k)
if sol:
return sol
def _compute_fps(f, x, x0, dir, hyper, order, rational, full):
"""Recursive wrapper to compute fps.
See :func:`compute_fps` for details.
"""
if x0 in [S.Infinity, S.NegativeInfinity]:
dir = S.One if x0 is S.Infinity else -S.One
temp = f.subs(x, 1/x)
result = _compute_fps(temp, x, 0, dir, hyper, order, rational, full)
if result is None:
return None
return (result[0], result[1].subs(x, 1/x), result[2].subs(x, 1/x))
elif x0 or dir == -S.One:
if dir == -S.One:
rep = -x + x0
rep2 = -x
rep2b = x0
else:
rep = x + x0
rep2 = x
rep2b = -x0
temp = f.subs(x, rep)
result = _compute_fps(temp, x, 0, S.One, hyper, order, rational, full)
if result is None:
return None
return (result[0], result[1].subs(x, rep2 + rep2b),
result[2].subs(x, rep2 + rep2b))
if f.is_polynomial(x):
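        # Polynomials are handled directly:  the k-th coefficient is
        # represented by the Coeff object (which stays unevaluated for
        # symbolic k) and the constant term becomes the independent part.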
k = Dummy('k')
ak = sequence(Coeff(f, x, k), (k, 1, oo))
xk = sequence(x**k, (k, 0, oo))
ind = f.coeff(x, 0)
return ak, xk, ind
# Break instances of Add
# this allows application of different
# algorithms on different terms increasing the
# range of admissible functions.
if isinstance(f, Add):
result = False
ak = sequence(S.Zero, (0, oo))
ind, xk = S.Zero, None
for t in Add.make_args(f):
res = _compute_fps(t, x, 0, S.One, hyper, order, rational, full)
if res:
if not result:
result = True
xk = res[1]
if res[0].start > ak.start:
seq = ak
s, f = ak.start, res[0].start
else:
seq = res[0]
s, f = res[0].start, ak.start
save = Add(*[z[0]*z[1] for z in zip(seq[0:(f - s)], xk[s:f])])
ak += res[0]
ind += res[2] + save
else:
ind += t
if result:
return ak, xk, ind
return None
    # The symbolic term symb, if present, is separated from the function;
    # otherwise symb is set to S.One.
syms = f.free_symbols.difference({x})
(f, symb) = expand(f).as_independent(*syms)
if symb.is_zero:
symb = S.One
symb = powsimp(symb)
result = None
# from here on it's x0=0 and dir=1 handling
k = Dummy('k')
if rational:
result = rational_algorithm(f, x, k, order, full)
if result is None and hyper:
result = hyper_algorithm(f, x, k, order)
if result is None:
return None
ak = sequence(result[0], (k, result[2], oo))
xk_formula = powsimp(x**k * symb)
xk = sequence(xk_formula, (k, 0, oo))
ind = powsimp(result[1] * symb)
return ak, xk, ind
def compute_fps(f, x, x0=0, dir=1, hyper=True, order=4, rational=True,
full=False):
"""
Computes the formula for Formal Power Series of a function.
Explanation
===========
Tries to compute the formula by applying the following techniques
(in order):
* rational_algorithm
* Hypergeometric algorithm
Parameters
==========
x : Symbol
x0 : number, optional
Point to perform series expansion about. Default is 0.
dir : {1, -1, '+', '-'}, optional
If dir is 1 or '+' the series is calculated from the right and
for -1 or '-' the series is calculated from the left. For smooth
functions this flag will not alter the results. Default is 1.
hyper : {True, False}, optional
Set hyper to False to skip the hypergeometric algorithm.
        By default it is set to True.
    order : int, optional
        Order of the derivative of ``f``. Default is 4.
rational : {True, False}, optional
Set rational to False to skip rational algorithm. By default it is set
to True.
full : {True, False}, optional
Set full to True to increase the range of rational algorithm.
See :func:`rational_algorithm` for details. By default it is set to
False.
Returns
=======
ak : sequence
Sequence of coefficients.
xk : sequence
Sequence of powers of x.
ind : Expr
Independent terms.
See Also
========
sympy.series.formal.rational_algorithm
sympy.series.formal.hyper_algorithm
"""
f = sympify(f)
x = sympify(x)
if not f.has(x):
return None
x0 = sympify(x0)
if dir == '+':
dir = S.One
elif dir == '-':
dir = -S.One
elif dir not in [S.One, -S.One]:
raise ValueError("Dir must be '+' or '-'")
else:
dir = sympify(dir)
return _compute_fps(f, x, x0, dir, hyper, order, rational, full)
class Coeff(Function):
"""
Coeff(p, x, n) represents the nth coefficient of the polynomial p in x
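    For a concrete polynomial the coefficient is extracted immediately; with a
    symbolic index the object stays unevaluated.
    Examples
    ========
    >>> from sympy.series.formal import Coeff
    >>> from sympy.abc import x, n
    >>> Coeff(x**2 + 3*x, x, 1)
    3
    >>> Coeff(x**2 + 3*x, x, n)
    Coeff(x**2 + 3*x, x, n)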
"""
@classmethod
def eval(cls, p, x, n):
if p.is_polynomial(x) and n.is_integer:
return p.coeff(x, n)
class FormalPowerSeries(SeriesBase):
"""
Represents Formal Power Series of a function.
Explanation
===========
    No computation is performed. This class should only be used to represent
a series. No checks are performed.
For computing a series use :func:`fps`.
See Also
========
sympy.series.formal.fps
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
def __init__(self, *args):
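        # args[4] holds the (ak, xk, ind) triple computed by compute_fps.
        # Pre-compute the auxiliary sequences used by coeff_bell(), compose()
        # and inverse():  the plain coefficient sequence, the factorials,
        # their product (the "Bell" coefficients) and an alternating sign
        # sequence.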
ak = args[4][0]
k = ak.variables[0]
self.ak_seq = sequence(ak.formula, (k, 1, oo))
self.fact_seq = sequence(factorial(k), (k, 1, oo))
self.bell_coeff_seq = self.ak_seq * self.fact_seq
self.sign_seq = sequence((-1, 1), (k, 1, oo))
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1]
@property
def x0(self):
return self.args[2]
@property
def dir(self):
return self.args[3]
@property
def ak(self):
return self.args[4][0]
@property
def xk(self):
return self.args[4][1]
@property
def ind(self):
return self.args[4][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def infinite(self):
"""Returns an infinite representation of the series"""
from sympy.concrete import Sum
ak, xk = self.ak, self.xk
k = ak.variables[0]
inf_sum = Sum(ak.formula * xk.formula, (k, ak.start, ak.stop))
return self.ind + inf_sum
def _get_pow_x(self, term):
"""Returns the power of x in a term."""
xterm, pow_x = term.as_independent(self.x)[1].as_base_exp()
if not xterm.has(self.x):
return S.Zero
return pow_x
def polynomial(self, n=6):
"""
Truncated series as polynomial.
Explanation
===========
    Returns the series expansion of ``f`` up to order ``O(x**n)``
    as a polynomial (without the ``O`` term).
"""
terms = []
sym = self.free_symbols
for i, t in enumerate(self):
xp = self._get_pow_x(t)
if xp.has(*sym):
xp = xp.as_coeff_add(*sym)[0]
if xp >= n:
break
elif xp.is_integer is True and i == n + 1:
break
elif t is not S.Zero:
terms.append(t)
return Add(*terms)
def truncate(self, n=6):
"""
Truncated series.
Explanation
===========
    Returns the truncated series expansion of ``f`` up to
    order ``O(x**n)``.
If n is ``None``, returns an infinite iterator.
"""
if n is None:
return iter(self)
x, x0 = self.x, self.x0
pt_xk = self.xk.coeff(n)
if x0 is S.NegativeInfinity:
x0 = S.Infinity
return self.polynomial(n) + Order(pt_xk, (x, x0))
def zero_coeff(self):
return self._eval_term(0)
def _eval_term(self, pt):
try:
pt_xk = self.xk.coeff(pt)
pt_ak = self.ak.coeff(pt).simplify() # Simplify the coefficients
except IndexError:
term = S.Zero
else:
term = (pt_ak * pt_xk)
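        # Independent terms are not covered by the (ak, xk) formula; an
        # independent term is added to the value at ``pt`` when its power of x
        # lies in [pt, pt + 1) (powers below 1 are attached to pt == 0).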
if self.ind:
ind = S.Zero
sym = self.free_symbols
for t in Add.make_args(self.ind):
pow_x = self._get_pow_x(t)
if pow_x.has(*sym):
pow_x = pow_x.as_coeff_add(*sym)[0]
if pt == 0 and pow_x < 1:
ind += t
elif pow_x >= pt and pow_x < pt + 1:
ind += t
term += ind
return term.collect(self.x)
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def _eval_as_leading_term(self, x, logx=None, cdir=0):
for t in self:
if t is not S.Zero:
return t
def _eval_derivative(self, x):
f = self.function.diff(x)
ind = self.ind.diff(x)
pow_xk = self._get_pow_x(self.xk.formula)
ak = self.ak
k = ak.variables[0]
if ak.formula.has(x):
form = []
for e, c in ak.formula.args:
temp = S.Zero
for t in Add.make_args(e):
pow_x = self._get_pow_x(t)
temp += t * (pow_xk + pow_x)
form.append((temp, c))
form = Piecewise(*form)
ak = sequence(form.subs(k, k + 1), (k, ak.start - 1, ak.stop))
else:
ak = sequence((ak.formula * pow_xk).subs(k, k + 1),
(k, ak.start - 1, ak.stop))
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def integrate(self, x=None, **kwargs):
"""
Integrate Formal Power Series.
Examples
========
>>> from sympy import fps, sin, integrate
>>> from sympy.abc import x
>>> f = fps(sin(x))
>>> f.integrate(x).truncate()
-1 + x**2/2 - x**4/24 + O(x**6)
>>> integrate(f, (x, 0, 1))
1 - cos(1)
"""
from sympy.integrals import integrate
if x is None:
x = self.x
elif iterable(x):
return integrate(self.function, x)
f = integrate(self.function, x)
ind = integrate(self.ind, x)
ind += (f - ind).limit(x, 0) # constant of integration
pow_xk = self._get_pow_x(self.xk.formula)
ak = self.ak
k = ak.variables[0]
if ak.formula.has(x):
form = []
for e, c in ak.formula.args:
temp = S.Zero
for t in Add.make_args(e):
pow_x = self._get_pow_x(t)
temp += t / (pow_xk + pow_x + 1)
form.append((temp, c))
form = Piecewise(*form)
ak = sequence(form.subs(k, k - 1), (k, ak.start + 1, ak.stop))
else:
ak = sequence((ak.formula / (pow_xk + 1)).subs(k, k - 1),
(k, ak.start + 1, ak.stop))
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def product(self, other, x=None, n=6):
"""
    Multiplies two Formal Power Series, using discrete convolution, and
    returns the truncated terms up to the specified order.
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(sin(x))
>>> f2 = fps(exp(x))
>>> f1.product(f2, x).truncate(4)
x + x**2 + x**3/3 + O(x**4)
See Also
========
sympy.discrete.convolutions
sympy.series.formal.FormalPowerSeriesProduct
"""
if n is None:
return iter(self)
other = sympify(other)
if not isinstance(other, FormalPowerSeries):
raise ValueError("Both series should be an instance of FormalPowerSeries"
" class.")
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
elif self.x != other.x:
raise ValueError("Both series should have the same symbol.")
return FormalPowerSeriesProduct(self, other)
def coeff_bell(self, n):
r"""
self.coeff_bell(n) returns a sequence of Bell polynomials of the second kind.
        Note that ``n`` should be an integer.
        The second kind of Bell polynomials (sometimes called "partial" Bell
        polynomials or incomplete Bell polynomials) are defined as
.. math::
B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
            \sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
See Also
========
sympy.functions.combinatorial.numbers.bell
"""
inner_coeffs = [bell(n, j, tuple(self.bell_coeff_seq[:n-j+1])) for j in range(1, n+1)]
k = Dummy('k')
return sequence(tuple(inner_coeffs), (k, 1, oo))
def compose(self, other, x=None, n=6):
r"""
Returns the truncated terms of the formal power series of the composed function,
up to specified ``n``.
Explanation
===========
If ``f`` and ``g`` are two formal power series of two different functions,
then the coefficient sequence ``ak`` of the composed formal power series `fp`
will be as follows.
.. math::
\sum\limits_{k=0}^{n} b_k B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(sin(x))
>>> f1.compose(f2, x).truncate()
1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6)
>>> f1.compose(f2, x).truncate(8)
1 + x + x**2/2 - x**4/8 - x**5/15 - x**6/240 + x**7/90 + O(x**8)
See Also
========
sympy.functions.combinatorial.numbers.bell
sympy.series.formal.FormalPowerSeriesCompose
References
==========
.. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974.
"""
if n is None:
return iter(self)
other = sympify(other)
if not isinstance(other, FormalPowerSeries):
raise ValueError("Both series should be an instance of FormalPowerSeries"
" class.")
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
elif self.x != other.x:
raise ValueError("Both series should have the same symbol.")
if other._eval_term(0).as_coeff_mul(other.x)[0] is not S.Zero:
raise ValueError("The formal power series of the inner function should not have any "
"constant coefficient term.")
return FormalPowerSeriesCompose(self, other)
def inverse(self, x=None, n=6):
r"""
Returns the truncated terms of the inverse of the formal power series,
up to specified ``n``.
Explanation
===========
        If ``f`` is a formal power series whose constant term is nonzero, then
        the coefficient sequence ``ak`` of the formal power series of ``1/f``
        will be as follows.
.. math::
\sum\limits_{k=0}^{n} (-1)^{k} x_0^{-k-1} B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, exp, cos
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(cos(x))
>>> f1.inverse(x).truncate()
1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + O(x**6)
>>> f2.inverse(x).truncate(8)
1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + O(x**8)
See Also
========
sympy.functions.combinatorial.numbers.bell
sympy.series.formal.FormalPowerSeriesInverse
References
==========
.. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974.
"""
if n is None:
return iter(self)
if self._eval_term(0).is_zero:
raise ValueError("Constant coefficient should exist for an inverse of a formal"
" power series to exist.")
return FormalPowerSeriesInverse(self)
def __add__(self, other):
other = sympify(other)
if isinstance(other, FormalPowerSeries):
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
x, y = self.x, other.x
f = self.function + other.function.subs(y, x)
if self.x not in f.free_symbols:
return f
ak = self.ak + other.ak
if self.ak.start > other.ak.start:
seq = other.ak
s, e = other.ak.start, self.ak.start
else:
seq = self.ak
s, e = self.ak.start, other.ak.start
save = Add(*[z[0]*z[1] for z in zip(seq[0:(e - s)], self.xk[s:e])])
ind = self.ind + other.ind + save
return self.func(f, x, self.x0, self.dir, (ak, self.xk, ind))
elif not other.has(self.x):
f = self.function + other
ind = self.ind + other
return self.func(f, self.x, self.x0, self.dir,
(self.ak, self.xk, ind))
return Add(self, other)
def __radd__(self, other):
return self.__add__(other)
def __neg__(self):
return self.func(-self.function, self.x, self.x0, self.dir,
(-self.ak, self.xk, -self.ind))
def __sub__(self, other):
return self.__add__(-other)
def __rsub__(self, other):
return (-self).__add__(other)
def __mul__(self, other):
other = sympify(other)
if other.has(self.x):
return Mul(self, other)
f = self.function * other
ak = self.ak.coeff_mul(other)
ind = self.ind * other
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def __rmul__(self, other):
return self.__mul__(other)
class FiniteFormalPowerSeries(FormalPowerSeries):
"""Base Class for Product, Compose and Inverse classes"""
def __init__(self, *args):
pass
@property
def ffps(self):
return self.args[0]
@property
def gfps(self):
return self.args[1]
@property
def f(self):
return self.ffps.function
@property
def g(self):
return self.gfps.function
@property
def infinite(self):
raise NotImplementedError("No infinite version for an object of"
" FiniteFormalPowerSeries class.")
def _eval_terms(self, n):
raise NotImplementedError("(%s)._eval_terms()" % self)
def _eval_term(self, pt):
raise NotImplementedError("By the current logic, one can get terms"
"upto a certain order, instead of getting term by term.")
def polynomial(self, n):
return self._eval_terms(n)
def truncate(self, n=6):
ffps = self.ffps
pt_xk = ffps.xk.coeff(n)
x, x0 = ffps.x, ffps.x0
return self.polynomial(n) + Order(pt_xk, (x, x0))
def _eval_derivative(self, x):
raise NotImplementedError
def integrate(self, x):
raise NotImplementedError
class FormalPowerSeriesProduct(FiniteFormalPowerSeries):
"""Represents the product of two formal power series of two functions.
Explanation
===========
No computation is performed. Terms are calculated using a term by term logic,
instead of a point by point logic.
There are two differences between a :obj:`FormalPowerSeries` object and a
:obj:`FormalPowerSeriesProduct` object. The first argument contains the two
functions involved in the product. Also, the coefficient sequence contains
    the coefficient sequences of the formal power series of both involved functions.
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.FiniteFormalPowerSeries
"""
def __init__(self, *args):
ffps, gfps = self.ffps, self.gfps
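        # Re-index both coefficient sequences to start at k = 0 so that the
        # discrete convolution in _eval_terms lines up with the powers of x.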
k = ffps.ak.variables[0]
self.coeff1 = sequence(ffps.ak.formula, (k, 0, oo))
k = gfps.ak.variables[0]
self.coeff2 = sequence(gfps.ak.formula, (k, 0, oo))
@property
def function(self):
"""Function of the product of two formal power series."""
return self.f * self.g
def _eval_terms(self, n):
"""
Returns the first ``n`` terms of the product formal power series.
Term by term logic is implemented here.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(sin(x))
>>> f2 = fps(exp(x))
>>> fprod = f1.product(f2, x)
>>> fprod._eval_terms(4)
x**3/3 + x**2 + x
See Also
========
sympy.series.formal.FormalPowerSeries.product
"""
coeff1, coeff2 = self.coeff1, self.coeff2
aks = convolution(coeff1[:n], coeff2[:n])
terms = []
for i in range(0, n):
terms.append(aks[i] * self.ffps.xk.coeff(i))
return Add(*terms)
class FormalPowerSeriesCompose(FiniteFormalPowerSeries):
"""
Represents the composed formal power series of two functions.
Explanation
===========
No computation is performed. Terms are calculated using a term by term logic,
instead of a point by point logic.
There are two differences between a :obj:`FormalPowerSeries` object and a
:obj:`FormalPowerSeriesCompose` object. The first argument contains the outer
    function and the inner function involved in the composition. Also, the
coefficient sequence contains the generic sequence which is to be multiplied
by a custom ``bell_seq`` finite sequence. The finite terms will then be added up to
get the final terms.
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.FiniteFormalPowerSeries
"""
@property
def function(self):
"""Function for the composed formal power series."""
f, g, x = self.f, self.g, self.ffps.x
return f.subs(x, g)
def _eval_terms(self, n):
"""
Returns the first `n` terms of the composed formal power series.
Term by term logic is implemented here.
Explanation
===========
The coefficient sequence of the :obj:`FormalPowerSeriesCompose` object is the generic sequence.
It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get
the final terms for the polynomial.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(sin(x))
>>> fcomp = f1.compose(f2, x)
>>> fcomp._eval_terms(6)
-x**5/15 - x**4/8 + x**2/2 + x + 1
>>> fcomp._eval_terms(8)
x**7/90 - x**6/240 - x**5/15 - x**4/8 + x**2/2 + x + 1
See Also
========
sympy.series.formal.FormalPowerSeries.compose
sympy.series.formal.FormalPowerSeries.coeff_bell
"""
ffps, gfps = self.ffps, self.gfps
terms = [ffps.zero_coeff()]
for i in range(1, n):
bell_seq = gfps.coeff_bell(i)
seq = (ffps.bell_coeff_seq * bell_seq)
terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i))
return Add(*terms)
class FormalPowerSeriesInverse(FiniteFormalPowerSeries):
"""
Represents the Inverse of a formal power series.
Explanation
===========
No computation is performed. Terms are calculated using a term by term logic,
instead of a point by point logic.
There is a single difference between a :obj:`FormalPowerSeries` object and a
:obj:`FormalPowerSeriesInverse` object. The coefficient sequence contains the
generic sequence which is to be multiplied by a custom ``bell_seq`` finite sequence.
The finite terms will then be added up to get the final terms.
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.FiniteFormalPowerSeries
"""
def __init__(self, *args):
ffps = self.ffps
k = ffps.xk.variables[0]
inv = ffps.zero_coeff()
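        # aux_seq[k] = (-1)**k * factorial(k) * a0**(-(k + 1)), where a0 is
        # the constant term of the series; cf. the inversion formula quoted
        # in inverse().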
inv_seq = sequence(inv ** (-(k + 1)), (k, 1, oo))
self.aux_seq = ffps.sign_seq * ffps.fact_seq * inv_seq
@property
def function(self):
"""Function for the inverse of a formal power series."""
f = self.f
return 1 / f
@property
def g(self):
raise ValueError("Only one function is considered while performing"
"inverse of a formal power series.")
@property
def gfps(self):
raise ValueError("Only one function is considered while performing"
"inverse of a formal power series.")
def _eval_terms(self, n):
"""
        Returns the first ``n`` terms of the inverse of the formal power series.
Term by term logic is implemented here.
Explanation
===========
The coefficient sequence of the `FormalPowerSeriesInverse` object is the generic sequence.
It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get
the final terms for the polynomial.
Examples
========
>>> from sympy import fps, exp, cos
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(cos(x))
>>> finv1, finv2 = f1.inverse(), f2.inverse()
>>> finv1._eval_terms(6)
-x**5/120 + x**4/24 - x**3/6 + x**2/2 - x + 1
>>> finv2._eval_terms(8)
61*x**6/720 + 5*x**4/24 + x**2/2 + 1
See Also
========
sympy.series.formal.FormalPowerSeries.inverse
sympy.series.formal.FormalPowerSeries.coeff_bell
"""
ffps = self.ffps
terms = [ffps.zero_coeff()]
for i in range(1, n):
bell_seq = ffps.coeff_bell(i)
seq = (self.aux_seq * bell_seq)
terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i))
return Add(*terms)
def fps(f, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False):
"""
Generates Formal Power Series of ``f``.
Explanation
===========
Returns the formal series expansion of ``f`` around ``x = x0``
with respect to ``x`` in the form of a ``FormalPowerSeries`` object.
Formal Power Series is represented using an explicit formula
computed using different algorithms.
    See :func:`compute_fps` for more details regarding the computation
    of the formula.
Parameters
==========
x : Symbol, optional
        If x is None and ``f`` is univariate, the univariate symbol will be
        supplied, otherwise an error will be raised.
x0 : number, optional
Point to perform series expansion about. Default is 0.
dir : {1, -1, '+', '-'}, optional
If dir is 1 or '+' the series is calculated from the right and
for -1 or '-' the series is calculated from the left. For smooth
functions this flag will not alter the results. Default is 1.
hyper : {True, False}, optional
Set hyper to False to skip the hypergeometric algorithm.
        By default it is set to True.
    order : int, optional
        Order of the derivative of ``f``. Default is 4.
rational : {True, False}, optional
Set rational to False to skip rational algorithm. By default it is set
to True.
full : {True, False}, optional
Set full to True to increase the range of rational algorithm.
See :func:`rational_algorithm` for details. By default it is set to
False.
Examples
========
>>> from sympy import fps, ln, atan, sin
>>> from sympy.abc import x, n
Rational Functions
>>> fps(ln(1 + x)).truncate()
x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
>>> fps(atan(x), full=True).truncate()
x - x**3/3 + x**5/5 + O(x**6)
Symbolic Functions
>>> fps(x**n*sin(x**2), x).truncate(8)
-x**(n + 6)/6 + x**(n + 2) + O(x**(n + 8))
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.compute_fps
"""
f = sympify(f)
if x is None:
free = f.free_symbols
if len(free) == 1:
x = free.pop()
elif not free:
return f
else:
raise NotImplementedError("multivariate formal power series")
result = compute_fps(f, x, x0, dir, hyper, order, rational, full)
if result is None:
return f
return FormalPowerSeries(f, x, x0, dir, result)
from sympy.core import S, sympify, Expr, Dummy, Add, Mul
from sympy.core.cache import cacheit
from sympy.core.containers import Tuple
from sympy.core.function import Function, PoleError, expand_power_base, expand_log
from sympy.core.sorting import default_sort_key
from sympy.sets.sets import Complement
from sympy.utilities.iterables import uniq, is_sequence
class Order(Expr):
r""" Represents the limiting behavior of some function.
Explanation
===========
The order of a function characterizes the function based on the limiting
behavior of the function as it goes to some limit. Only taking the limit
point to be a number is currently supported. This is expressed in
big O notation [1]_.
The formal definition for the order of a function `g(x)` about a point `a`
is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if for any
`\delta > 0` there exists a `M > 0` such that `|g(x)| \leq M|f(x)|` for
`|x-a| < \delta`. This is equivalent to `\lim_{x \rightarrow a}
\sup |g(x)/f(x)| < \infty`.
Let's illustrate it on the following example by taking the expansion of
`\sin(x)` about 0:
.. math ::
\sin(x) = x - x^3/3! + O(x^5)
where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition
of `O`, for any `\delta > 0` there is an `M` such that:
.. math ::
        \left|x^5/5! - x^7/7! + \cdots\right| \le M|x^5| \text{ for } |x| < \delta
or by the alternate definition:
.. math ::
        \lim_{x \rightarrow 0} \left| (x^5/5! - x^7/7! + \cdots) / x^5 \right| < \infty
which surely is true, because
.. math ::
        \lim_{x \rightarrow 0} \left| (x^5/5! - x^7/7! + \cdots) / x^5 \right| = 1/5!
As it is usually used, the order of a function can be intuitively thought
of representing all terms of powers greater than the one specified. For
example, `O(x^3)` corresponds to any terms proportional to `x^3,
x^4,\ldots` and any higher power. For a polynomial, this leaves terms
proportional to `x^2`, `x` and constants.
Examples
========
>>> from sympy import O, oo, cos, pi
>>> from sympy.abc import x, y
>>> O(x + x**2)
O(x)
>>> O(x + x**2, (x, 0))
O(x)
>>> O(x + x**2, (x, oo))
O(x**2, (x, oo))
>>> O(1 + x*y)
O(1, x, y)
>>> O(1 + x*y, (x, 0), (y, 0))
O(1, x, y)
>>> O(1 + x*y, (x, oo), (y, oo))
O(x*y, (x, oo), (y, oo))
>>> O(1) in O(1, x)
True
>>> O(1, x) in O(1)
False
>>> O(x) in O(1, x)
True
>>> O(x**2) in O(x)
True
>>> O(x)*x
O(x**2)
>>> O(x) - O(x)
O(x)
>>> O(cos(x))
O(1)
>>> O(cos(x), (x, pi/2))
O(x - pi/2, (x, pi/2))
References
==========
.. [1] `Big O notation <https://en.wikipedia.org/wiki/Big_O_notation>`_
Notes
=====
In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading
term. ``O(f(x), x)`` is automatically transformed to
``O(f(x).as_leading_term(x),x)``.
``O(expr*f(x), x)`` is ``O(f(x), x)``
``O(expr, x)`` is ``O(1)``
``O(0, x)`` is 0.
Multivariate O is also supported:
``O(f(x, y), x, y)`` is transformed to
``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)``
In the multivariate case, it is assumed the limits w.r.t. the various
symbols commute.
If no symbols are passed then all symbols in the expression are used
and the limit point is assumed to be zero.
"""
is_Order = True
__slots__ = ()
@cacheit
def __new__(cls, expr, *args, **kwargs):
expr = sympify(expr)
if not args:
if expr.is_Order:
variables = expr.variables
point = expr.point
else:
variables = list(expr.free_symbols)
point = [S.Zero]*len(variables)
else:
args = list(args if is_sequence(args) else [args])
variables, point = [], []
if is_sequence(args[0]):
for a in args:
v, p = list(map(sympify, a))
variables.append(v)
point.append(p)
else:
variables = list(map(sympify, args))
point = [S.Zero]*len(variables)
if not all(v.is_symbol for v in variables):
raise TypeError('Variables are not symbols, got %s' % variables)
if len(list(uniq(variables))) != len(variables):
raise ValueError('Variables are supposed to be unique symbols, got %s' % variables)
if expr.is_Order:
expr_vp = dict(expr.args[1:])
new_vp = dict(expr_vp)
vp = dict(zip(variables, point))
for v, p in vp.items():
if v in new_vp.keys():
if p != new_vp[v]:
raise NotImplementedError(
"Mixing Order at different points is not supported.")
else:
new_vp[v] = p
if set(expr_vp.keys()) == set(new_vp.keys()):
return expr
else:
variables = list(new_vp.keys())
point = [new_vp[v] for v in variables]
if expr is S.NaN:
return S.NaN
if any(x in p.free_symbols for x in variables for p in point):
raise ValueError('Got %s as a point.' % point)
if variables:
if any(p != point[0] for p in point):
raise NotImplementedError(
"Multivariable orders at different points are not supported.")
if point[0] is S.Infinity:
s = {k: 1/Dummy() for k in variables}
rs = {1/v: 1/k for k, v in s.items()}
ps = [S.Zero for p in point]
elif point[0] is S.NegativeInfinity:
s = {k: -1/Dummy() for k in variables}
rs = {-1/v: -1/k for k, v in s.items()}
ps = [S.Zero for p in point]
elif point[0] is not S.Zero:
s = {k: Dummy() + point[0] for k in variables}
rs = {(v - point[0]).together(): k - point[0] for k, v in s.items()}
ps = [S.Zero for p in point]
else:
s = ()
rs = ()
ps = list(point)
expr = expr.subs(s)
if expr.is_Add:
expr = expr.factor()
if s:
args = tuple([r[0] for r in rs.items()])
else:
args = tuple(variables)
if len(variables) > 1:
# XXX: better way? We need this expand() to
# workaround e.g: expr = x*(x + y).
# (x*(x + y)).as_leading_term(x, y) currently returns
# x*y (wrong order term!). That's why we want to deal with
# expand()'ed expr (handled in "if expr.is_Add" branch below).
expr = expr.expand()
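            # Iterate to a fixed point:  extracting the leading order of a sum
            # (or the leading term otherwise) may itself produce an expression
            # whose leading behaviour still has to be extracted, so repeat
            # until the expression stops changing.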
old_expr = None
while old_expr != expr:
old_expr = expr
if expr.is_Add:
lst = expr.extract_leading_order(args)
expr = Add(*[f.expr for (e, f) in lst])
elif expr:
try:
expr = expr.as_leading_term(*args)
except PoleError:
if isinstance(expr, Function) or\
all(isinstance(arg, Function) for arg in expr.args):
# It is not possible to simplify an expression
# containing only functions (which raise error on
# call to leading term) further
pass
else:
orders = []
pts = tuple(zip(args, ps))
for arg in expr.args:
try:
lt = arg.as_leading_term(*args)
except PoleError:
lt = arg
if lt not in args:
order = Order(lt)
else:
order = Order(lt, *pts)
orders.append(order)
if expr.is_Add:
new_expr = Order(Add(*orders), *pts)
if new_expr.is_Add:
new_expr = Order(Add(*[a.expr for a in new_expr.args]), *pts)
expr = new_expr.expr
elif expr.is_Mul:
expr = Mul(*[a.expr for a in orders])
elif expr.is_Pow:
expr = orders[0].expr**orders[1].expr
expr = expr.as_independent(*args, as_Add=False)[1]
expr = expand_power_base(expr)
expr = expand_log(expr)
if len(args) == 1:
# The definition of O(f(x)) symbol explicitly stated that
# the argument of f(x) is irrelevant. That's why we can
# combine some power exponents (only "on top" of the
# expression tree for f(x)), e.g.:
# x**p * (-x)**q -> x**(p+q) for real p, q.
x = args[0]
margs = list(Mul.make_args(
expr.as_independent(x, as_Add=False)[1]))
for i, t in enumerate(margs):
if t.is_Pow:
b, q = t.args
if b in (x, -x) and q.is_real and not q.has(x):
margs[i] = x**q
elif b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
elif b.is_Mul and b.args[0] is S.NegativeOne:
b = -b
if b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
expr = Mul(*margs)
expr = expr.subs(rs)
if expr.is_Order:
expr = expr.expr
if not expr.has(*variables) and not expr.is_zero:
expr = S.One
# create Order instance:
vp = dict(zip(variables, point))
variables.sort(key=default_sort_key)
point = [vp[v] for v in variables]
args = (expr,) + Tuple(*zip(variables, point))
obj = Expr.__new__(cls, *args)
return obj
def _eval_nseries(self, x, n, logx, cdir=0):
return self
@property
def expr(self):
return self.args[0]
@property
def variables(self):
if self.args[1:]:
return tuple(x[0] for x in self.args[1:])
else:
return ()
@property
def point(self):
if self.args[1:]:
return tuple(x[1] for x in self.args[1:])
else:
return ()
@property
def free_symbols(self):
return self.expr.free_symbols | set(self.variables)
def _eval_power(b, e):
if e.is_Number and e.is_nonnegative:
return b.func(b.expr ** e, *b.args[1:])
if e == O(1):
return b
return
def as_expr_variables(self, order_symbols):
if order_symbols is None:
order_symbols = self.args[1:]
else:
if (not all(o[1] == order_symbols[0][1] for o in order_symbols) and
not all(p == self.point[0] for p in self.point)): # pragma: no cover
raise NotImplementedError('Order at points other than 0 '
'or oo not supported, got %s as a point.' % self.point)
if order_symbols and order_symbols[0][1] != self.point[0]:
raise NotImplementedError(
"Multiplying Order at different points is not supported.")
order_symbols = dict(order_symbols)
for s, p in dict(self.args[1:]).items():
if s not in order_symbols.keys():
order_symbols[s] = p
order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0]))
return self.expr, tuple(order_symbols)
def removeO(self):
return S.Zero
def getO(self):
return self
@cacheit
def contains(self, expr):
r"""
Return True if expr belongs to Order(self.expr, \*self.variables).
Return False if self belongs to expr.
Return None if the inclusion relation cannot be determined
(e.g. when self and expr have different symbols).
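        For instance (consistent with the examples in the class docstring):
        >>> from sympy import Order
        >>> from sympy.abc import x
        >>> Order(x).contains(x**2)
        True
        >>> Order(x**2).contains(x)
        False
        >>> Order(x).contains(Order(x**2))
        True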
"""
from sympy.simplify.powsimp import powsimp
expr = sympify(expr)
if expr.is_zero:
return True
if expr is S.NaN:
return False
point = self.point[0] if self.point else S.Zero
if expr.is_Order:
if (any(p != point for p in expr.point) or
any(p != point for p in self.point)):
return None
if expr.expr == self.expr:
# O(1) + O(1), O(1) + O(1, x), etc.
return all(x in self.args[1:] for x in expr.args[1:])
if expr.expr.is_Add:
return all(self.contains(x) for x in expr.expr.args)
if self.expr.is_Add and point.is_zero:
return any(self.func(x, *self.args[1:]).contains(expr)
for x in self.expr.args)
if self.variables and expr.variables:
common_symbols = tuple(
[s for s in self.variables if s in expr.variables])
elif self.variables:
common_symbols = self.variables
else:
common_symbols = expr.variables
if not common_symbols:
return None
if (self.expr.is_Pow and len(self.variables) == 1
and self.variables == expr.variables):
symbol = self.variables[0]
other = expr.expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
r = None
ratio = self.expr/expr.expr
ratio = powsimp(ratio, deep=True, combine='exp')
for s in common_symbols:
from sympy.series.limits import Limit
l = Limit(ratio, s, point).doit(heuristics=False)
if not isinstance(l, Limit):
l = l != 0
else:
l = None
if r is None:
r = l
else:
if r != l:
return
return r
if self.expr.is_Pow and len(self.variables) == 1:
symbol = self.variables[0]
other = expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
obj = self.func(expr, *self.args[1:])
return self.contains(obj)
def __contains__(self, other):
result = self.contains(other)
if result is None:
raise TypeError('contains did not evaluate to a bool')
return result
def _eval_subs(self, old, new):
if old in self.variables:
newexpr = self.expr.subs(old, new)
i = self.variables.index(old)
newvars = list(self.variables)
newpt = list(self.point)
if new.is_symbol:
newvars[i] = new
else:
syms = new.free_symbols
if len(syms) == 1 or old in syms:
if old in syms:
var = self.variables[i]
else:
var = syms.pop()
# First, try to substitute self.point in the "new"
# expr to see if this is a fixed point.
# E.g. O(y).subs(y, sin(x))
point = new.subs(var, self.point[i])
if point != self.point[i]:
from sympy.solvers.solveset import solveset
d = Dummy()
sol = solveset(old - new.subs(var, d), d)
if isinstance(sol, Complement):
e1 = sol.args[0]
e2 = sol.args[1]
sol = set(e1) - set(e2)
res = [dict(zip((d, ), sol))]
point = d.subs(res[0]).limit(old, self.point[i])
newvars[i] = var
newpt[i] = point
elif old not in syms:
del newvars[i], newpt[i]
if not syms and new == self.point[i]:
newvars.extend(syms)
newpt.extend([S.Zero]*len(syms))
else:
return
return Order(newexpr, *zip(newvars, newpt))
def _eval_conjugate(self):
expr = self.expr._eval_conjugate()
if expr is not None:
return self.func(expr, *self.args[1:])
def _eval_derivative(self, x):
return self.func(self.expr.diff(x), *self.args[1:]) or self
def _eval_transpose(self):
expr = self.expr._eval_transpose()
if expr is not None:
return self.func(expr, *self.args[1:])
def __neg__(self):
return self
O = Order
"""
Expand Hypergeometric (and Meijer G) functions into named
special functions.
The algorithm for doing this uses a collection of lookup tables of
hypergeometric functions, and various of their properties, to expand
many hypergeometric functions in terms of special functions.
It is based on the following paper:
Kelly B. Roach. Meijer G Function Representations.
In: Proceedings of the 1997 International Symposium on Symbolic and
Algebraic Computation, pages 205-211, New York, 1997. ACM.
It is described in great(er) detail in the Sphinx documentation.
"""
# SUMMARY OF EXTENSIONS FOR MEIJER G FUNCTIONS
#
# o z**rho G(ap, bq; z) = G(ap + rho, bq + rho; z)
#
# o denote z*d/dz by D
#
# o It is helpful to keep in mind that ap and bq play essentially symmetric
# roles: G(1/z) has slightly altered parameters, with ap and bq interchanged.
#
# o There are four shift operators:
# A_J = b_J - D, J = 1, ..., n
# B_J = 1 - a_j + D, J = 1, ..., m
# C_J = -b_J + D, J = m+1, ..., q
# D_J = a_J - 1 - D, J = n+1, ..., p
#
# A_J, C_J increment b_J
# B_J, D_J decrement a_J
#
# o The corresponding four inverse-shift operators are defined if there
# is no cancellation. Thus e.g. an index a_J (upper or lower) can be
# incremented if a_J != b_i for i = 1, ..., q.
#
# o Order reduction: if b_j - a_i is a non-negative integer, where
# j <= m and i > n, the corresponding quotient of gamma functions reduces
# to a polynomial. Hence the G function can be expressed using a G-function
# of lower order.
# Similarly if j > m and i <= n.
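#   (Concretely, with the usual Mellin-Barnes integrand conventions, the
#   quotient gamma(b_j - s)/gamma(a_i - s) equals the Pochhammer polynomial
#   (a_i - s)_(b_j - a_i) whenever b_j - a_i is a non-negative integer.)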
#
# Secondly, there are paired index theorems [Adamchik, The evaluation of
# integrals of Bessel functions via G-function identities]. Suppose there
# are three parameters a, b, c, where a is an a_i, i <= n, b is a b_j,
# j <= m and c is a denominator parameter (i.e. a_i, i > n or b_j, j > m).
# Suppose further all three differ by integers.
# Then the order can be reduced.
# TODO work this out in detail.
#
# o An index quadruple is called suitable if its order cannot be reduced.
# If there exists a sequence of shift operators transforming one index
# quadruple into another, we say one is reachable from the other.
#
# o Deciding if one index quadruple is reachable from another is tricky. For
# this reason, we use hand-built routines to match and instantiate formulas.
#
from collections import defaultdict
from itertools import product
from functools import reduce
from sympy import SYMPY_DEBUG
from sympy.core import (S, Dummy, symbols, sympify, Tuple, expand, I, pi, Mul,
EulerGamma, oo, zoo, expand_func, Add, nan, Expr, Rational)
from sympy.core.mod import Mod
from sympy.core.sorting import default_sort_key
from sympy.functions import (exp, sqrt, root, log, lowergamma, cos,
besseli, gamma, uppergamma, expint, erf, sin, besselj, Ei, Ci, Si, Shi,
sinh, cosh, Chi, fresnels, fresnelc, polar_lift, exp_polar, floor, ceiling,
rf, factorial, lerchphi, Piecewise, re, elliptic_k, elliptic_e)
from sympy.functions.elementary.complexes import polarify, unpolarify
from sympy.functions.special.hyper import (hyper, HyperRep_atanh,
HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1,
HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2,
HyperRep_cosasin, HyperRep_sinasin, meijerg)
from sympy.polys import poly, Poly
from sympy.simplify.powsimp import powdenest
from sympy.utilities.iterables import sift
# function to define "buckets"
def _mod1(x):
# TODO see if this can work as Mod(x, 1); this will require
# different handling of the "buckets" since these need to
# be sorted and that fails when there is a mixture of
# integers and expressions with parameters. With the current
# Mod behavior, Mod(k, 1) == Mod(1, 1) == 0 if k is an integer.
# Although the sorting can be done with Basic.compare, this may
# still require different handling of the sorted buckets.
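    # E.g. _mod1(S(5)/2) -> 1/2 and _mod1(k + S(3)/2) -> 1/2 + k.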
if x.is_Number:
return Mod(x, 1)
c, x = x.as_coeff_Add()
return Mod(c, 1) + x
# leave add formulae at the top for easy reference
def add_formulae(formulae):
""" Create our knowledge base. """
from sympy.matrices import Matrix
a, b, c, z = symbols('a b c, z', cls=Dummy)
def add(ap, bq, res):
func = Hyper_Function(ap, bq)
formulae.append(Formula(func, z, res, (a, b, c)))
def addb(ap, bq, B, C, M):
func = Hyper_Function(ap, bq)
formulae.append(Formula(func, z, None, (a, b, c), B, C, M))
# Luke, Y. L. (1969), The Special Functions and Their Approximations,
# Volume 1, section 6.2
# 0F0
add((), (), exp(z))
# 1F0
add((a, ), (), HyperRep_power1(-a, z))
# 2F1
addb((a, a - S.Half), (2*a, ),
Matrix([HyperRep_power2(a, z),
HyperRep_power2(a + S.Half, z)/2]),
Matrix([[1, 0]]),
Matrix([[(a - S.Half)*z/(1 - z), (S.Half - a)*z/(1 - z)],
[a/(1 - z), a*(z - 2)/(1 - z)]]))
addb((1, 1), (2, ),
Matrix([HyperRep_log1(z), 1]), Matrix([[-1/z, 0]]),
Matrix([[0, z/(z - 1)], [0, 0]]))
addb((S.Half, 1), (S('3/2'), ),
Matrix([HyperRep_atanh(z), 1]),
Matrix([[1, 0]]),
Matrix([[Rational(-1, 2), 1/(1 - z)/2], [0, 0]]))
addb((S.Half, S.Half), (S('3/2'), ),
Matrix([HyperRep_asin1(z), HyperRep_power1(Rational(-1, 2), z)]),
Matrix([[1, 0]]),
Matrix([[Rational(-1, 2), S.Half], [0, z/(1 - z)/2]]))
addb((a, S.Half + a), (S.Half, ),
Matrix([HyperRep_sqrts1(-a, z), -HyperRep_sqrts2(-a - S.Half, z)]),
Matrix([[1, 0]]),
Matrix([[0, -a],
[z*(-2*a - 1)/2/(1 - z), S.Half - z*(-2*a - 1)/(1 - z)]]))
# A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
    # Integrals and Series: More Special Functions, Vol. 3.
# Gordon and Breach Science Publisher
addb([a, -a], [S.Half],
Matrix([HyperRep_cosasin(a, z), HyperRep_sinasin(a, z)]),
Matrix([[1, 0]]),
Matrix([[0, -a], [a*z/(1 - z), 1/(1 - z)/2]]))
addb([1, 1], [3*S.Half],
Matrix([HyperRep_asin2(z), 1]), Matrix([[1, 0]]),
Matrix([[(z - S.Half)/(1 - z), 1/(1 - z)/2], [0, 0]]))
# Complete elliptic integrals K(z) and E(z), both a 2F1 function
addb([S.Half, S.Half], [S.One],
Matrix([elliptic_k(z), elliptic_e(z)]),
Matrix([[2/pi, 0]]),
Matrix([[Rational(-1, 2), -1/(2*z-2)],
[Rational(-1, 2), S.Half]]))
addb([Rational(-1, 2), S.Half], [S.One],
Matrix([elliptic_k(z), elliptic_e(z)]),
Matrix([[0, 2/pi]]),
Matrix([[Rational(-1, 2), -1/(2*z-2)],
[Rational(-1, 2), S.Half]]))
# 3F2
addb([Rational(-1, 2), 1, 1], [S.Half, 2],
Matrix([z*HyperRep_atanh(z), HyperRep_log1(z), 1]),
Matrix([[Rational(-2, 3), -S.One/(3*z), Rational(2, 3)]]),
Matrix([[S.Half, 0, z/(1 - z)/2],
[0, 0, z/(z - 1)],
[0, 0, 0]]))
# actually the formula for 3/2 is much nicer ...
addb([Rational(-1, 2), 1, 1], [2, 2],
Matrix([HyperRep_power1(S.Half, z), HyperRep_log2(z), 1]),
Matrix([[Rational(4, 9) - 16/(9*z), 4/(3*z), 16/(9*z)]]),
Matrix([[z/2/(z - 1), 0, 0], [1/(2*(z - 1)), 0, S.Half], [0, 0, 0]]))
# 1F1
addb([1], [b], Matrix([z**(1 - b) * exp(z) * lowergamma(b - 1, z), 1]),
Matrix([[b - 1, 0]]), Matrix([[1 - b + z, 1], [0, 0]]))
addb([a], [2*a],
Matrix([z**(S.Half - a)*exp(z/2)*besseli(a - S.Half, z/2)
* gamma(a + S.Half)/4**(S.Half - a),
z**(S.Half - a)*exp(z/2)*besseli(a + S.Half, z/2)
* gamma(a + S.Half)/4**(S.Half - a)]),
Matrix([[1, 0]]),
Matrix([[z/2, z/2], [z/2, (z/2 - 2*a)]]))
mz = polar_lift(-1)*z
addb([a], [a + 1],
Matrix([mz**(-a)*a*lowergamma(a, mz), a*exp(z)]),
Matrix([[1, 0]]),
Matrix([[-a, 1], [0, z]]))
# This one is redundant.
add([Rational(-1, 2)], [S.Half], exp(z) - sqrt(pi*z)*(-I)*erf(I*sqrt(z)))
# Added to get nice results for Laplace transform of Fresnel functions
# http://functions.wolfram.com/07.22.03.6437.01
# Basic rule
#add([1], [Rational(3, 4), Rational(5, 4)],
# sqrt(pi) * (cos(2*sqrt(polar_lift(-1)*z))*fresnelc(2*root(polar_lift(-1)*z,4)/sqrt(pi)) +
# sin(2*sqrt(polar_lift(-1)*z))*fresnels(2*root(polar_lift(-1)*z,4)/sqrt(pi)))
# / (2*root(polar_lift(-1)*z,4)))
# Manually tuned rule
addb([1], [Rational(3, 4), Rational(5, 4)],
Matrix([ sqrt(pi)*(I*sinh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
+ cosh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)))
* exp(-I*pi/4)/(2*root(z, 4)),
sqrt(pi)*root(z, 4)*(sinh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
+ I*cosh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)))
*exp(-I*pi/4)/2,
1 ]),
Matrix([[1, 0, 0]]),
Matrix([[Rational(-1, 4), 1, Rational(1, 4)],
[ z, Rational(1, 4), 0],
[ 0, 0, 0]]))
# 2F2
addb([S.Half, a], [Rational(3, 2), a + 1],
Matrix([a/(2*a - 1)*(-I)*sqrt(pi/z)*erf(I*sqrt(z)),
a/(2*a - 1)*(polar_lift(-1)*z)**(-a)*
lowergamma(a, polar_lift(-1)*z),
a/(2*a - 1)*exp(z)]),
Matrix([[1, -1, 0]]),
Matrix([[Rational(-1, 2), 0, 1], [0, -a, 1], [0, 0, z]]))
# We make a "basis" of four functions instead of three, and give EulerGamma
# an extra slot (it could just be a coefficient to 1). The advantage is
# that this way Polys will not see multivariate polynomials (it treats
# EulerGamma as an indeterminate), which is *way* faster.
addb([1, 1], [2, 2],
Matrix([Ei(z) - log(z), exp(z), 1, EulerGamma]),
Matrix([[1/z, 0, 0, -1/z]]),
Matrix([[0, 1, -1, 0], [0, z, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]))
# 0F1
add((), (S.Half, ), cosh(2*sqrt(z)))
addb([], [b],
Matrix([gamma(b)*z**((1 - b)/2)*besseli(b - 1, 2*sqrt(z)),
gamma(b)*z**(1 - b/2)*besseli(b, 2*sqrt(z))]),
Matrix([[1, 0]]), Matrix([[0, 1], [z, (1 - b)]]))
# 0F3
x = 4*z**Rational(1, 4)
def fp(a, z):
return besseli(a, x) + besselj(a, x)
def fm(a, z):
return besseli(a, x) - besselj(a, x)
# TODO branching
addb([], [S.Half, a, a + S.Half],
Matrix([fp(2*a - 1, z), fm(2*a, z)*z**Rational(1, 4),
fm(2*a - 1, z)*sqrt(z), fp(2*a, z)*z**Rational(3, 4)])
* 2**(-2*a)*gamma(2*a)*z**((1 - 2*a)/4),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, 1, 0, 0],
[0, S.Half - a, 1, 0],
[0, 0, S.Half, 1],
[z, 0, 0, 1 - a]]))
x = 2*(4*z)**Rational(1, 4)*exp_polar(I*pi/4)
addb([], [a, a + S.Half, 2*a],
(2*sqrt(polar_lift(-1)*z))**(1 - 2*a)*gamma(2*a)**2 *
Matrix([besselj(2*a - 1, x)*besseli(2*a - 1, x),
x*(besseli(2*a, x)*besselj(2*a - 1, x)
- besseli(2*a - 1, x)*besselj(2*a, x)),
x**2*besseli(2*a, x)*besselj(2*a, x),
x**3*(besseli(2*a, x)*besselj(2*a - 1, x)
+ besseli(2*a - 1, x)*besselj(2*a, x))]),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, Rational(1, 4), 0, 0],
[0, (1 - 2*a)/2, Rational(-1, 2), 0],
[0, 0, 1 - 2*a, Rational(1, 4)],
[-32*z, 0, 0, 1 - a]]))
# 1F2
addb([a], [a - S.Half, 2*a],
Matrix([z**(S.Half - a)*besseli(a - S.Half, sqrt(z))**2,
z**(1 - a)*besseli(a - S.Half, sqrt(z))
*besseli(a - Rational(3, 2), sqrt(z)),
z**(Rational(3, 2) - a)*besseli(a - Rational(3, 2), sqrt(z))**2]),
Matrix([[-gamma(a + S.Half)**2/4**(S.Half - a),
2*gamma(a - S.Half)*gamma(a + S.Half)/4**(1 - a),
0]]),
Matrix([[1 - 2*a, 1, 0], [z/2, S.Half - a, S.Half], [0, z, 0]]))
addb([S.Half], [b, 2 - b],
pi*(1 - b)/sin(pi*b)*
Matrix([besseli(1 - b, sqrt(z))*besseli(b - 1, sqrt(z)),
sqrt(z)*(besseli(-b, sqrt(z))*besseli(b - 1, sqrt(z))
+ besseli(1 - b, sqrt(z))*besseli(b, sqrt(z))),
besseli(-b, sqrt(z))*besseli(b, sqrt(z))]),
Matrix([[1, 0, 0]]),
Matrix([[b - 1, S.Half, 0],
[z, 0, z],
[0, S.Half, -b]]))
addb([S.Half], [Rational(3, 2), Rational(3, 2)],
Matrix([Shi(2*sqrt(z))/2/sqrt(z), sinh(2*sqrt(z))/2/sqrt(z),
cosh(2*sqrt(z))]),
Matrix([[1, 0, 0]]),
Matrix([[Rational(-1, 2), S.Half, 0], [0, Rational(-1, 2), S.Half], [0, 2*z, 0]]))
# FresnelS
# Basic rule
#add([Rational(3, 4)], [Rational(3, 2),Rational(7, 4)], 6*fresnels( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( pi * (exp(pi*I/4)*root(z,4)*2/sqrt(pi))**3 ) )
# Manually tuned rule
addb([Rational(3, 4)], [Rational(3, 2), Rational(7, 4)],
Matrix([fresnels(exp(pi*I/4)*root(z, 4)*2/sqrt(pi))
        / (pi * (exp(pi*I/4)*root(z, 4)*2/sqrt(pi))**3),
        sinh(2*sqrt(z))/sqrt(z),
        cosh(2*sqrt(z))]),
Matrix([[6, 0, 0]]),
Matrix([[Rational(-3, 4), Rational(1, 16), 0],
[ 0, Rational(-1, 2), 1],
[ 0, z, 0]]))
# FresnelC
# Basic rule
#add([Rational(1, 4)], [S.Half,Rational(5, 4)], fresnelc( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) )
# Manually tuned rule
addb([Rational(1, 4)], [S.Half, Rational(5, 4)],
Matrix([sqrt(pi)*exp(-I*pi/4)*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
        / (2*root(z, 4)),
        cosh(2*sqrt(z)),
        sinh(2*sqrt(z))*sqrt(z)]),
Matrix([[1, 0, 0]]),
Matrix([[Rational(-1, 4), Rational(1, 4), 0 ],
[ 0, 0, 1 ],
[ 0, z, S.Half]]))
# 2F3
# XXX this five-parameter formula is pretty slow with the current
# Formula.find_instantiations (it creates 2!*3!*3**(2+3) ~ 3000
# instantiations), but it's not too bad.
addb([a, a + S.Half], [2*a, b, 2*a - b + 1],
gamma(b)*gamma(2*a - b + 1) * (sqrt(z)/2)**(1 - 2*a) *
Matrix([besseli(b - 1, sqrt(z))*besseli(2*a - b, sqrt(z)),
sqrt(z)*besseli(b, sqrt(z))*besseli(2*a - b, sqrt(z)),
sqrt(z)*besseli(b - 1, sqrt(z))*besseli(2*a - b + 1, sqrt(z)),
besseli(b, sqrt(z))*besseli(2*a - b + 1, sqrt(z))]),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, S.Half, S.Half, 0],
[z/2, 1 - b, 0, z/2],
[z/2, 0, b - 2*a, z/2],
[0, S.Half, S.Half, -2*a]]))
# (Cf. the above comment about EulerGamma in the basis.)
addb([1, 1], [2, 2, Rational(3, 2)],
Matrix([Chi(2*sqrt(z)) - log(2*sqrt(z)),
cosh(2*sqrt(z)), sqrt(z)*sinh(2*sqrt(z)), 1, EulerGamma]),
Matrix([[1/z, 0, 0, 0, -1/z]]),
Matrix([[0, S.Half, 0, Rational(-1, 2), 0],
[0, 0, 1, 0, 0],
[0, z, S.Half, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
# 3F3
# This is the rule: http://functions.wolfram.com/07.31.03.0134.01
# The initial reason to add it was a nice solution for
# integrate(erf(a*z)/z**2, z), and the same for erfc and erfi.
# Basic rule
# add([1, 1, a], [2, 2, a+1], (a/(z*(a-1)**2)) *
# (1 - (-z)**(1-a) * (gamma(a) - uppergamma(a,-z))
# - (a-1) * (EulerGamma + uppergamma(0,-z) + log(-z))
# - exp(z)))
# Manually tuned rule
addb([1, 1, a], [2, 2, a+1],
Matrix([a*(log(-z) + expint(1, -z) + EulerGamma)/(z*(a**2 - 2*a + 1)),
a*(-z)**(-a)*(gamma(a) - uppergamma(a, -z))/(a - 1)**2,
a*exp(z)/(a**2 - 2*a + 1),
a/(z*(a**2 - 2*a + 1))]),
Matrix([[1-a, 1, -1/z, 1]]),
Matrix([[-1,0,-1/z,1],
[0,-a,1,0],
[0,0,z,0],
[0,0,0,-1]]))
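# Note on the addb entries above: each call registers a Formula whose basis
# vector B, coefficient row C and matrix M satisfy closed_form = C*B and
# z*d/dz B = M*B (cf. Formula._compute_basis below); the parameter lists say
# which hypergeometric functions the formula can serve as an origin for.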
def add_meijerg_formulae(formulae):
from sympy.matrices import Matrix
a, b, c, z = list(map(Dummy, 'abcz'))
rho = Dummy('rho')
def add(an, ap, bm, bq, B, C, M, matcher):
formulae.append(MeijerFormula(an, ap, bm, bq, z, [a, b, c, rho],
B, C, M, matcher))
def detect_uppergamma(func):
x = func.an[0]
y, z = func.bm
swapped = False
if not _mod1((x - y).simplify()):
swapped = True
(y, z) = (z, y)
if _mod1((x - z).simplify()) or x - z > 0:
return None
l = [y, x]
if swapped:
l = [x, y]
return {rho: y, a: x - y}, G_Function([x], [], l, [])
add([a + rho], [], [rho, a + rho], [],
Matrix([gamma(1 - a)*z**rho*exp(z)*uppergamma(a, z),
gamma(1 - a)*z**(a + rho)]),
Matrix([[1, 0]]),
Matrix([[rho + z, -1], [0, a + rho]]),
detect_uppergamma)
def detect_3113(func):
"""http://functions.wolfram.com/07.34.03.0984.01"""
x = func.an[0]
u, v, w = func.bm
if _mod1((u - v).simplify()) == 0:
if _mod1((v - w).simplify()) == 0:
return
sig = (S.Half, S.Half, S.Zero)
x1, x2, y = u, v, w
else:
if _mod1((x - u).simplify()) == 0:
sig = (S.Half, S.Zero, S.Half)
x1, y, x2 = u, v, w
else:
sig = (S.Zero, S.Half, S.Half)
y, x1, x2 = u, v, w
if (_mod1((x - x1).simplify()) != 0 or
_mod1((x - x2).simplify()) != 0 or
_mod1((x - y).simplify()) != S.Half or
x - x1 > 0 or x - x2 > 0):
return
return {a: x}, G_Function([x], [], [x - S.Half + t for t in sig], [])
s = sin(2*sqrt(z))
c_ = cos(2*sqrt(z))
S_ = Si(2*sqrt(z)) - pi/2
C = Ci(2*sqrt(z))
add([a], [], [a, a, a - S.Half], [],
Matrix([sqrt(pi)*z**(a - S.Half)*(c_*S_ - s*C),
sqrt(pi)*z**a*(s*S_ + c_*C),
sqrt(pi)*z**a]),
Matrix([[-2, 0, 0]]),
Matrix([[a - S.Half, -1, 0], [z, a, S.Half], [0, 0, a]]),
detect_3113)
def make_simp(z):
""" Create a function that simplifies rational functions in ``z``. """
def simp(expr):
""" Efficiently simplify the rational function ``expr``. """
numer, denom = expr.as_numer_denom()
numer = numer.expand()
# denom = denom.expand() # is this needed?
c, numer, denom = poly(numer, z).cancel(poly(denom, z))
return c * numer.as_expr() / denom.as_expr()
return simp
def debug(*args):
if SYMPY_DEBUG:
for a in args:
print(a, end="")
print()
class Hyper_Function(Expr):
""" A generalized hypergeometric function. """
def __new__(cls, ap, bq):
obj = super().__new__(cls)
obj.ap = Tuple(*list(map(expand, ap)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.ap, self.bq)
@property
def sizes(self):
return (len(self.ap), len(self.bq))
@property
def gamma(self):
"""
Number of upper parameters that are negative integers
This is a transformation invariant.
"""
return sum(bool(x.is_integer and x.is_negative) for x in self.ap)
def _hashable_content(self):
return super()._hashable_content() + (self.ap,
self.bq)
def __call__(self, arg):
return hyper(self.ap, self.bq, arg)
def build_invariants(self):
"""
Compute the invariant vector.
Explanation
===========
The invariant vector is:
(gamma, ((s1, n1), ..., (sk, nk)), ((t1, m1), ..., (tr, mr)))
where gamma is the number of integer a < 0,
s1 < ... < sk
nl is the number of parameters a_i congruent to sl mod 1
t1 < ... < tr
ml is the number of parameters b_i congruent to tl mod 1
If the index pair contains parameters, then this is not truly an
invariant, since the parameters cannot be sorted uniquely mod1.
Examples
========
>>> from sympy.simplify.hyperexpand import Hyper_Function
>>> from sympy import S
>>> ap = (S.Half, S.One/3, S(-1)/2, -2)
>>> bq = (1, 2)
Here gamma = 1,
k = 3, s1 = 0, s2 = 1/3, s3 = 1/2
n1 = 1, n2 = 1, n3 = 2
r = 1, t1 = 0
m1 = 2:
>>> Hyper_Function(ap, bq).build_invariants()
(1, ((0, 1), (1/3, 1), (1/2, 2)), ((0, 2),))
"""
abuckets, bbuckets = sift(self.ap, _mod1), sift(self.bq, _mod1)
def tr(bucket):
bucket = list(bucket.items())
if not any(isinstance(x[0], Mod) for x in bucket):
bucket.sort(key=lambda x: default_sort_key(x[0]))
bucket = tuple([(mod, len(values)) for mod, values in bucket if
values])
return bucket
return (self.gamma, tr(abuckets), tr(bbuckets))
def difficulty(self, func):
""" Estimate how many steps it takes to reach ``func`` from self.
Return -1 if impossible. """
if self.gamma != func.gamma:
return -1
oabuckets, obbuckets, abuckets, bbuckets = [sift(params, _mod1) for
params in (self.ap, self.bq, func.ap, func.bq)]
diff = 0
for bucket, obucket in [(abuckets, oabuckets), (bbuckets, obbuckets)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (not mod in bucket) or (not mod in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
return -1
l1 = list(bucket[mod])
l2 = list(obucket[mod])
l1.sort()
l2.sort()
for i, j in zip(l1, l2):
diff += abs(i - j)
return diff
def _is_suitable_origin(self):
"""
Decide if ``self`` is a suitable origin.
Explanation
===========
A function is a suitable origin iff:
* none of the ai equals bj + n, with n a non-negative integer
* none of the ai is zero
* none of the bj is a non-positive integer
Note that this gives meaningful results only when none of the indices
are symbolic.
"""
for a in self.ap:
for b in self.bq:
if (a - b).is_integer and (a - b).is_negative is False:
return False
for a in self.ap:
if a == 0:
return False
for b in self.bq:
if b.is_integer and b.is_nonpositive:
return False
return True
class G_Function(Expr):
""" A Meijer G-function. """
def __new__(cls, an, ap, bm, bq):
obj = super().__new__(cls)
obj.an = Tuple(*list(map(expand, an)))
obj.ap = Tuple(*list(map(expand, ap)))
obj.bm = Tuple(*list(map(expand, bm)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.an, self.ap, self.bm, self.bq)
def _hashable_content(self):
return super()._hashable_content() + self.args
def __call__(self, z):
return meijerg(self.an, self.ap, self.bm, self.bq, z)
def compute_buckets(self):
"""
Compute buckets for the four sets of parameters.
Explanation
===========
We guarantee that any two equal Mod objects returned are actually the
same, and that the buckets are sorted by real part (an and bq
descending, bm and ap ascending).
Examples
========
>>> from sympy.simplify.hyperexpand import G_Function
>>> from sympy.abc import y
>>> from sympy import S
>>> a, b = [1, 3, 2, S(3)/2], [1 + y, y, 2, y + 3]
>>> G_Function(a, b, [2], [y]).compute_buckets()
({0: [3, 2, 1], 1/2: [3/2]},
{0: [2], y: [y, y + 1, y + 3]}, {0: [2]}, {y: [y]})
"""
dicts = pan, pap, pbm, pbq = [defaultdict(list) for i in range(4)]
for dic, lis in zip(dicts, (self.an, self.ap, self.bm, self.bq)):
for x in lis:
dic[_mod1(x)].append(x)
for dic, flip in zip(dicts, (True, False, False, True)):
for m, items in dic.items():
x0 = items[0]
items.sort(key=lambda x: x - x0, reverse=flip)
dic[m] = items
return tuple([dict(w) for w in dicts])
@property
def signature(self):
return (len(self.an), len(self.ap), len(self.bm), len(self.bq))
# Dummy variable.
_x = Dummy('x')
class Formula:
"""
This class represents hypergeometric formulae.
Explanation
===========
Its data members are:
- z, the argument
- closed_form, the closed form expression
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (see _compute_basis)
Examples
========
>>> from sympy.abc import a, b, z
>>> from sympy.simplify.hyperexpand import Formula, Hyper_Function
>>> func = Hyper_Function((a/2, a/3 + b, (1+a)/2), (a, b, (a+b)/7))
>>> f = Formula(func, z, None, [a, b])
"""
def _compute_basis(self, closed_form):
"""
Compute a set of functions B=(f1, ..., fn), a nxn matrix M
and a 1xn matrix C such that:
closed_form = C B
z d/dz B = M B.
"""
from sympy.matrices import Matrix, eye, zeros
afactors = [_x + a for a in self.func.ap]
bfactors = [_x + b - 1 for b in self.func.bq]
expr = _x*Mul(*bfactors) - self.z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree() - 1
b = [closed_form]
for _ in range(n):
b.append(self.z*b[-1].diff(self.z))
self.B = Matrix(b)
self.C = Matrix([[1] + [0]*n])
m = eye(n)
m = m.col_insert(0, zeros(n, 1))
l = poly.all_coeffs()[1:]
l.reverse()
self.M = m.row_insert(n, -Matrix([l])/poly.all_coeffs()[0])
def __init__(self, func, z, res, symbols, B=None, C=None, M=None):
z = sympify(z)
res = sympify(res)
symbols = [x for x in sympify(symbols) if func.has(x)]
self.z = z
self.symbols = symbols
self.B = B
self.C = C
self.M = M
self.func = func
# TODO with symbolic parameters, it could be advantageous
# (for prettier answers) to compute a basis only *after*
# instantiation
if res is not None:
self._compute_basis(res)
@property
def closed_form(self):
return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero)
def find_instantiations(self, func):
"""
Find substitutions of the free symbols that match ``func``.
Return the substitution dictionaries as a list. Note that the returned
instantiations need not actually match, or be valid!
"""
from sympy.solvers import solve
ap = func.ap
bq = func.bq
if len(ap) != len(self.func.ap) or len(bq) != len(self.func.bq):
raise TypeError('Cannot instantiate other number of parameters')
symbol_values = []
for a in self.symbols:
if a in self.func.ap.args:
symbol_values.append(ap)
elif a in self.func.bq.args:
symbol_values.append(bq)
else:
raise ValueError("At least one of the parameters of the "
"formula must be equal to %s" % (a,))
base_repl = [dict(list(zip(self.symbols, values)))
for values in product(*symbol_values)]
abuckets, bbuckets = [sift(params, _mod1) for params in [ap, bq]]
a_inv, b_inv = [{a: len(vals) for a, vals in bucket.items()}
for bucket in [abuckets, bbuckets]]
critical_values = [[0] for _ in self.symbols]
result = []
_n = Dummy()
for repl in base_repl:
symb_a, symb_b = [sift(params, lambda x: _mod1(x.xreplace(repl)))
for params in [self.func.ap, self.func.bq]]
for bucket, obucket in [(abuckets, symb_a), (bbuckets, symb_b)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (not mod in bucket) or (not mod in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
break
for a, vals in zip(self.symbols, critical_values):
if repl[a].free_symbols:
continue
exprs = [expr for expr in obucket[mod] if expr.has(a)]
repl0 = repl.copy()
repl0[a] += _n
for expr in exprs:
for target in bucket[mod]:
n0, = solve(expr.xreplace(repl0) - target, _n)
if n0.free_symbols:
raise ValueError("Value should not be true")
vals.append(n0)
else:
values = []
for a, vals in zip(self.symbols, critical_values):
a0 = repl[a]
min_ = floor(min(vals))
max_ = ceiling(max(vals))
values.append([a0 + n for n in range(min_, max_ + 1)])
result.extend(dict(list(zip(self.symbols, l))) for l in product(*values))
return result
class FormulaCollection:
""" A collection of formulae to use as origins. """
def __init__(self):
""" Doing this globally at module init time is a pain ... """
self.symbolic_formulae = {}
self.concrete_formulae = {}
self.formulae = []
add_formulae(self.formulae)
# Now process the formulae into a helpful form.
# These dicts are indexed by (p, q).
for f in self.formulae:
sizes = f.func.sizes
if len(f.symbols) > 0:
self.symbolic_formulae.setdefault(sizes, []).append(f)
else:
inv = f.func.build_invariants()
self.concrete_formulae.setdefault(sizes, {})[inv] = f
def lookup_origin(self, func):
"""
Given the suitable target ``func``, try to find an origin in our
knowledge base.
Examples
========
>>> from sympy.simplify.hyperexpand import (FormulaCollection,
... Hyper_Function)
>>> f = FormulaCollection()
>>> f.lookup_origin(Hyper_Function((), ())).closed_form
exp(_z)
>>> f.lookup_origin(Hyper_Function([1], ())).closed_form
HyperRep_power1(-1, _z)
>>> from sympy import S
>>> i = Hyper_Function([S('1/4'), S('3/4 + 4')], [S.Half])
>>> f.lookup_origin(i).closed_form
HyperRep_sqrts1(-1/4, _z)
"""
inv = func.build_invariants()
sizes = func.sizes
if sizes in self.concrete_formulae and \
inv in self.concrete_formulae[sizes]:
return self.concrete_formulae[sizes][inv]
# We don't have a concrete formula. Try to instantiate.
if not sizes in self.symbolic_formulae:
return None # Too bad...
possible = []
for f in self.symbolic_formulae[sizes]:
repls = f.find_instantiations(func)
for repl in repls:
func2 = f.func.xreplace(repl)
if not func2._is_suitable_origin():
continue
diff = func2.difficulty(func)
if diff == -1:
continue
possible.append((diff, repl, f, func2))
# find the nearest origin
possible.sort(key=lambda x: x[0])
for _, repl, f, func2 in possible:
f2 = Formula(func2, f.z, None, [], f.B.subs(repl),
f.C.subs(repl), f.M.subs(repl))
if not any(e.has(S.NaN, oo, -oo, zoo) for e in [f2.B, f2.M, f2.C]):
return f2
return None
class MeijerFormula:
"""
This class represents a Meijer G-function formula.
Its data members are:
- z, the argument
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (c/f ordinary Formula)
"""
def __init__(self, an, ap, bm, bq, z, symbols, B, C, M, matcher):
an, ap, bm, bq = [Tuple(*list(map(expand, w))) for w in [an, ap, bm, bq]]
self.func = G_Function(an, ap, bm, bq)
self.z = z
self.symbols = symbols
self._matcher = matcher
self.B = B
self.C = C
self.M = M
@property
def closed_form(self):
return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero)
def try_instantiate(self, func):
"""
Try to instantiate the current formula to (almost) match func.
This uses the _matcher passed on init.
"""
if func.signature != self.func.signature:
return None
res = self._matcher(func)
if res is not None:
subs, newfunc = res
return MeijerFormula(newfunc.an, newfunc.ap, newfunc.bm, newfunc.bq,
self.z, [],
self.B.subs(subs), self.C.subs(subs),
self.M.subs(subs), None)
class MeijerFormulaCollection:
"""
This class holds a collection of meijer g formulae.
"""
def __init__(self):
formulae = []
add_meijerg_formulae(formulae)
self.formulae = defaultdict(list)
for formula in formulae:
self.formulae[formula.func.signature].append(formula)
self.formulae = dict(self.formulae)
def lookup_origin(self, func):
""" Try to find a formula that matches func. """
if not func.signature in self.formulae:
return None
for formula in self.formulae[func.signature]:
res = formula.try_instantiate(func)
if res is not None:
return res
class Operator:
"""
Base class for operators to be applied to our functions.
Explanation
===========
These operators are differential operators. They are by convention
expressed in the variable D = z*d/dz (although this base class does
not actually care).
Note that when the operator is applied to an object, we typically do
*not* blindly differentiate but instead use a different representation
of the z*d/dz operator (see make_derivative_operator).
To subclass from this, define a __init__ method that initializes a
self._poly variable. This variable stores a polynomial. By convention
the generator is z*d/dz, and acts to the right of all coefficients.
Thus this poly
x**2 + 2*z*x + 1
represents the differential operator
(z*d/dz)**2 + 2*z**2*d/dz.
This class is used only in the implementation of the hypergeometric
function expansion algorithm.
"""
def apply(self, obj, op):
"""
Apply ``self`` to the object ``obj``, where the generator is ``op``.
Examples
========
>>> from sympy.simplify.hyperexpand import Operator
>>> from sympy.polys.polytools import Poly
>>> from sympy.abc import x, y, z
>>> op = Operator()
>>> op._poly = Poly(x**2 + z*x + y, x)
>>> op.apply(z**7, lambda f: f.diff(z))
y*z**7 + 7*z**7 + 42*z**5
"""
coeffs = self._poly.all_coeffs()
coeffs.reverse()
diffs = [obj]
for c in coeffs[1:]:
diffs.append(op(diffs[-1]))
r = coeffs[0]*diffs[0]
for c, d in zip(coeffs[1:], diffs[1:]):
r += c*d
return r
class MultOperator(Operator):
""" Simply multiply by a "constant" """
def __init__(self, p):
self._poly = Poly(p, _x)
class ShiftA(Operator):
""" Increment an upper index. """
def __init__(self, ai):
ai = sympify(ai)
if ai == 0:
raise ValueError('Cannot increment zero upper index.')
self._poly = Poly(_x/ai + 1, _x)
def __str__(self):
return '<Increment upper %s.>' % (1/self._poly.all_coeffs()[0])
class ShiftB(Operator):
""" Decrement a lower index. """
def __init__(self, bi):
bi = sympify(bi)
if bi == 1:
raise ValueError('Cannot decrement unit lower index.')
self._poly = Poly(_x/(bi - 1) + 1, _x)
def __str__(self):
return '<Decrement lower %s.>' % (1/self._poly.all_coeffs()[0] + 1)
class UnShiftA(Operator):
""" Decrement an upper index. """
def __init__(self, ap, bq, i, z):
""" Note: i counts from zero! """
ap, bq, i = list(map(sympify, [ap, bq, i]))
self._ap = ap
self._bq = bq
self._i = i
ap = list(ap)
bq = list(bq)
ai = ap.pop(i) - 1
if ai == 0:
raise ValueError('Cannot decrement unit upper index.')
m = Poly(z*ai, _x)
for a in ap:
m *= Poly(_x + a, _x)
A = Dummy('A')
n = D = Poly(ai*A - ai, A)
for b in bq:
n *= D + (b - 1).as_poly(A)
b0 = -n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper index: '
'cancels with lower')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, _x/ai + 1), _x)
self._poly = Poly((n - m)/b0, _x)
def __str__(self):
return '<Decrement upper index #%s of %s, %s.>' % (self._i,
self._ap, self._bq)
class UnShiftB(Operator):
""" Increment a lower index. """
def __init__(self, ap, bq, i, z):
""" Note: i counts from zero! """
ap, bq, i = list(map(sympify, [ap, bq, i]))
self._ap = ap
self._bq = bq
self._i = i
ap = list(ap)
bq = list(bq)
bi = bq.pop(i) + 1
if bi == 0:
raise ValueError('Cannot increment -1 lower index.')
m = Poly(_x*(bi - 1), _x)
for b in bq:
m *= Poly(_x + b - 1, _x)
B = Dummy('B')
D = Poly((bi - 1)*B - bi + 1, B)
n = Poly(z, B)
for a in ap:
n *= (D + a.as_poly(B))
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment index: cancels with upper')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, _x/(bi - 1) + 1), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower index #%s of %s, %s.>' % (self._i,
self._ap, self._bq)
class MeijerShiftA(Operator):
""" Increment an upper b index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(bi - _x, _x)
def __str__(self):
return '<Increment upper b=%s.>' % (self._poly.all_coeffs()[1])
class MeijerShiftB(Operator):
""" Decrement an upper a index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(1 - bi + _x, _x)
def __str__(self):
return '<Decrement upper a=%s.>' % (1 - self._poly.all_coeffs()[1])
class MeijerShiftC(Operator):
""" Increment a lower b index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(-bi + _x, _x)
def __str__(self):
return '<Increment lower b=%s.>' % (-self._poly.all_coeffs()[1])
class MeijerShiftD(Operator):
""" Decrement a lower a index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(bi - 1 - _x, _x)
def __str__(self):
return '<Decrement lower a=%s.>' % (self._poly.all_coeffs()[1] + 1)
class MeijerUnShiftA(Operator):
""" Decrement an upper b index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bm.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
A = Dummy('A')
D = Poly(bi - A, A)
n = Poly(z, A)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, bi - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement upper b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftB(Operator):
""" Increment an upper a index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = an.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B')
D = Poly(B + ai - 1, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment upper a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, 1 - ai + _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment upper a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftC(Operator):
""" Decrement a lower b index. """
# XXX this is "essentially" the same as MeijerUnShiftA. This "essentially"
# can be made rigorous using the functional equation G(1/z) = G'(z),
# where G' denotes a G function of slightly altered parameters.
# However, sorting out the details seems harder than just coding it
# again.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bq.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
C = Dummy('C')
D = Poly(bi + C, C)
n = Poly(z, C)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement lower b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], C).as_expr().subs(C, _x - bi), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement lower b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftD(Operator):
""" Increment a lower a index. """
# XXX This is essentially the same as MeijerUnShiftA.
# See comment at MeijerUnShiftC.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = ap.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B') # - this is the shift operator `D_I`
D = Poly(ai - 1 - B, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment lower a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, ai - 1 - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class ReduceOrder(Operator):
""" Reduce Order by cancelling an upper and a lower index. """
def __new__(cls, ai, bj):
""" For convenience if reduction is not possible, return None. """
ai = sympify(ai)
bj = sympify(bj)
n = ai - bj
if not n.is_Integer or n < 0:
return None
if bj.is_integer and bj.is_nonpositive:
return None
expr = Operator.__new__(cls)
p = S.One
for k in range(n):
p *= (_x + bj + k)/(bj + k)
expr._poly = Poly(p, _x)
expr._a = ai
expr._b = bj
return expr
@classmethod
def _meijer(cls, b, a, sign):
""" Cancel b + sign*s and a + sign*s
This is for meijer G functions. """
b = sympify(b)
a = sympify(a)
n = b - a
if n.is_negative or not n.is_Integer:
return None
expr = Operator.__new__(cls)
p = S.One
for k in range(n):
p *= (sign*_x + a + k)
expr._poly = Poly(p, _x)
if sign == -1:
expr._a = b
expr._b = a
else:
expr._b = Add(1, a - 1, evaluate=False)
expr._a = Add(1, b - 1, evaluate=False)
return expr
@classmethod
def meijer_minus(cls, b, a):
return cls._meijer(b, a, -1)
@classmethod
def meijer_plus(cls, a, b):
return cls._meijer(1 - a, 1 - b, 1)
def __str__(self):
return '<Reduce order by cancelling upper %s with lower %s.>' % \
(self._a, self._b)
def _reduce_order(ap, bq, gen, key):
""" Order reduction algorithm used in Hypergeometric and Meijer G """
ap = list(ap)
bq = list(bq)
ap.sort(key=key)
bq.sort(key=key)
nap = []
# we will edit bq in place
operators = []
for a in ap:
op = None
for i in range(len(bq)):
op = gen(a, bq[i])
if op is not None:
bq.pop(i)
break
if op is None:
nap.append(a)
else:
operators.append(op)
return nap, bq, operators
def reduce_order(func):
"""
Given the hypergeometric function ``func``, find a sequence of operators
that reduces order as much as possible.
Explanation
===========
Return (newfunc, [operators]), where applying the operators to the
hypergeometric function newfunc yields func.
Examples
========
>>> from sympy.simplify.hyperexpand import reduce_order, Hyper_Function
>>> reduce_order(Hyper_Function((1, 2), (3, 4)))
(Hyper_Function((1, 2), (3, 4)), [])
>>> reduce_order(Hyper_Function((1,), (1,)))
(Hyper_Function((), ()), [<Reduce order by cancelling upper 1 with lower 1.>])
>>> reduce_order(Hyper_Function((2, 4), (3, 3)))
(Hyper_Function((2,), (3,)), [<Reduce order by cancelling
upper 4 with lower 3.>])
"""
nap, nbq, operators = _reduce_order(func.ap, func.bq, ReduceOrder, default_sort_key)
return Hyper_Function(Tuple(*nap), Tuple(*nbq)), operators
def reduce_order_meijer(func):
"""
Given the Meijer G function parameters, ``func``, find a sequence of
operators that reduces order as much as possible.
Return newfunc, [operators].
Examples
========
>>> from sympy.simplify.hyperexpand import (reduce_order_meijer,
... G_Function)
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 2]))[0]
G_Function((4, 3), (5, 6), (3, 4), (2, 1))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 8]))[0]
G_Function((3,), (5, 6), (3, 4), (1,))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [1, 5]))[0]
G_Function((3,), (), (), (1,))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [5, 3]))[0]
G_Function((), (), (), ())
"""
nan, nbq, ops1 = _reduce_order(func.an, func.bq, ReduceOrder.meijer_plus,
lambda x: default_sort_key(-x))
nbm, nap, ops2 = _reduce_order(func.bm, func.ap, ReduceOrder.meijer_minus,
default_sort_key)
return G_Function(nan, nap, nbm, nbq), ops1 + ops2
def make_derivative_operator(M, z):
""" Create a derivative operator, to be passed to Operator.apply. """
def doit(C):
r = z*C.diff(z) + C*M
r = r.applyfunc(make_simp(z))
return r
return doit
def apply_operators(obj, ops, op):
"""
Apply the list of operators ``ops`` to object ``obj``, substituting
``op`` for the generator.
"""
res = obj
for o in reversed(ops):
res = o.apply(res, op)
return res
def devise_plan(target, origin, z):
"""
Devise a plan (consisting of shift and un-shift operators) to be applied
to the hypergeometric function ``target`` to yield ``origin``.
Returns a list of operators.
Examples
========
>>> from sympy.simplify.hyperexpand import devise_plan, Hyper_Function
>>> from sympy.abc import z
Nothing to do:
>>> devise_plan(Hyper_Function((1, 2), ()), Hyper_Function((1, 2), ()), z)
[]
>>> devise_plan(Hyper_Function((), (1, 2)), Hyper_Function((), (1, 2)), z)
[]
Very simple plans:
>>> devise_plan(Hyper_Function((2,), ()), Hyper_Function((1,), ()), z)
[<Increment upper 1.>]
>>> devise_plan(Hyper_Function((), (2,)), Hyper_Function((), (1,)), z)
[<Increment lower index #0 of [], [1].>]
Several buckets:
>>> from sympy import S
>>> devise_plan(Hyper_Function((1, S.Half), ()),
... Hyper_Function((2, S('3/2')), ()), z) #doctest: +NORMALIZE_WHITESPACE
[<Decrement upper index #0 of [3/2, 1], [].>,
<Decrement upper index #0 of [2, 3/2], [].>]
A slightly more complicated plan:
>>> devise_plan(Hyper_Function((1, 3), ()), Hyper_Function((2, 2), ()), z)
[<Increment upper 2.>, <Decrement upper index #0 of [2, 2], [].>]
Another more complicated plan: (note that the ap have to be shifted first!)
>>> devise_plan(Hyper_Function((1, -1), (2,)), Hyper_Function((3, -2), (4,)), z)
[<Decrement lower 3.>, <Decrement lower 4.>,
<Decrement upper index #1 of [-1, 2], [4].>,
<Decrement upper index #1 of [-1, 3], [4].>, <Increment upper -2.>]
"""
abuckets, bbuckets, nabuckets, nbbuckets = [sift(params, _mod1) for
params in (target.ap, target.bq, origin.ap, origin.bq)]
if len(list(abuckets.keys())) != len(list(nabuckets.keys())) or \
len(list(bbuckets.keys())) != len(list(nbbuckets.keys())):
raise ValueError('%s not reachable from %s' % (target, origin))
ops = []
def do_shifts(fro, to, inc, dec):
ops = []
for i in range(len(fro)):
if to[i] - fro[i] > 0:
sh = inc
ch = 1
else:
sh = dec
ch = -1
while to[i] != fro[i]:
ops += [sh(fro, i)]
fro[i] += ch
return ops
def do_shifts_a(nal, nbk, al, aother, bother):
""" Shift us from (nal, nbk) to (al, nbk). """
return do_shifts(nal, al, lambda p, i: ShiftA(p[i]),
lambda p, i: UnShiftA(p + aother, nbk + bother, i, z))
def do_shifts_b(nal, nbk, bk, aother, bother):
""" Shift us from (nal, nbk) to (nal, bk). """
return do_shifts(nbk, bk,
lambda p, i: UnShiftB(nal + aother, p + bother, i, z),
lambda p, i: ShiftB(p[i]))
for r in sorted(list(abuckets.keys()) + list(bbuckets.keys()), key=default_sort_key):
al = ()
nal = ()
bk = ()
nbk = ()
if r in abuckets:
al = abuckets[r]
nal = nabuckets[r]
if r in bbuckets:
bk = bbuckets[r]
nbk = nbbuckets[r]
if len(al) != len(nal) or len(bk) != len(nbk):
raise ValueError('%s not reachable from %s' % (target, origin))
al, nal, bk, nbk = [sorted(list(w), key=default_sort_key)
for w in [al, nal, bk, nbk]]
def others(dic, key):
l = []
for k, value in dic.items():
if k != key:
l += list(dic[k])
return l
aother = others(nabuckets, r)
bother = others(nbbuckets, r)
if len(al) == 0:
# there can be no complications, just shift the bs as we please
ops += do_shifts_b([], nbk, bk, aother, bother)
elif len(bk) == 0:
# there can be no complications, just shift the as as we please
ops += do_shifts_a(nal, [], al, aother, bother)
else:
namax = nal[-1]
amax = al[-1]
if nbk[0] - namax <= 0 or bk[0] - amax <= 0:
raise ValueError('Non-suitable parameters.')
if namax - amax > 0:
# we are going to shift down - first do the as, then the bs
ops += do_shifts_a(nal, nbk, al, aother, bother)
ops += do_shifts_b(al, nbk, bk, aother, bother)
else:
# we are going to shift up - first do the bs, then the as
ops += do_shifts_b(nal, nbk, bk, aother, bother)
ops += do_shifts_a(nal, bk, al, aother, bother)
nabuckets[r] = al
nbbuckets[r] = bk
ops.reverse()
return ops
def try_shifted_sum(func, z):
""" Try to recognise a hypergeometric sum that starts from k > 0. """
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
if len(abuckets[S.Zero]) != 1:
return None
r = abuckets[S.Zero][0]
if r <= 0:
return None
if not S.Zero in bbuckets:
return None
l = list(bbuckets[S.Zero])
l.sort()
k = l[0]
if k <= 0:
return None
nap = list(func.ap)
nap.remove(r)
nbq = list(func.bq)
nbq.remove(k)
k -= 1
nap = [x - k for x in nap]
nbq = [x - k for x in nbq]
ops = []
for n in range(r - 1):
ops.append(ShiftA(n + 1))
ops.reverse()
fac = factorial(k)/z**k
for a in nap:
fac /= rf(a, k)
for b in nbq:
fac *= rf(b, k)
ops += [MultOperator(fac)]
p = 0
for n in range(k):
m = z**n/factorial(n)
for a in nap:
m *= rf(a, n)
for b in nbq:
m /= rf(b, n)
p += m
return Hyper_Function(nap, nbq), ops, -p
def try_polynomial(func, z):
""" Recognise polynomial cases. Returns None if not such a case.
Requires order to be fully reduced. """
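# Example: for Hyper_Function((-2, 1), (3,)) and argument z this returns the
# terminating series 1 - 2*z/3 + z**2/6, since the upper parameter -2 makes
# the series a polynomial of degree 2.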
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
a0 = abuckets[S.Zero]
b0 = bbuckets[S.Zero]
a0.sort()
b0.sort()
al0 = [x for x in a0 if x <= 0]
bl0 = [x for x in b0 if x <= 0]
if bl0 and all(a < bl0[-1] for a in al0):
return oo
if not al0:
return None
a = al0[-1]
fac = 1
res = S.One
for n in Tuple(*list(range(-a))):
fac *= z
fac /= n + 1
for a in func.ap:
fac *= a + n
for b in func.bq:
fac /= b + n
res += fac
return res
def try_lerchphi(func):
"""
Try to find an expression for Hyper_Function ``func`` in terms of Lerch
Transcendents.
Return None if no such expression can be found.
"""
# This is actually quite simple, and is described in Roach's paper,
# section 18.
# We don't need to implement the reduction to polylog here, this
# is handled by expand_func.
from sympy.matrices import Matrix, zeros
from sympy.polys import apart
# First we need to figure out if the summation coefficient is a rational
# function of the summation index, and construct that rational function.
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
paired = {}
for key, value in abuckets.items():
if key != 0 and not key in bbuckets:
return None
bvalue = bbuckets[key]
paired[key] = (list(value), list(bvalue))
bbuckets.pop(key, None)
if bbuckets != {}:
return None
if not S.Zero in abuckets:
return None
aints, bints = paired[S.Zero]
# Account for the additional n! in denominator
paired[S.Zero] = (aints, bints + [1])
t = Dummy('t')
numer = S.One
denom = S.One
for key, (avalue, bvalue) in paired.items():
if len(avalue) != len(bvalue):
return None
# Note that since order has been reduced fully, all the b are
# bigger than all the a they differ from by an integer. In particular
# if there are any negative b left, this function is not well-defined.
for a, b in zip(avalue, bvalue):
if (a - b).is_positive:
k = a - b
numer *= rf(b + t, k)
denom *= rf(b, k)
else:
k = b - a
numer *= rf(a, k)
denom *= rf(a + t, k)
# Now do a partial fraction decomposition.
# We assemble two structures: a list monomials of pairs (a, b) representing
# a*t**b (b a non-negative integer), and a dict terms, where
# terms[a] = [(b, c)] means that there is a term b/(t-a)**c.
part = apart(numer/denom, t)
args = Add.make_args(part)
monomials = []
terms = {}
for arg in args:
numer, denom = arg.as_numer_denom()
if not denom.has(t):
p = Poly(numer, t)
if not p.is_monomial:
raise TypeError("p should be monomial")
((b, ), a) = p.LT()
monomials += [(a/denom, b)]
continue
if numer.has(t):
raise NotImplementedError('Need partial fraction decomposition'
' with linear denominators')
indep, [dep] = denom.as_coeff_mul(t)
n = 1
if dep.is_Pow:
n = dep.exp
dep = dep.base
if dep == t:
a = S.Zero
elif dep.is_Add:
a, tmp = dep.as_independent(t)
b = 1
if tmp != t:
b, _ = tmp.as_independent(t)
if dep != b*t + a:
raise NotImplementedError('unrecognised form %s' % dep)
a /= b
indep *= b**n
else:
raise NotImplementedError('unrecognised form of partial fraction')
terms.setdefault(a, []).append((numer/indep, n))
# Now that we have this information, assemble our formula. All the
# monomials yield rational functions and go into one basis element.
# The terms[a] are related by differentiation. If the largest exponent is
# n, we need lerchphi(z, k, a) for k = 1, 2, ..., n.
# deriv maps a basis to its derivative, expressed as a C(z)-linear
# combination of other basis elements.
deriv = {}
coeffs = {}
z = Dummy('z')
monomials.sort(key=lambda x: x[1])
mon = {0: 1/(1 - z)}
if monomials:
for k in range(monomials[-1][1]):
mon[k + 1] = z*mon[k].diff(z)
for a, n in monomials:
coeffs.setdefault(S.One, []).append(a*mon[n])
for a, l in terms.items():
for c, k in l:
coeffs.setdefault(lerchphi(z, k, a), []).append(c)
l.sort(key=lambda x: x[1])
for k in range(2, l[-1][1] + 1):
deriv[lerchphi(z, k, a)] = [(-a, lerchphi(z, k, a)),
(1, lerchphi(z, k - 1, a))]
deriv[lerchphi(z, 1, a)] = [(-a, lerchphi(z, 1, a)),
(1/(1 - z), S.One)]
trans = {}
for n, b in enumerate([S.One] + list(deriv.keys())):
trans[b] = n
basis = [expand_func(b) for (b, _) in sorted(list(trans.items()),
key=lambda x:x[1])]
B = Matrix(basis)
C = Matrix([[0]*len(B)])
for b, c in coeffs.items():
C[trans[b]] = Add(*c)
M = zeros(len(B))
for b, l in deriv.items():
for c, b2 in l:
M[trans[b], trans[b2]] = c
return Formula(func, z, None, [], B, C, M)
def build_hypergeometric_formula(func):
"""
Create a formula object representing the hypergeometric function ``func``.
"""
# We know that no `ap` are negative integers, otherwise "detect poly"
# would have kicked in. However, `ap` could be empty. In this case we can
# use a different basis.
# I'm not aware of a basis that works in all cases.
from sympy.matrices.dense import (Matrix, eye, zeros)
z = Dummy('z')
if func.ap:
afactors = [_x + a for a in func.ap]
bfactors = [_x + b - 1 for b in func.bq]
expr = _x*Mul(*bfactors) - z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree()
basis = []
M = zeros(n)
for k in range(n):
a = func.ap[0] + k
basis += [hyper([a] + list(func.ap[1:]), func.bq, z)]
if k < n - 1:
M[k, k] = -a
M[k, k + 1] = a
B = Matrix(basis)
C = Matrix([[1] + [0]*(n - 1)])
derivs = [eye(n)]
for k in range(n):
derivs.append(M*derivs[k])
l = poly.all_coeffs()
l.reverse()
res = [0]*n
for k, c in enumerate(l):
for r, d in enumerate(C*derivs[k]):
res[r] += c*d
for k, c in enumerate(res):
M[n - 1, k] = -c/derivs[n - 1][0, n - 1]/poly.all_coeffs()[0]
return Formula(func, z, None, [], B, C, M)
else:
# Since there are no `ap`, none of the `bq` can be non-positive
# integers.
basis = []
bq = list(func.bq[:])
for i in range(len(bq)):
basis += [hyper([], bq, z)]
bq[i] += 1
basis += [hyper([], bq, z)]
B = Matrix(basis)
n = len(B)
C = Matrix([[1] + [0]*(n - 1)])
M = zeros(n)
M[0, n - 1] = z/Mul(*func.bq)
for k in range(1, n):
M[k, k - 1] = func.bq[k - 1]
M[k, k] = -func.bq[k - 1]
return Formula(func, z, None, [], B, C, M)
def hyperexpand_special(ap, bq, z):
"""
Try to find a closed-form expression for hyper(ap, bq, z), where ``z``
is supposed to be a "special" value, e.g. 1.
This function tries various of the classical summation formulae
(Gauss, Saalschuetz, etc).
"""
# This code is very ad-hoc. There are many clever algorithms
# (notably Zeilberger's) related to this problem.
# For now we just want a few simple cases to work.
p, q = len(ap), len(bq)
z_ = z
z = unpolarify(z)
if z == 0:
return S.One
from sympy.simplify.simplify import simplify
if p == 2 and q == 1:
# 2F1
a, b, c = ap + bq
if z == 1:
# Gauss
return gamma(c - a - b)*gamma(c)/gamma(c - a)/gamma(c - b)
if z == -1 and simplify(b - a + c) == 1:
b, a = a, b
if z == -1 and simplify(a - b + c) == 1:
# Kummer
if b.is_integer and b.is_negative:
return 2*cos(pi*b/2)*gamma(-b)*gamma(b - a + 1) \
/gamma(-b/2)/gamma(b/2 - a + 1)
else:
return gamma(b/2 + 1)*gamma(b - a + 1) \
/gamma(b + 1)/gamma(b/2 - a + 1)
# TODO tons of more formulae
# investigate what algorithms exist
return hyper(ap, bq, z_)
_collection = None
def _hyperexpand(func, z, ops0=[], z0=Dummy('z0'), premult=1, prem=0,
rewrite='default'):
"""
Try to find an expression for the hypergeometric function ``func``.
Explanation
===========
The result is expressed in terms of a dummy variable ``z0``. Then it
is multiplied by ``premult``. Then ``ops0`` is applied.
``premult`` must be a*z**prem for some a independent of ``z``.
"""
if z.is_zero:
return S.One
from sympy.simplify.simplify import simplify
z = polarify(z, subs=False)
if rewrite == 'default':
rewrite = 'nonrepsmall'
def carryout_plan(f, ops):
C = apply_operators(f.C.subs(f.z, z0), ops,
make_derivative_operator(f.M.subs(f.z, z0), z0))
from sympy.matrices.dense import eye
C = apply_operators(C, ops0,
make_derivative_operator(f.M.subs(f.z, z0)
+ prem*eye(f.M.shape[0]), z0))
if premult == 1:
C = C.applyfunc(make_simp(z0))
r = reduce(lambda s,m: s+m[0]*m[1], zip(C, f.B.subs(f.z, z0)), S.Zero)*premult
res = r.subs(z0, z)
if rewrite:
res = res.rewrite(rewrite)
return res
# TODO
# The following would be possible:
# *) PFD Duplication (see Kelly Roach's paper)
# *) In a similar spirit, try_lerchphi() can be generalised considerably.
global _collection
if _collection is None:
_collection = FormulaCollection()
debug('Trying to expand hypergeometric function ', func)
# First reduce order as much as possible.
func, ops = reduce_order(func)
if ops:
debug(' Reduced order to ', func)
else:
debug(' Could not reduce order.')
# Now try polynomial cases
res = try_polynomial(func, z0)
if res is not None:
debug(' Recognised polynomial.')
p = apply_operators(res, ops, lambda f: z0*f.diff(z0))
p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0))
return unpolarify(simplify(p).subs(z0, z))
# Try to recognise a shifted sum.
p = S.Zero
res = try_shifted_sum(func, z0)
if res is not None:
func, nops, p = res
debug(' Recognised shifted sum, reduced order to ', func)
ops += nops
# apply the plan for poly
p = apply_operators(p, ops, lambda f: z0*f.diff(z0))
p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0))
p = simplify(p).subs(z0, z)
# Try special expansions early.
if unpolarify(z) in [1, -1] and (len(func.ap), len(func.bq)) == (2, 1):
f = build_hypergeometric_formula(func)
r = carryout_plan(f, ops).replace(hyper, hyperexpand_special)
if not r.has(hyper):
return r + p
# Try to find a formula in our collection
formula = _collection.lookup_origin(func)
# Now try a lerch phi formula
if formula is None:
formula = try_lerchphi(func)
if formula is None:
debug(' Could not find an origin. ',
'Will return answer in terms of '
'simpler hypergeometric functions.')
formula = build_hypergeometric_formula(func)
debug(' Found an origin: ', formula.closed_form, ' ', formula.func)
# We need to find the operators that convert formula into func.
ops += devise_plan(func, formula.func, z0)
# Now carry out the plan.
r = carryout_plan(formula, ops) + p
return powdenest(r, polar=True).replace(hyper, hyperexpand_special)
def devise_plan_meijer(fro, to, z):
"""
Find operators to convert G-function ``fro`` into G-function ``to``.
Explanation
===========
It is assumed that ``fro`` and ``to`` have the same signatures, and that in fact
any corresponding pair of parameters differs by an integer, and a direct path
is possible. I.e. if there are parameters a1 b1 c1 and a2 b2 c2 it is
assumed that a1 can be shifted to a2, etc. The only thing this routine
determines is the order of shifts to apply, nothing clever will be tried.
It is also assumed that ``fro`` is suitable.
Examples
========
>>> from sympy.simplify.hyperexpand import (devise_plan_meijer,
... G_Function)
>>> from sympy.abc import z
Empty plan:
>>> devise_plan_meijer(G_Function([1], [2], [3], [4]),
... G_Function([1], [2], [3], [4]), z)
[]
Very simple plans:
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([1], [], [], []), z)
[<Increment upper a index #0 of [0], [], [], [].>]
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([-1], [], [], []), z)
[<Decrement upper a=0.>]
>>> devise_plan_meijer(G_Function([], [1], [], []),
... G_Function([], [2], [], []), z)
[<Increment lower a index #0 of [], [1], [], [].>]
Slightly more complicated plans:
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([2], [], [], []), z)
[<Increment upper a index #0 of [1], [], [], [].>,
<Increment upper a index #0 of [0], [], [], [].>]
>>> devise_plan_meijer(G_Function([0], [], [0], []),
... G_Function([-1], [], [1], []), z)
[<Increment upper b=0.>, <Decrement upper a=0.>]
Order matters:
>>> devise_plan_meijer(G_Function([0], [], [0], []),
... G_Function([1], [], [1], []), z)
[<Increment upper a index #0 of [0], [], [1], [].>, <Increment upper b=0.>]
"""
# TODO for now, we use the following simple heuristic: inverse-shift
# when possible, shift otherwise. Give up if we cannot make progress.
def try_shift(f, t, shifter, diff, counter):
""" Try to apply ``shifter`` in order to bring some element in ``f``
nearer to its counterpart in ``to``. ``diff`` is +/- 1 and
determines the effect of ``shifter``. Counter is a list of elements
blocking the shift.
Return an operator if change was possible, else None.
"""
for idx, (a, b) in enumerate(zip(f, t)):
if (
(a - b).is_integer and (b - a)/diff > 0 and
all(a != x for x in counter)):
sh = shifter(idx)
f[idx] += diff
return sh
fan = list(fro.an)
fap = list(fro.ap)
fbm = list(fro.bm)
fbq = list(fro.bq)
ops = []
change = True
while change:
change = False
op = try_shift(fan, to.an,
lambda i: MeijerUnShiftB(fan, fap, fbm, fbq, i, z),
1, fbm + fbq)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fap, to.ap,
lambda i: MeijerUnShiftD(fan, fap, fbm, fbq, i, z),
1, fbm + fbq)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbm, to.bm,
lambda i: MeijerUnShiftA(fan, fap, fbm, fbq, i, z),
-1, fan + fap)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbq, to.bq,
lambda i: MeijerUnShiftC(fan, fap, fbm, fbq, i, z),
-1, fan + fap)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fan, to.an, lambda i: MeijerShiftB(fan[i]), -1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fap, to.ap, lambda i: MeijerShiftD(fap[i]), -1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbm, to.bm, lambda i: MeijerShiftA(fbm[i]), 1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbq, to.bq, lambda i: MeijerShiftC(fbq[i]), 1, [])
if op is not None:
ops += [op]
change = True
continue
if fan != list(to.an) or fap != list(to.ap) or fbm != list(to.bm) or \
fbq != list(to.bq):
raise NotImplementedError('Could not devise plan.')
ops.reverse()
return ops
_meijercollection = None
def _meijergexpand(func, z0, allow_hyper=False, rewrite='default',
place=None):
"""
Try to find an expression for the Meijer G function specified
by the G_Function ``func``. If ``allow_hyper`` is True, then returning
an expression in terms of hypergeometric functions is allowed.
Currently this just does Slater's theorem.
If expansions exist both at zero and at infinity, ``place``
can be set to ``0`` or ``zoo`` for the preferred choice.
"""
global _meijercollection
if _meijercollection is None:
_meijercollection = MeijerFormulaCollection()
if rewrite == 'default':
rewrite = None
func0 = func
debug('Try to expand Meijer G function corresponding to ', func)
# We will play games with analytic continuation - rather use a fresh symbol
z = Dummy('z')
func, ops = reduce_order_meijer(func)
if ops:
debug(' Reduced order to ', func)
else:
debug(' Could not reduce order.')
# Try to find a direct formula
f = _meijercollection.lookup_origin(func)
if f is not None:
debug(' Found a Meijer G formula: ', f.func)
ops += devise_plan_meijer(f.func, func, z)
# Now carry out the plan.
C = apply_operators(f.C.subs(f.z, z), ops,
make_derivative_operator(f.M.subs(f.z, z), z))
C = C.applyfunc(make_simp(z))
r = C*f.B.subs(f.z, z)
r = r[0].subs(z, z0)
return powdenest(r, polar=True)
debug(" Could not find a direct formula. Trying Slater's theorem.")
# TODO the following would be possible:
# *) Paired Index Theorems
# *) PFD Duplication
# (See Kelly Roach's paper for details on either.)
#
# TODO Also, we tend to create combinations of gamma functions that can be
# simplified.
def can_do(pbm, pap):
""" Test if slater applies. """
for i in pbm:
if len(pbm[i]) > 1:
l = 0
if i in pap:
l = len(pap[i])
if l + 1 < len(pbm[i]):
return False
return True
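# Slater's theorem expresses the G-function as a sum over the poles of the
# gamma(b_j - s) factors of its Mellin-Barnes integrand: each bucket of upper
# ``b`` parameters contributes one hypergeometric series (plus finitely many
# explicit residues when several b's differ by integers), assembled below.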
def do_slater(an, bm, ap, bq, z, zfinal):
# zfinal is the value that will eventually be substituted for z.
# We pass it to _hyperexpand to improve performance.
from sympy.series import residue
func = G_Function(an, bm, ap, bq)
_, pbm, pap, _ = func.compute_buckets()
if not can_do(pbm, pap):
return S.Zero, False
cond = len(an) + len(ap) < len(bm) + len(bq)
if len(an) + len(ap) == len(bm) + len(bq):
cond = abs(z) < 1
if cond is False:
return S.Zero, False
res = S.Zero
for m in pbm:
if len(pbm[m]) == 1:
bh = pbm[m][0]
fac = 1
bo = list(bm)
bo.remove(bh)
for bj in bo:
fac *= gamma(bj - bh)
for aj in an:
fac *= gamma(1 + bh - aj)
for bj in bq:
fac /= gamma(1 + bh - bj)
for aj in ap:
fac /= gamma(aj - bh)
nap = [1 + bh - a for a in list(an) + list(ap)]
nbq = [1 + bh - b for b in list(bo) + list(bq)]
k = polar_lift(S.NegativeOne**(len(ap) - len(bm)))
harg = k*zfinal
# NOTE even though k "is" +-1, this has to be t/k instead of
# t*k ... we are using polar numbers for consistency!
premult = (t/k)**bh
hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops,
t, premult, bh, rewrite=None)
res += fac * hyp
else:
b_ = pbm[m][0]
ki = [bi - b_ for bi in pbm[m][1:]]
u = len(ki)
li = [ai - b_ for ai in pap[m][:u + 1]]
bo = list(bm)
for b in pbm[m]:
bo.remove(b)
ao = list(ap)
for a in pap[m][:u]:
ao.remove(a)
lu = li[-1]
di = [l - k for (l, k) in zip(li, ki)]
# We first work out the integrand:
s = Dummy('s')
integrand = z**s
for b in bm:
if not Mod(b, 1) and b.is_Number:
b = int(round(b))
integrand *= gamma(b - s)
for a in an:
integrand *= gamma(1 - a + s)
for b in bq:
integrand /= gamma(1 - b + s)
for a in ap:
integrand /= gamma(a - s)
# Now sum the finitely many residues:
# XXX This speeds up some cases - is it a good idea?
integrand = expand_func(integrand)
for r in range(int(round(lu))):
resid = residue(integrand, s, b_ + r)
resid = apply_operators(resid, ops, lambda f: z*f.diff(z))
res -= resid
# Now the hypergeometric term.
au = b_ + lu
k = polar_lift(S.NegativeOne**(len(ao) + len(bo) + 1))
harg = k*zfinal
premult = (t/k)**au
nap = [1 + au - a for a in list(an) + list(ap)] + [1]
nbq = [1 + au - b for b in list(bm) + list(bq)]
hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops,
t, premult, au, rewrite=None)
C = S.NegativeOne**(lu)/factorial(lu)
for i in range(u):
C *= S.NegativeOne**di[i]/rf(lu - li[i] + 1, di[i])
for a in an:
C *= gamma(1 - a + au)
for b in bo:
C *= gamma(b - au)
for a in ao:
C /= gamma(a - au)
for b in bq:
C /= gamma(1 - b + au)
res += C*hyp
return res, cond
t = Dummy('t')
slater1, cond1 = do_slater(func.an, func.bm, func.ap, func.bq, z, z0)
def tr(l):
return [1 - x for x in l]
for op in ops:
op._poly = Poly(op._poly.subs({z: 1/t, _x: -_x}), _x)
slater2, cond2 = do_slater(tr(func.bm), tr(func.an), tr(func.bq), tr(func.ap),
t, 1/z0)
slater1 = powdenest(slater1.subs(z, z0), polar=True)
slater2 = powdenest(slater2.subs(t, 1/z0), polar=True)
if not isinstance(cond2, bool):
cond2 = cond2.subs(t, 1/z)
m = func(z)
if m.delta > 0 or \
(m.delta == 0 and len(m.ap) == len(m.bq) and
(re(m.nu) < -1) is not False and polar_lift(z0) == polar_lift(1)):
# The condition delta > 0 means that the convergence region is
# connected. Any expression we find can be continued analytically
# to the entire convergence region.
# The conditions delta==0, p==q, re(nu) < -1 imply that G is continuous
# on the positive reals, so the values at z=1 agree.
if cond1 is not False:
cond1 = True
if cond2 is not False:
cond2 = True
if cond1 is True:
slater1 = slater1.rewrite(rewrite or 'nonrep')
else:
slater1 = slater1.rewrite(rewrite or 'nonrepsmall')
if cond2 is True:
slater2 = slater2.rewrite(rewrite or 'nonrep')
else:
slater2 = slater2.rewrite(rewrite or 'nonrepsmall')
if cond1 is not False and cond2 is not False:
# If one condition is False, there is no choice.
if place == 0:
cond2 = False
if place == zoo:
cond1 = False
if not isinstance(cond1, bool):
cond1 = cond1.subs(z, z0)
if not isinstance(cond2, bool):
cond2 = cond2.subs(z, z0)
def weight(expr, cond):
if cond is True:
c0 = 0
elif cond is False:
c0 = 1
else:
c0 = 2
if expr.has(oo, zoo, -oo, nan):
# XXX this actually should not happen, but consider
# S('meijerg(((0, -1/2, 0, -1/2, 1/2), ()), ((0,),
# (-1/2, -1/2, -1/2, -1)), exp_polar(I*pi))/4')
c0 = 3
return (c0, expr.count(hyper), expr.count_ops())
w1 = weight(slater1, cond1)
w2 = weight(slater2, cond2)
if min(w1, w2) <= (0, 1, oo):
if w1 < w2:
return slater1
else:
return slater2
if max(w1[0], w2[0]) <= 1 and max(w1[1], w2[1]) <= 1:
return Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True))
# We couldn't find an expression without hypergeometric functions.
# TODO it would be helpful to give conditions under which the integral
# is known to diverge.
r = Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True))
if r.has(hyper) and not allow_hyper:
debug(' Could express using hypergeometric functions, '
'but not allowed.')
if not r.has(hyper) or allow_hyper:
return r
return func0(z0)
def hyperexpand(f, allow_hyper=False, rewrite='default', place=None):
"""
Expand hypergeometric functions. If allow_hyper is True, allow partial
simplification (that is a result different from input,
but still containing hypergeometric functions).
If a G-function has expansions both at zero and at infinity,
``place`` can be set to ``0`` or ``zoo`` to indicate the
preferred choice.
Examples
========
>>> from sympy.simplify.hyperexpand import hyperexpand
>>> from sympy.functions import hyper
>>> from sympy.abc import z
>>> hyperexpand(hyper([], [], z))
exp(z)
    Non-hypergeometric parts of the expression and hypergeometric expressions
    that are not recognised are left unchanged:
>>> hyperexpand(1 + hyper([1, 1, 1], [], z))
hyper((1, 1, 1), (), z) + 1
"""
f = sympify(f)
def do_replace(ap, bq, z):
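        # Try to expand a plain hypergeometric function; if no closed form is
        # found, fall back to the unevaluated hyper().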
r = _hyperexpand(Hyper_Function(ap, bq), z, rewrite=rewrite)
if r is None:
return hyper(ap, bq, z)
else:
return r
def do_meijer(ap, bq, z):
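        # Expand a Meijer G-function; the result is used only if it is free of
        # nan and infinities.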
r = _meijergexpand(G_Function(ap[0], ap[1], bq[0], bq[1]), z,
allow_hyper, rewrite=rewrite, place=place)
if not r.has(nan, zoo, oo, -oo):
return r
return f.replace(hyper, do_replace).replace(meijerg, do_meijer)
|
65309a39d3ab82d89f5b1426173ca782b2eaf6ec3307205d8924b9984f830e81 | """ Optimizations of the expression tree representation for better CSE
opportunities.
"""
from sympy.core import Add, Basic, Mul
from sympy.core.singleton import S
from sympy.core.sorting import default_sort_key
from sympy.core.traversal import preorder_traversal
def sub_pre(e):
""" Replace y - x with -(x - y) if -1 can be extracted from y - x.
"""
# replacing Add, A, from which -1 can be extracted with -1*-A
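    # e.g. y - x (from which -1 can be extracted) becomes the unevaluated
    # product -1*(x - y), so CSE can spot x - y as a common subexpression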
adds = [a for a in e.atoms(Add) if a.could_extract_minus_sign()]
reps = {}
ignore = set()
for a in adds:
na = -a
if na.is_Mul: # e.g. MatExpr
ignore.add(a)
continue
reps[a] = Mul._from_args([S.NegativeOne, na])
e = e.xreplace(reps)
# repeat again for persisting Adds but mark these with a leading 1, -1
# e.g. y - x -> 1*-1*(x - y)
if isinstance(e, Basic):
negs = {}
for a in sorted(e.atoms(Add), key=default_sort_key):
if a in ignore:
continue
if a in reps:
negs[a] = reps[a]
elif a.could_extract_minus_sign():
negs[a] = Mul._from_args([S.One, S.NegativeOne, -a])
e = e.xreplace(negs)
return e
def sub_post(e):
""" Replace 1*-1*x with -x.
"""
replacements = []
for node in preorder_traversal(e):
if isinstance(node, Mul) and \
node.args[0] is S.One and node.args[1] is S.NegativeOne:
replacements.append((node, -Mul._from_args(node.args[2:])))
for node, replacement in replacements:
e = e.xreplace({node: replacement})
return e
|
6965da09d653280b3370ab1ef2fd0744683cdeb7aebd2c62e5ef7bb1a7c670b0 | from collections import defaultdict
from sympy.core import (Basic, S, Add, Mul, Pow, Symbol, sympify,
expand_func, Function, Dummy, Expr, factor_terms,
expand_power_exp, Eq)
from sympy.core.exprtools import factor_nc
from sympy.core.parameters import global_parameters
from sympy.core.function import (expand_log, count_ops, _mexpand,
nfloat, expand_mul, expand)
from sympy.core.numbers import Float, I, pi, Rational
from sympy.core.relational import Relational
from sympy.core.rules import Transform
from sympy.core.sorting import ordered
from sympy.core.sympify import _sympify
from sympy.core.traversal import bottom_up as _bottom_up, walk as _walk
from sympy.functions import gamma, exp, sqrt, log, exp_polar, re
from sympy.functions.combinatorial.factorials import CombinatorialFunction
from sympy.functions.elementary.complexes import unpolarify, Abs, sign
from sympy.functions.elementary.exponential import ExpBase
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.functions.special.bessel import (BesselBase, besselj, besseli,
besselk, bessely, jn)
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.polys import together, cancel, factor
from sympy.simplify.combsimp import combsimp
from sympy.simplify.cse_opts import sub_pre, sub_post
from sympy.simplify.hyperexpand import hyperexpand
from sympy.simplify.powsimp import powsimp
from sympy.simplify.radsimp import radsimp, fraction, collect_abs
from sympy.simplify.sqrtdenest import sqrtdenest
from sympy.simplify.trigsimp import trigsimp, exptrigsimp
from sympy.utilities.decorator import deprecated
from sympy.utilities.iterables import has_variety, sift, subsets, iterable
from sympy.utilities.misc import as_int
import mpmath
def separatevars(expr, symbols=[], dict=False, force=False):
"""
Separates variables in an expression, if possible. By
default, it separates with respect to all symbols in an
expression and collects constant coefficients that are
independent of symbols.
Explanation
===========
If ``dict=True`` then the separated terms will be returned
in a dictionary keyed to their corresponding symbols.
By default, all symbols in the expression will appear as
keys; if symbols are provided, then all those symbols will
be used as keys, and any terms in the expression containing
other symbols or non-symbols will be returned keyed to the
string 'coeff'. (Passing None for symbols will return the
expression in a dictionary keyed to 'coeff'.)
If ``force=True``, then bases of powers will be separated regardless
of assumptions on the symbols involved.
Notes
=====
The order of the factors is determined by Mul, so that the
separated expressions may not necessarily be grouped together.
Although factoring is necessary to separate variables in some
expressions, it is not necessary in all cases, so one should not
count on the returned factors being factored.
Examples
========
>>> from sympy.abc import x, y, z, alpha
>>> from sympy import separatevars, sin
>>> separatevars((x*y)**y)
(x*y)**y
>>> separatevars((x*y)**y, force=True)
x**y*y**y
>>> e = 2*x**2*z*sin(y)+2*z*x**2
>>> separatevars(e)
2*x**2*z*(sin(y) + 1)
>>> separatevars(e, symbols=(x, y), dict=True)
{'coeff': 2*z, x: x**2, y: sin(y) + 1}
>>> separatevars(e, [x, y, alpha], dict=True)
{'coeff': 2*z, alpha: 1, x: x**2, y: sin(y) + 1}
If the expression is not really separable, or is only partially
separable, separatevars will do the best it can to separate it
by using factoring.
>>> separatevars(x + x*y - 3*x**2)
-x*(3*x - y - 1)
    If the expression is not separable then expr is returned unchanged,
    or (if dict=True) None is returned.
>>> eq = 2*x + y*sin(x)
>>> separatevars(eq) == eq
True
>>> separatevars(2*x + y*sin(x), symbols=(x, y), dict=True) is None
True
"""
expr = sympify(expr)
if dict:
return _separatevars_dict(_separatevars(expr, force), symbols)
else:
return _separatevars(expr, force)
def _separatevars(expr, force):
if isinstance(expr, Abs):
arg = expr.args[0]
if arg.is_Mul and not arg.is_number:
s = separatevars(arg, dict=True, force=force)
if s is not None:
return Mul(*map(expr.func, s.values()))
else:
return expr
if len(expr.free_symbols) < 2:
return expr
# don't destroy a Mul since much of the work may already be done
if expr.is_Mul:
args = list(expr.args)
changed = False
for i, a in enumerate(args):
args[i] = separatevars(a, force)
changed = changed or args[i] != a
if changed:
expr = expr.func(*args)
return expr
# get a Pow ready for expansion
if expr.is_Pow and expr.base != S.Exp1:
expr = Pow(separatevars(expr.base, force=force), expr.exp)
# First try other expansion methods
expr = expr.expand(mul=False, multinomial=False, force=force)
_expr, reps = posify(expr) if force else (expr, {})
expr = factor(_expr).subs(reps)
if not expr.is_Add:
return expr
# Find any common coefficients to pull out
args = list(expr.args)
commonc = args[0].args_cnc(cset=True, warn=False)[0]
for i in args[1:]:
commonc &= i.args_cnc(cset=True, warn=False)[0]
commonc = Mul(*commonc)
commonc = commonc.as_coeff_Mul()[1] # ignore constants
commonc_set = commonc.args_cnc(cset=True, warn=False)[0]
# remove them
for i, a in enumerate(args):
c, nc = a.args_cnc(cset=True, warn=False)
c = c - commonc_set
args[i] = Mul(*c)*Mul(*nc)
nonsepar = Add(*args)
if len(nonsepar.free_symbols) > 1:
_expr = nonsepar
_expr, reps = posify(_expr) if force else (_expr, {})
_expr = (factor(_expr)).subs(reps)
if not _expr.is_Add:
nonsepar = _expr
return commonc*nonsepar
def _separatevars_dict(expr, symbols):
if symbols:
if not all(t.is_Atom for t in symbols):
raise ValueError("symbols must be Atoms.")
symbols = list(symbols)
elif symbols is None:
return {'coeff': expr}
else:
symbols = list(expr.free_symbols)
if not symbols:
return None
ret = {i: [] for i in symbols + ['coeff']}
for i in Mul.make_args(expr):
expsym = i.free_symbols
intersection = set(symbols).intersection(expsym)
if len(intersection) > 1:
return None
if len(intersection) == 0:
# There are no symbols, so it is part of the coefficient
ret['coeff'].append(i)
else:
ret[intersection.pop()].append(i)
# rebuild
for k, v in ret.items():
ret[k] = Mul(*v)
return ret
def _is_sum_surds(p):
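    # True when every term of p is real and squares to a Rational,
    # e.g. 1 + sqrt(2) or 3*sqrt(5)/2 - sqrt(3)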
args = p.args if p.is_Add else [p]
for y in args:
if not ((y**2).is_Rational and y.is_extended_real):
return False
return True
def posify(eq):
"""Return ``eq`` (with generic symbols made positive) and a
dictionary containing the mapping between the old and new
symbols.
Explanation
===========
Any symbol that has positive=None will be replaced with a positive dummy
symbol having the same name. This replacement will allow more symbolic
processing of expressions, especially those involving powers and
logarithms.
A dictionary that can be sent to subs to restore ``eq`` to its original
symbols is also returned.
>>> from sympy import posify, Symbol, log, solve
>>> from sympy.abc import x
>>> posify(x + Symbol('p', positive=True) + Symbol('n', negative=True))
(_x + n + p, {_x: x})
>>> eq = 1/x
>>> log(eq).expand()
log(1/x)
>>> log(posify(eq)[0]).expand()
-log(_x)
>>> p, rep = posify(eq)
>>> log(p).expand().subs(rep)
-log(x)
It is possible to apply the same transformations to an iterable
of expressions:
>>> eq = x**2 - 4
>>> solve(eq, x)
[-2, 2]
>>> eq_x, reps = posify([eq, x]); eq_x
[_x**2 - 4, _x]
>>> solve(*eq_x)
[2]
"""
eq = sympify(eq)
if iterable(eq):
f = type(eq)
eq = list(eq)
syms = set()
for e in eq:
syms = syms.union(e.atoms(Symbol))
reps = {}
for s in syms:
reps.update({v: k for k, v in posify(s)[1].items()})
for i, e in enumerate(eq):
eq[i] = e.subs(reps)
return f(eq), {r: s for s, r in reps.items()}
reps = {s: Dummy(s.name, positive=True, **s.assumptions0)
for s in eq.free_symbols if s.is_positive is None}
eq = eq.subs(reps)
return eq, {r: s for s, r in reps.items()}
def hypersimp(f, k):
"""Given combinatorial term f(k) simplify its consecutive term ratio
i.e. f(k+1)/f(k). The input term can be composed of functions and
integer sequences which have equivalent representation in terms
of gamma special function.
Explanation
===========
The algorithm performs three basic steps:
1. Rewrite all functions in terms of gamma, if possible.
2. Rewrite all occurrences of gamma in terms of products
of gamma and rising factorial with integer, absolute
constant exponent.
3. Perform simplification of nested fractions, powers
and if the resulting expression is a quotient of
polynomials, reduce their total degree.
    If f(k) is hypergeometric, the result is a quotient of polynomials of
    minimal degree. Otherwise, None is returned.
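    Examples
    ========
    A minimal illustration (the output below follows from the gamma rewrite
    described above):
    >>> from sympy import hypersimp, factorial
    >>> from sympy.abc import k
    >>> hypersimp(factorial(k), k)
    k + 1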
For more information on the implemented algorithm refer to:
1. W. Koepf, Algorithms for m-fold Hypergeometric Summation,
Journal of Symbolic Computation (1995) 20, 399-417
"""
f = sympify(f)
g = f.subs(k, k + 1) / f
g = g.rewrite(gamma)
if g.has(Piecewise):
g = piecewise_fold(g)
g = g.args[-1][0]
g = expand_func(g)
g = powsimp(g, deep=True, combine='exp')
if g.is_rational_function(k):
return simplify(g, ratio=S.Infinity)
else:
return None
def hypersimilar(f, g, k):
"""
Returns True if ``f`` and ``g`` are hyper-similar.
Explanation
===========
    Similarity in the hypergeometric sense means that the quotient of
    f(k) and g(k) is a rational function of ``k``. This procedure is
    useful in solving recurrence relations.
For more information see hypersimp().
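    Examples
    ========
    For instance (illustrative):
    >>> from sympy.simplify.simplify import hypersimilar
    >>> from sympy import factorial
    >>> from sympy.abc import k
    >>> hypersimilar(factorial(k), k*factorial(k), k)
    True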
"""
f, g = list(map(sympify, (f, g)))
h = (f/g).rewrite(gamma)
h = h.expand(func=True, basic=False)
return h.is_rational_function(k)
def signsimp(expr, evaluate=None):
"""Make all Add sub-expressions canonical wrt sign.
Explanation
===========
If an Add subexpression, ``a``, can have a sign extracted,
as determined by could_extract_minus_sign, it is replaced
with Mul(-1, a, evaluate=False). This allows signs to be
extracted from powers and products.
Examples
========
>>> from sympy import signsimp, exp, symbols
>>> from sympy.abc import x, y
>>> i = symbols('i', odd=True)
>>> n = -1 + 1/x
>>> n/x/(-n)**2 - 1/n/x
(-1 + 1/x)/(x*(1 - 1/x)**2) - 1/(x*(-1 + 1/x))
>>> signsimp(_)
0
>>> x*n + x*-n
x*(-1 + 1/x) + x*(1 - 1/x)
>>> signsimp(_)
0
Since powers automatically handle leading signs
>>> (-2)**i
-2**i
signsimp can be used to put the base of a power with an integer
exponent into canonical form:
>>> n**i
(-1 + 1/x)**i
By default, signsimp doesn't leave behind any hollow simplification:
if making an Add canonical wrt sign didn't change the expression, the
original Add is restored. If this is not desired then the keyword
``evaluate`` can be set to False:
>>> e = exp(y - x)
>>> signsimp(e) == e
True
>>> signsimp(e, evaluate=False)
exp(-(x - y))
"""
if evaluate is None:
evaluate = global_parameters.evaluate
expr = sympify(expr)
if not isinstance(expr, (Expr, Relational)) or expr.is_Atom:
return expr
e = sub_post(sub_pre(expr))
if not isinstance(e, (Expr, Relational)) or e.is_Atom:
return e
if e.is_Add:
rv = e.func(*[signsimp(a) for a in e.args])
if not evaluate and isinstance(rv, Add
) and rv.could_extract_minus_sign():
return Mul(S.NegativeOne, -rv, evaluate=False)
return rv
if evaluate:
e = e.xreplace({m: -(-m) for m in e.atoms(Mul) if -(-m) != m})
return e
def simplify(expr, ratio=1.7, measure=count_ops, rational=False, inverse=False, doit=True, **kwargs):
"""Simplifies the given expression.
Explanation
===========
Simplification is not a well defined term and the exact strategies
this function tries can change in the future versions of SymPy. If
your algorithm relies on "simplification" (whatever it is), try to
determine what you need exactly - is it powsimp()?, radsimp()?,
together()?, logcombine()?, or something else? And use this particular
function directly, because those are well defined and thus your algorithm
will be robust.
Nonetheless, especially for interactive use, or when you do not know
anything about the structure of the expression, simplify() tries to apply
intelligent heuristics to make the input expression "simpler". For
example:
>>> from sympy import simplify, cos, sin
>>> from sympy.abc import x, y
>>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
>>> a
(x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
>>> simplify(a)
x + 1
Note that we could have obtained the same result by using specific
simplification functions:
>>> from sympy import trigsimp, cancel
>>> trigsimp(a)
(x**2 + x)/x
>>> cancel(_)
x + 1
In some cases, applying :func:`simplify` may actually result in some more
complicated expression. The default ``ratio=1.7`` prevents more extreme
cases: if (result length)/(input length) > ratio, then input is returned
unmodified. The ``measure`` parameter lets you specify the function used
to determine how complex an expression is. The function should take a
single argument as an expression and return a number such that if
expression ``a`` is more complex than expression ``b``, then
``measure(a) > measure(b)``. The default measure function is
:func:`~.count_ops`, which returns the total number of operations in the
expression.
For example, if ``ratio=1``, ``simplify`` output cannot be longer
than input.
::
>>> from sympy import sqrt, simplify, count_ops, oo
>>> root = 1/(sqrt(2)+3)
Since ``simplify(root)`` would result in a slightly longer expression,
root is returned unchanged instead::
>>> simplify(root, ratio=1) == root
True
If ``ratio=oo``, simplify will be applied anyway::
>>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
True
    Note that the shortest expression is not necessarily the simplest, so
    setting ``ratio`` to 1 may not be a good idea.
Heuristically, the default value ``ratio=1.7`` seems like a reasonable
choice.
You can easily define your own measure function based on what you feel
should represent the "size" or "complexity" of the input expression. Note
that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
good metrics, but have other problems (in this case, the measure function
may slow down simplify too much for very large expressions). If you do not
know what a good metric would be, the default, ``count_ops``, is a good
one.
For example:
>>> from sympy import symbols, log
>>> a, b = symbols('a b', positive=True)
>>> g = log(a) + log(b) + log(a)*log(1/b)
>>> h = simplify(g)
>>> h
log(a*b**(1 - log(a)))
>>> count_ops(g)
8
>>> count_ops(h)
5
So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
    However, we may not like how ``simplify`` (in this case, using
    ``logcombine``) has created the ``b**(1 - log(a))`` term. A simple way
to reduce this would be to give more weight to powers as operations in
``count_ops``. We can do this by using the ``visual=True`` option:
>>> print(count_ops(g, visual=True))
2*ADD + DIV + 4*LOG + MUL
>>> print(count_ops(h, visual=True))
2*LOG + MUL + POW + SUB
>>> from sympy import Symbol, S
>>> def my_measure(expr):
... POW = Symbol('POW')
... # Discourage powers by giving POW a weight of 10
... count = count_ops(expr, visual=True).subs(POW, 10)
... # Every other operation gets a weight of 1 (the default)
... count = count.replace(Symbol, type(S.One))
... return count
>>> my_measure(g)
8
>>> my_measure(h)
14
>>> 15./8 > 1.7 # 1.7 is the default ratio
True
>>> simplify(g, measure=my_measure)
-log(a)*log(b) + log(a) + log(b)
Note that because ``simplify()`` internally tries many different
simplification strategies and then compares them using the measure
function, we get a completely different result that is still different
from the input expression by doing this.
    If ``rational=True``, Floats will be recast as Rationals before simplification.
    If ``rational=None``, Floats will be recast as Rationals but the result will
    be recast as Floats. If ``rational=False`` (the default) then nothing will be
    done to the Floats.
If ``inverse=True``, it will be assumed that a composition of inverse
functions, such as sin and asin, can be cancelled in any order.
For example, ``asin(sin(x))`` will yield ``x`` without checking whether
x belongs to the set where this relation is true. The default is
False.
Note that ``simplify()`` automatically calls ``doit()`` on the final
expression. You can avoid this behavior by passing ``doit=False`` as
an argument.
    Also, it should be noted that simplifying a boolean expression is not
    well defined. If the expression prefers automatic evaluation (such as
    :obj:`~.Eq()` or :obj:`~.Or()`), simplification will return ``True`` or
    ``False`` if the truth value can be determined. If the expression is not
    evaluated by default (such as :obj:`~.Predicate()`), simplification will
    not reduce it and you should use :func:`~.refine()` or :func:`~.ask()`
    instead. This inconsistency will be resolved in a future version.
See Also
========
sympy.assumptions.refine.refine : Simplification using assumptions.
sympy.assumptions.ask.ask : Query for boolean expressions using assumptions.
"""
def shorter(*choices):
"""
Return the choice that has the fewest ops. In case of a tie,
the expression listed first is selected.
"""
if not has_variety(choices):
return choices[0]
return min(choices, key=measure)
def done(e):
rv = e.doit() if doit else e
return shorter(rv, collect_abs(rv))
expr = sympify(expr, rational=rational)
kwargs = dict(
ratio=kwargs.get('ratio', ratio),
measure=kwargs.get('measure', measure),
rational=kwargs.get('rational', rational),
inverse=kwargs.get('inverse', inverse),
doit=kwargs.get('doit', doit))
# no routine for Expr needs to check for is_zero
if isinstance(expr, Expr) and expr.is_zero:
return S.Zero if not expr.is_Number else expr
_eval_simplify = getattr(expr, '_eval_simplify', None)
if _eval_simplify is not None:
return _eval_simplify(**kwargs)
original_expr = expr = collect_abs(signsimp(expr))
if not isinstance(expr, Basic) or not expr.args: # XXX: temporary hack
return expr
if inverse and expr.has(Function):
expr = inversecombine(expr)
if not expr.args: # simplified to atomic
return expr
# do deep simplification
handled = Add, Mul, Pow, ExpBase
expr = expr.replace(
# here, checking for x.args is not enough because Basic has
# args but Basic does not always play well with replace, e.g.
# when simultaneous is True found expressions will be masked
# off with a Dummy but not all Basic objects in an expression
# can be replaced with a Dummy
lambda x: isinstance(x, Expr) and x.args and not isinstance(
x, handled),
lambda x: x.func(*[simplify(i, **kwargs) for i in x.args]),
simultaneous=False)
if not isinstance(expr, handled):
return done(expr)
if not expr.is_commutative:
expr = nc_simplify(expr)
# TODO: Apply different strategies, considering expression pattern:
# is it a purely rational function? Is there any trigonometric function?...
# See also https://github.com/sympy/sympy/pull/185.
# rationalize Floats
floats = False
if rational is not False and expr.has(Float):
floats = True
expr = nsimplify(expr, rational=True)
expr = _bottom_up(expr, lambda w: getattr(w, 'normal', lambda: w)())
expr = Mul(*powsimp(expr).as_content_primitive())
_e = cancel(expr)
expr1 = shorter(_e, _mexpand(_e).cancel()) # issue 6829
expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))
if ratio is S.Infinity:
expr = expr2
else:
expr = shorter(expr2, expr1, expr)
if not isinstance(expr, Basic): # XXX: temporary hack
return expr
expr = factor_terms(expr, sign=False)
# must come before `Piecewise` since this introduces more `Piecewise` terms
if expr.has(sign):
expr = expr.rewrite(Abs)
# Deal with Piecewise separately to avoid recursive growth of expressions
if expr.has(Piecewise):
# Fold into a single Piecewise
expr = piecewise_fold(expr)
# Apply doit, if doit=True
expr = done(expr)
# Still a Piecewise?
if expr.has(Piecewise):
            # Fold into a single Piecewise, in case doit led to some
            # expressions being Piecewise
expr = piecewise_fold(expr)
# kroneckersimp also affects Piecewise
if expr.has(KroneckerDelta):
expr = kroneckersimp(expr)
# Still a Piecewise?
if expr.has(Piecewise):
from sympy.functions.elementary.piecewise import piecewise_simplify
# Do not apply doit on the segments as it has already
# been done above, but simplify
expr = piecewise_simplify(expr, deep=True, doit=False)
# Still a Piecewise?
if expr.has(Piecewise):
# Try factor common terms
expr = shorter(expr, factor_terms(expr))
# As all expressions have been simplified above with the
# complete simplify, nothing more needs to be done here
return expr
# hyperexpand automatically only works on hypergeometric terms
# Do this after the Piecewise part to avoid recursive expansion
expr = hyperexpand(expr)
if expr.has(KroneckerDelta):
expr = kroneckersimp(expr)
if expr.has(BesselBase):
expr = besselsimp(expr)
if expr.has(TrigonometricFunction, HyperbolicFunction):
expr = trigsimp(expr, deep=True)
if expr.has(log):
expr = shorter(expand_log(expr, deep=True), logcombine(expr))
if expr.has(CombinatorialFunction, gamma):
# expression with gamma functions or non-integer arguments is
# automatically passed to gammasimp
expr = combsimp(expr)
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
if expr.has(Sum):
expr = sum_simplify(expr, **kwargs)
if expr.has(Integral):
expr = expr.xreplace({
i: factor_terms(i) for i in expr.atoms(Integral)})
if expr.has(Product):
expr = product_simplify(expr)
from sympy.physics.units import Quantity
if expr.has(Quantity):
from sympy.physics.units.util import quantity_simplify
expr = quantity_simplify(expr)
short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr)
short = shorter(short, cancel(short))
short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short)))
if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase, exp):
short = exptrigsimp(short)
# get rid of hollow 2-arg Mul factorization
hollow_mul = Transform(
lambda x: Mul(*x.args),
lambda x:
x.is_Mul and
len(x.args) == 2 and
x.args[0].is_Number and
x.args[1].is_Add and
x.is_commutative)
expr = short.xreplace(hollow_mul)
numer, denom = expr.as_numer_denom()
if denom.is_Add:
n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1))
if n is not S.One:
expr = (numer*n).expand()/d
if expr.could_extract_minus_sign():
n, d = fraction(expr)
if d != 0:
expr = signsimp(-n/(-d))
if measure(expr) > ratio*measure(original_expr):
expr = original_expr
# restore floats
if floats and rational is None:
expr = nfloat(expr, exponent=False)
return done(expr)
def sum_simplify(s, **kwargs):
"""Main function for Sum simplification"""
from sympy.concrete.summations import Sum
if not isinstance(s, Add):
s = s.xreplace({a: sum_simplify(a, **kwargs)
for a in s.atoms(Add) if a.has(Sum)})
s = expand(s)
if not isinstance(s, Add):
return s
terms = s.args
s_t = [] # Sum Terms
o_t = [] # Other Terms
for term in terms:
sum_terms, other = sift(Mul.make_args(term),
lambda i: isinstance(i, Sum), binary=True)
if not sum_terms:
o_t.append(term)
continue
other = [Mul(*other)]
s_t.append(Mul(*(other + [s._eval_simplify(**kwargs) for s in sum_terms])))
result = Add(sum_combine(s_t), *o_t)
return result
def sum_combine(s_t):
"""Helper function for Sum simplification
    Attempts to simplify a list of sums by combining their limits and
    summands; returns the simplified sum.
"""
from sympy.concrete.summations import Sum
used = [False] * len(s_t)
for method in range(2):
for i, s_term1 in enumerate(s_t):
if not used[i]:
for j, s_term2 in enumerate(s_t):
if not used[j] and i != j:
temp = sum_add(s_term1, s_term2, method)
if isinstance(temp, (Sum, Mul)):
s_t[i] = temp
s_term1 = s_t[i]
used[j] = True
result = S.Zero
for i, s_term in enumerate(s_t):
if not used[i]:
result = Add(result, s_term)
return result
def factor_sum(self, limits=None, radical=False, clear=False, fraction=False, sign=True):
"""Return Sum with constant factors extracted.
If ``limits`` is specified then ``self`` is the summand; the other
keywords are passed to ``factor_terms``.
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> from sympy.simplify.simplify import factor_sum
>>> s = Sum(x*y, (x, 1, 3))
>>> factor_sum(s)
y*Sum(x, (x, 1, 3))
>>> factor_sum(s.function, s.limits)
y*Sum(x, (x, 1, 3))
"""
# XXX deprecate in favor of direct call to factor_terms
from sympy.concrete.summations import Sum
kwargs = dict(radical=radical, clear=clear,
fraction=fraction, sign=sign)
expr = Sum(self, *limits) if limits else self
return factor_terms(expr, **kwargs)
def sum_add(self, other, method=0):
"""Helper function for Sum simplification"""
from sympy.concrete.summations import Sum
    # we know this is something in terms of a constant * a sum
    # so we temporarily put the constants inside for simplification
    # and then simplify the result
def __refactor(val):
args = Mul.make_args(val)
sumv = next(x for x in args if isinstance(x, Sum))
constant = Mul(*[x for x in args if x != sumv])
return Sum(constant * sumv.function, *sumv.limits)
if isinstance(self, Mul):
rself = __refactor(self)
else:
rself = self
if isinstance(other, Mul):
rother = __refactor(other)
else:
rother = other
if type(rself) == type(rother):
if method == 0:
if rself.limits == rother.limits:
return factor_sum(Sum(rself.function + rother.function, *rself.limits))
elif method == 1:
if simplify(rself.function - rother.function) == 0:
if len(rself.limits) == len(rother.limits) == 1:
i = rself.limits[0][0]
x1 = rself.limits[0][1]
y1 = rself.limits[0][2]
j = rother.limits[0][0]
x2 = rother.limits[0][1]
y2 = rother.limits[0][2]
if i == j:
if x2 == y1 + 1:
return factor_sum(Sum(rself.function, (i, x1, y2)))
elif x1 == y2 + 1:
return factor_sum(Sum(rself.function, (i, x2, y1)))
return Add(self, other)
def product_simplify(s):
"""Main function for Product simplification"""
from sympy.concrete.products import Product
terms = Mul.make_args(s)
p_t = [] # Product Terms
o_t = [] # Other Terms
for term in terms:
if isinstance(term, Product):
p_t.append(term)
else:
o_t.append(term)
used = [False] * len(p_t)
for method in range(2):
for i, p_term1 in enumerate(p_t):
if not used[i]:
for j, p_term2 in enumerate(p_t):
if not used[j] and i != j:
if isinstance(product_mul(p_term1, p_term2, method), Product):
p_t[i] = product_mul(p_term1, p_term2, method)
used[j] = True
result = Mul(*o_t)
for i, p_term in enumerate(p_t):
if not used[i]:
result = Mul(result, p_term)
return result
def product_mul(self, other, method=0):
"""Helper function for Product simplification"""
from sympy.concrete.products import Product
if type(self) == type(other):
if method == 0:
if self.limits == other.limits:
return Product(self.function * other.function, *self.limits)
elif method == 1:
if simplify(self.function - other.function) == 0:
if len(self.limits) == len(other.limits) == 1:
i = self.limits[0][0]
x1 = self.limits[0][1]
y1 = self.limits[0][2]
j = other.limits[0][0]
x2 = other.limits[0][1]
y2 = other.limits[0][2]
if i == j:
if x2 == y1 + 1:
return Product(self.function, (i, x1, y2))
elif x1 == y2 + 1:
return Product(self.function, (i, x2, y1))
return Mul(self, other)
def _nthroot_solve(p, n, prec):
"""
helper function for ``nthroot``
It denests ``p**Rational(1, n)`` using its minimal polynomial
"""
from sympy.polys.numberfields.minpoly import _minimal_polynomial_sq
from sympy.solvers import solve
while n % 2 == 0:
p = sqrtdenest(sqrt(p))
n = n // 2
if n == 1:
return p
pn = p**Rational(1, n)
x = Symbol('x')
f = _minimal_polynomial_sq(p, n, x)
if f is None:
return None
sols = solve(f, x)
for sol in sols:
if abs(sol - pn).n() < 1./10**prec:
sol = sqrtdenest(sol)
if _mexpand(sol**n) == p:
return sol
def logcombine(expr, force=False):
"""
Takes logarithms and combines them using the following rules:
- log(x) + log(y) == log(x*y) if both are positive
- a*log(x) == log(x**a) if x is positive and a is real
If ``force`` is ``True`` then the assumptions above will be assumed to hold if
there is no assumption already in place on a quantity. For example, if
``a`` is imaginary or the argument negative, force will not perform a
combination but if ``a`` is a symbol with no assumptions the change will
take place.
Examples
========
>>> from sympy import Symbol, symbols, log, logcombine, I
>>> from sympy.abc import a, x, y, z
>>> logcombine(a*log(x) + log(y) - log(z))
a*log(x) + log(y) - log(z)
>>> logcombine(a*log(x) + log(y) - log(z), force=True)
log(x**a*y/z)
>>> x,y,z = symbols('x,y,z', positive=True)
>>> a = Symbol('a', real=True)
>>> logcombine(a*log(x) + log(y) - log(z))
log(x**a*y/z)
The transformation is limited to factors and/or terms that
contain logs, so the result depends on the initial state of
expansion:
>>> eq = (2 + 3*I)*log(x)
>>> logcombine(eq, force=True) == eq
True
>>> logcombine(eq.expand(), force=True)
log(x**2) + I*log(x**3)
See Also
========
posify: replace all symbols with symbols having positive assumptions
sympy.core.function.expand_log: expand the logarithms of products
and powers; the opposite of logcombine
"""
def f(rv):
if not (rv.is_Add or rv.is_Mul):
return rv
def gooda(a):
# bool to tell whether the leading ``a`` in ``a*log(x)``
# could appear as log(x**a)
return (a is not S.NegativeOne and # -1 *could* go, but we disallow
(a.is_extended_real or force and a.is_extended_real is not False))
def goodlog(l):
# bool to tell whether log ``l``'s argument can combine with others
a = l.args[0]
return a.is_positive or force and a.is_nonpositive is not False
other = []
logs = []
log1 = defaultdict(list)
for a in Add.make_args(rv):
if isinstance(a, log) and goodlog(a):
log1[()].append(([], a))
elif not a.is_Mul:
other.append(a)
else:
ot = []
co = []
lo = []
for ai in a.args:
if ai.is_Rational and ai < 0:
ot.append(S.NegativeOne)
co.append(-ai)
elif isinstance(ai, log) and goodlog(ai):
lo.append(ai)
elif gooda(ai):
co.append(ai)
else:
ot.append(ai)
if len(lo) > 1:
logs.append((ot, co, lo))
elif lo:
log1[tuple(ot)].append((co, lo[0]))
else:
other.append(a)
# if there is only one log in other, put it with the
# good logs
if len(other) == 1 and isinstance(other[0], log):
log1[()].append(([], other.pop()))
# if there is only one log at each coefficient and none have
# an exponent to place inside the log then there is nothing to do
if not logs and all(len(log1[k]) == 1 and log1[k][0] == [] for k in log1):
return rv
# collapse multi-logs as far as possible in a canonical way
# TODO: see if x*log(a)+x*log(a)*log(b) -> x*log(a)*(1+log(b))?
        # -- in this case, it's unambiguous, but if there were a log(c) in
# each term then it's arbitrary whether they are grouped by log(a) or
# by log(c). So for now, just leave this alone; it's probably better to
# let the user decide
for o, e, l in logs:
l = list(ordered(l))
e = log(l.pop(0).args[0]**Mul(*e))
while l:
li = l.pop(0)
e = log(li.args[0]**e)
c, l = Mul(*o), e
if isinstance(l, log): # it should be, but check to be sure
log1[(c,)].append(([], l))
else:
other.append(c*l)
# logs that have the same coefficient can multiply
for k in list(log1.keys()):
log1[Mul(*k)] = log(logcombine(Mul(*[
l.args[0]**Mul(*c) for c, l in log1.pop(k)]),
force=force), evaluate=False)
# logs that have oppositely signed coefficients can divide
for k in ordered(list(log1.keys())):
            if k not in log1:  # already popped as -k
continue
if -k in log1:
# figure out which has the minus sign; the one with
# more op counts should be the one
num, den = k, -k
if num.count_ops() > den.count_ops():
num, den = den, num
other.append(
num*log(log1.pop(num).args[0]/log1.pop(den).args[0],
evaluate=False))
else:
other.append(k*log1.pop(k))
return Add(*other)
return _bottom_up(expr, f)
def inversecombine(expr):
"""Simplify the composition of a function and its inverse.
Explanation
===========
No attention is paid to whether the inverse is a left inverse or a
right inverse; thus, the result will in general not be equivalent
to the original expression.
Examples
========
>>> from sympy.simplify.simplify import inversecombine
>>> from sympy import asin, sin, log, exp
>>> from sympy.abc import x
>>> inversecombine(asin(sin(x)))
x
>>> inversecombine(2*log(exp(3*x)))
6*x
"""
def f(rv):
if isinstance(rv, log):
if isinstance(rv.args[0], exp) or (rv.args[0].is_Pow and rv.args[0].base == S.Exp1):
rv = rv.args[0].exp
elif rv.is_Function and hasattr(rv, "inverse"):
if (len(rv.args) == 1 and len(rv.args[0].args) == 1 and
isinstance(rv.args[0], rv.inverse(argindex=1))):
rv = rv.args[0].args[0]
if rv.is_Pow and rv.base == S.Exp1:
if isinstance(rv.exp, log):
rv = rv.exp.args[0]
return rv
return _bottom_up(expr, f)
def kroneckersimp(expr):
"""
Simplify expressions with KroneckerDelta.
The only simplification currently attempted is to identify multiplicative cancellation:
Examples
========
>>> from sympy import KroneckerDelta, kroneckersimp
>>> from sympy.abc import i
>>> kroneckersimp(1 + KroneckerDelta(0, i) * KroneckerDelta(1, i))
1
"""
def args_cancel(args1, args2):
for i1 in range(2):
for i2 in range(2):
a1 = args1[i1]
a2 = args2[i2]
a3 = args1[(i1 + 1) % 2]
a4 = args2[(i2 + 1) % 2]
if Eq(a1, a2) is S.true and Eq(a3, a4) is S.false:
return True
return False
def cancel_kronecker_mul(m):
args = m.args
deltas = [a for a in args if isinstance(a, KroneckerDelta)]
for delta1, delta2 in subsets(deltas, 2):
args1 = delta1.args
args2 = delta2.args
if args_cancel(args1, args2):
return S.Zero * m # In case of oo etc
return m
if not expr.has(KroneckerDelta):
return expr
if expr.has(Piecewise):
expr = expr.rewrite(KroneckerDelta)
newexpr = expr
expr = None
while newexpr != expr:
expr = newexpr
newexpr = expr.replace(lambda e: isinstance(e, Mul), cancel_kronecker_mul)
return expr
def besselsimp(expr):
"""
Simplify bessel-type functions.
Explanation
===========
This routine tries to simplify bessel-type functions. Currently it only
works on the Bessel J and I functions, however. It works by looking at all
such functions in turn, and eliminating factors of "I" and "-1" (actually
their polar equivalents) in front of the argument. Then, functions of
    half-integer order are rewritten using trigonometric functions and
    functions of integer order (> 1) are rewritten using functions
of low order. Finally, if the expression was changed, compute
factorization of the result with factor().
>>> from sympy import besselj, besseli, besselsimp, polar_lift, I, S
>>> from sympy.abc import z, nu
>>> besselsimp(besselj(nu, z*polar_lift(-1)))
exp(I*pi*nu)*besselj(nu, z)
>>> besselsimp(besseli(nu, z*polar_lift(-I)))
exp(-I*pi*nu/2)*besselj(nu, z)
>>> besselsimp(besseli(S(-1)/2, z))
sqrt(2)*cosh(z)/(sqrt(pi)*sqrt(z))
>>> besselsimp(z*besseli(0, z) + z*(besseli(2, z))/2 + besseli(1, z))
3*z*besseli(0, z)/2
"""
# TODO
# - better algorithm?
# - simplify (cos(pi*b)*besselj(b,z) - besselj(-b,z))/sin(pi*b) ...
# - use contiguity relations?
def replacer(fro, to, factors):
factors = set(factors)
def repl(nu, z):
if factors.intersection(Mul.make_args(z)):
return to(nu, z)
return fro(nu, z)
return repl
def torewrite(fro, to):
def tofunc(nu, z):
return fro(nu, z).rewrite(to)
return tofunc
def tominus(fro):
def tofunc(nu, z):
return exp(I*pi*nu)*fro(nu, exp_polar(-I*pi)*z)
return tofunc
orig_expr = expr
ifactors = [I, exp_polar(I*pi/2), exp_polar(-I*pi/2)]
expr = expr.replace(
besselj, replacer(besselj,
torewrite(besselj, besseli), ifactors))
expr = expr.replace(
besseli, replacer(besseli,
torewrite(besseli, besselj), ifactors))
minusfactors = [-1, exp_polar(I*pi)]
expr = expr.replace(
besselj, replacer(besselj, tominus(besselj), minusfactors))
expr = expr.replace(
besseli, replacer(besseli, tominus(besseli), minusfactors))
z0 = Dummy('z')
def expander(fro):
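        # Half-integer orders are rewritten through spherical Bessel functions
        # (jn) and trig functions; integer orders > 1 are reduced by
        # expand(func=True).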
def repl(nu, z):
if (nu % 1) == S.Half:
return simplify(trigsimp(unpolarify(
fro(nu, z0).rewrite(besselj).rewrite(jn).expand(
func=True)).subs(z0, z)))
elif nu.is_Integer and nu > 1:
return fro(nu, z).expand(func=True)
return fro(nu, z)
return repl
expr = expr.replace(besselj, expander(besselj))
expr = expr.replace(bessely, expander(bessely))
expr = expr.replace(besseli, expander(besseli))
expr = expr.replace(besselk, expander(besselk))
def _bessel_simp_recursion(expr):
def _use_recursion(bessel, expr):
while True:
bessels = expr.find(lambda x: isinstance(x, bessel))
try:
for ba in sorted(bessels, key=lambda x: re(x.args[0])):
a, x = ba.args
bap1 = bessel(a+1, x)
bap2 = bessel(a+2, x)
if expr.has(bap1) and expr.has(bap2):
expr = expr.subs(ba, 2*(a+1)/x*bap1 - bap2)
break
else:
return expr
except (ValueError, TypeError):
return expr
if expr.has(besselj):
expr = _use_recursion(besselj, expr)
if expr.has(bessely):
expr = _use_recursion(bessely, expr)
return expr
expr = _bessel_simp_recursion(expr)
if expr != orig_expr:
expr = expr.factor()
return expr
def nthroot(expr, n, max_len=4, prec=15):
"""
Compute a real nth-root of a sum of surds.
Parameters
==========
expr : sum of surds
n : integer
max_len : maximum number of surds passed as constants to ``nsimplify``
Algorithm
=========
First ``nsimplify`` is used to get a candidate root; if it is not a
root the minimal polynomial is computed; the answer is one of its
roots.
Examples
========
>>> from sympy.simplify.simplify import nthroot
>>> from sympy import sqrt
>>> nthroot(90 + 34*sqrt(7), 3)
sqrt(7) + 3
"""
expr = sympify(expr)
n = sympify(n)
p = expr**Rational(1, n)
if not n.is_integer:
return p
if not _is_sum_surds(expr):
return p
surds = []
coeff_muls = [x.as_coeff_Mul() for x in expr.args]
for x, y in coeff_muls:
if not x.is_rational:
return p
if y is S.One:
continue
if not (y.is_Pow and y.exp == S.Half and y.base.is_integer):
return p
surds.append(y)
surds.sort()
surds = surds[:max_len]
if expr < 0 and n % 2 == 1:
p = (-expr)**Rational(1, n)
a = nsimplify(p, constants=surds)
res = a if _mexpand(a**n) == _mexpand(-expr) else p
return -res
a = nsimplify(p, constants=surds)
if _mexpand(a) is not _mexpand(p) and _mexpand(a**n) == _mexpand(expr):
return _mexpand(a)
expr = _nthroot_solve(expr, n, prec)
if expr is None:
return p
return expr
def nsimplify(expr, constants=(), tolerance=None, full=False, rational=None,
rational_conversion='base10'):
"""
Find a simple representation for a number or, if there are free symbols or
if ``rational=True``, then replace Floats with their Rational equivalents. If
no change is made and rational is not False then Floats will at least be
converted to Rationals.
Explanation
===========
For numerical expressions, a simple formula that numerically matches the
given numerical expression is sought (and the input should be possible
to evalf to a precision of at least 30 digits).
Optionally, a list of (rationally independent) constants to
include in the formula may be given.
A lower tolerance may be set to find less exact matches. If no tolerance
is given then the least precise value will set the tolerance (e.g. Floats
default to 15 digits of precision, so would be tolerance=10**-15).
With ``full=True``, a more extensive search is performed
(this is useful to find simpler numbers when the tolerance
is set low).
When converting to rational, if rational_conversion='base10' (the default), then
convert floats to rationals using their base-10 (string) representation.
When rational_conversion='exact' it uses the exact, base-2 representation.
Examples
========
>>> from sympy import nsimplify, sqrt, GoldenRatio, exp, I, pi
>>> nsimplify(4/(1+sqrt(5)), [GoldenRatio])
-2 + 2*GoldenRatio
>>> nsimplify((1/(exp(3*pi*I/5)+1)))
1/2 - I*sqrt(sqrt(5)/10 + 1/4)
>>> nsimplify(I**I, [pi])
exp(-pi/2)
>>> nsimplify(pi, tolerance=0.01)
22/7
>>> nsimplify(0.333333333333333, rational=True, rational_conversion='exact')
6004799503160655/18014398509481984
>>> nsimplify(0.333333333333333, rational=True)
1/3
See Also
========
sympy.core.function.nfloat
"""
try:
return sympify(as_int(expr))
except (TypeError, ValueError):
pass
expr = sympify(expr).xreplace({
Float('inf'): S.Infinity,
Float('-inf'): S.NegativeInfinity,
})
if expr is S.Infinity or expr is S.NegativeInfinity:
return expr
if rational or expr.free_symbols:
return _real_to_rational(expr, tolerance, rational_conversion)
# SymPy's default tolerance for Rationals is 15; other numbers may have
# lower tolerances set, so use them to pick the largest tolerance if None
# was given
if tolerance is None:
tolerance = 10**-min([15] +
[mpmath.libmp.libmpf.prec_to_dps(n._prec)
for n in expr.atoms(Float)])
# XXX should prec be set independent of tolerance or should it be computed
# from tolerance?
prec = 30
bprec = int(prec*3.33)
constants_dict = {}
for constant in constants:
constant = sympify(constant)
v = constant.evalf(prec)
if not v.is_Float:
raise ValueError("constants must be real-valued")
constants_dict[str(constant)] = v._to_mpmath(bprec)
exprval = expr.evalf(prec, chop=True)
re, im = exprval.as_real_imag()
# safety check to make sure that this evaluated to a number
if not (re.is_Number and im.is_Number):
return expr
def nsimplify_real(x):
orig = mpmath.mp.dps
xv = x._to_mpmath(bprec)
try:
# We'll be happy with low precision if a simple fraction
if not (tolerance or full):
mpmath.mp.dps = 15
rat = mpmath.pslq([xv, 1])
if rat is not None:
return Rational(-int(rat[1]), int(rat[0]))
mpmath.mp.dps = prec
newexpr = mpmath.identify(xv, constants=constants_dict,
tol=tolerance, full=full)
if not newexpr:
raise ValueError
if full:
newexpr = newexpr[0]
expr = sympify(newexpr)
if x and not expr: # don't let x become 0
raise ValueError
            if expr.is_finite is False and xv not in [mpmath.inf, mpmath.ninf]:
raise ValueError
return expr
finally:
# even though there are returns above, this is executed
# before leaving
mpmath.mp.dps = orig
try:
if re:
re = nsimplify_real(re)
if im:
im = nsimplify_real(im)
except ValueError:
if rational is None:
return _real_to_rational(expr, rational_conversion=rational_conversion)
return expr
rv = re + im*S.ImaginaryUnit
# if there was a change or rational is explicitly not wanted
# return the value, else return the Rational representation
if rv != expr or rational is False:
return rv
return _real_to_rational(expr, rational_conversion=rational_conversion)
def _real_to_rational(expr, tolerance=None, rational_conversion='base10'):
"""
Replace all reals in expr with rationals.
Examples
========
>>> from sympy.simplify.simplify import _real_to_rational
>>> from sympy.abc import x
>>> _real_to_rational(.76 + .1*x**.5)
sqrt(x)/10 + 19/25
If rational_conversion='base10', this uses the base-10 string. If
rational_conversion='exact', the exact, base-2 representation is used.
>>> _real_to_rational(0.333333333333333, rational_conversion='exact')
6004799503160655/18014398509481984
>>> _real_to_rational(0.333333333333333)
1/3
"""
expr = _sympify(expr)
inf = Float('inf')
p = expr
reps = {}
reduce_num = None
if tolerance is not None and tolerance < 1:
reduce_num = ceiling(1/tolerance)
for fl in p.atoms(Float):
key = fl
if reduce_num is not None:
r = Rational(fl).limit_denominator(reduce_num)
elif (tolerance is not None and tolerance >= 1 and
fl.is_Integer is False):
r = Rational(tolerance*round(fl/tolerance)
).limit_denominator(int(tolerance))
else:
if rational_conversion == 'exact':
r = Rational(fl)
reps[key] = r
continue
elif rational_conversion != 'base10':
raise ValueError("rational_conversion must be 'base10' or 'exact'")
r = nsimplify(fl, rational=False)
# e.g. log(3).n() -> log(3) instead of a Rational
if fl and not r:
r = Rational(fl)
elif not r.is_Rational:
if fl in (inf, -inf):
r = S.ComplexInfinity
elif fl < 0:
fl = -fl
d = Pow(10, int(mpmath.log(fl)/mpmath.log(10)))
r = -Rational(str(fl/d))*d
elif fl > 0:
d = Pow(10, int(mpmath.log(fl)/mpmath.log(10)))
r = Rational(str(fl/d))*d
else:
r = S.Zero
reps[key] = r
return p.subs(reps, simultaneous=True)
def clear_coefficients(expr, rhs=S.Zero):
"""Return `p, r` where `p` is the expression obtained when Rational
additive and multiplicative coefficients of `expr` have been stripped
away in a naive fashion (i.e. without simplification). The operations
needed to remove the coefficients will be applied to `rhs` and returned
as `r`.
Examples
========
>>> from sympy.simplify.simplify import clear_coefficients
>>> from sympy.abc import x, y
>>> from sympy import Dummy
>>> expr = 4*y*(6*x + 3)
>>> clear_coefficients(expr - 2)
(y*(2*x + 1), 1/6)
When solving 2 or more expressions like `expr = a`,
`expr = b`, etc..., it is advantageous to provide a Dummy symbol
for `rhs` and simply replace it with `a`, `b`, etc... in `r`.
>>> rhs = Dummy('rhs')
>>> clear_coefficients(expr, rhs)
(y*(2*x + 1), _rhs/12)
>>> _[1].subs(rhs, 2)
1/6
"""
was = None
free = expr.free_symbols
if expr.is_Rational:
return (S.Zero, rhs - expr)
while expr and was != expr:
was = expr
m, expr = (
expr.as_content_primitive()
if free else
factor_terms(expr).as_coeff_Mul(rational=True))
rhs /= m
c, expr = expr.as_coeff_Add(rational=True)
rhs -= c
    expr = signsimp(expr, evaluate=False)
if expr.could_extract_minus_sign():
expr = -expr
rhs = -rhs
return expr, rhs
def nc_simplify(expr, deep=True):
'''
Simplify a non-commutative expression composed of multiplication
and raising to a power by grouping repeated subterms into one power.
Priority is given to simplifications that give the fewest number
of arguments in the end (for example, in a*b*a*b*c*a*b*c simplifying
to (a*b)**2*c*a*b*c gives 5 arguments while a*b*(a*b*c)**2 has 3).
If ``expr`` is a sum of such terms, the sum of the simplified terms
is returned.
Keyword argument ``deep`` controls whether or not subexpressions
nested deeper inside the main expression are simplified. See examples
below. Setting `deep` to `False` can save time on nested expressions
that do not need simplifying on all levels.
Examples
========
>>> from sympy import symbols
>>> from sympy.simplify.simplify import nc_simplify
>>> a, b, c = symbols("a b c", commutative=False)
>>> nc_simplify(a*b*a*b*c*a*b*c)
a*b*(a*b*c)**2
>>> expr = a**2*b*a**4*b*a**4
>>> nc_simplify(expr)
a**2*(b*a**4)**2
>>> nc_simplify(a*b*a*b*c**2*(a*b)**2*c**2)
((a*b)**2*c**2)**2
>>> nc_simplify(a*b*a*b + 2*a*c*a**2*c*a**2*c*a)
(a*b)**2 + 2*(a*c*a)**3
>>> nc_simplify(b**-1*a**-1*(a*b)**2)
a*b
>>> nc_simplify(a**-1*b**-1*c*a)
(b*a)**(-1)*c*a
>>> expr = (a*b*a*b)**2*a*c*a*c
>>> nc_simplify(expr)
(a*b)**4*(a*c)**2
>>> nc_simplify(expr, deep=False)
(a*b*a*b)**2*(a*c)**2
'''
from sympy.matrices.expressions import (MatrixExpr, MatAdd, MatMul,
MatPow, MatrixSymbol)
if isinstance(expr, MatrixExpr):
expr = expr.doit(inv_expand=False)
_Add, _Mul, _Pow, _Symbol = MatAdd, MatMul, MatPow, MatrixSymbol
else:
_Add, _Mul, _Pow, _Symbol = Add, Mul, Pow, Symbol
# =========== Auxiliary functions ========================
def _overlaps(args):
# Calculate a list of lists m such that m[i][j] contains the lengths
# of all possible overlaps between args[:i+1] and args[i+1+j:].
# An overlap is a suffix of the prefix that matches a prefix
# of the suffix.
# For example, let expr=c*a*b*a*b*a*b*a*b. Then m[3][0] contains
# the lengths of overlaps of c*a*b*a*b with a*b*a*b. The overlaps
# are a*b*a*b, a*b and the empty word so that m[3][0]=[4,2,0].
# All overlaps rather than only the longest one are recorded
# because this information helps calculate other overlap lengths.
m = [[([1, 0] if a == args[0] else [0]) for a in args[1:]]]
for i in range(1, len(args)):
overlaps = []
j = 0
for j in range(len(args) - i - 1):
overlap = []
for v in m[i-1][j+1]:
if j + i + 1 + v < len(args) and args[i] == args[j+i+1+v]:
overlap.append(v + 1)
overlap += [0]
overlaps.append(overlap)
m.append(overlaps)
return m
def _reduce_inverses(_args):
# replace consecutive negative powers by an inverse
# of a product of positive powers, e.g. a**-1*b**-1*c
# will simplify to (a*b)**-1*c;
# return that new args list and the number of negative
# powers in it (inv_tot)
inv_tot = 0 # total number of inverses
inverses = []
args = []
for arg in _args:
if isinstance(arg, _Pow) and arg.args[1] < 0:
inverses = [arg**-1] + inverses
inv_tot += 1
else:
if len(inverses) == 1:
args.append(inverses[0]**-1)
elif len(inverses) > 1:
args.append(_Pow(_Mul(*inverses), -1))
inv_tot -= len(inverses) - 1
inverses = []
args.append(arg)
if inverses:
args.append(_Pow(_Mul(*inverses), -1))
inv_tot -= len(inverses) - 1
return inv_tot, tuple(args)
def get_score(s):
# compute the number of arguments of s
# (including in nested expressions) overall
# but ignore exponents
if isinstance(s, _Pow):
return get_score(s.args[0])
elif isinstance(s, (_Add, _Mul)):
return sum([get_score(a) for a in s.args])
return 1
def compare(s, alt_s):
# compare two possible simplifications and return a
# "better" one
if s != alt_s and get_score(alt_s) < get_score(s):
return alt_s
return s
# ========================================================
if not isinstance(expr, (_Add, _Mul, _Pow)) or expr.is_commutative:
return expr
args = expr.args[:]
if isinstance(expr, _Pow):
if deep:
return _Pow(nc_simplify(args[0]), args[1]).doit()
else:
return expr
elif isinstance(expr, _Add):
return _Add(*[nc_simplify(a, deep=deep) for a in args]).doit()
else:
# get the non-commutative part
c_args, args = expr.args_cnc()
com_coeff = Mul(*c_args)
if com_coeff != 1:
return com_coeff*nc_simplify(expr/com_coeff, deep=deep)
inv_tot, args = _reduce_inverses(args)
# if most arguments are negative, work with the inverse
# of the expression, e.g. a**-1*b*a**-1*c**-1 will become
# (c*a*b**-1*a)**-1 at the end so can work with c*a*b**-1*a
invert = False
if inv_tot > len(args)/2:
invert = True
args = [a**-1 for a in args[::-1]]
if deep:
args = tuple(nc_simplify(a) for a in args)
m = _overlaps(args)
# simps will be {subterm: end} where `end` is the ending
# index of a sequence of repetitions of subterm;
# this is for not wasting time with subterms that are part
# of longer, already considered sequences
simps = {}
post = 1
pre = 1
# the simplification coefficient is the number of
# arguments by which contracting a given sequence
# would reduce the word; e.g. in a*b*a*b*c*a*b*c,
# contracting a*b*a*b to (a*b)**2 removes 3 arguments
# while a*b*c*a*b*c to (a*b*c)**2 removes 6. It's
# better to contract the latter so simplification
# with a maximum simplification coefficient will be chosen
max_simp_coeff = 0
simp = None # information about future simplification
for i in range(1, len(args)):
simp_coeff = 0
l = 0 # length of a subterm
p = 0 # the power of a subterm
if i < len(args) - 1:
rep = m[i][0]
start = i # starting index of the repeated sequence
end = i+1 # ending index of the repeated sequence
if i == len(args)-1 or rep == [0]:
# no subterm is repeated at this stage, at least as
# far as the arguments are concerned - there may be
# a repetition if powers are taken into account
if (isinstance(args[i], _Pow) and
not isinstance(args[i].args[0], _Symbol)):
subterm = args[i].args[0].args
l = len(subterm)
if args[i-l:i] == subterm:
# e.g. a*b in a*b*(a*b)**2 is not repeated
# in args (= [a, b, (a*b)**2]) but it
# can be matched here
p += 1
start -= l
if args[i+1:i+1+l] == subterm:
# e.g. a*b in (a*b)**2*a*b
p += 1
end += l
if p:
p += args[i].args[1]
else:
continue
else:
l = rep[0] # length of the longest repeated subterm at this point
start -= l - 1
subterm = args[start:end]
p = 2
end += l
if subterm in simps and simps[subterm] >= start:
# the subterm is part of a sequence that
# has already been considered
continue
# count how many times it's repeated
while end < len(args):
if l in m[end-1][0]:
p += 1
end += l
elif isinstance(args[end], _Pow) and args[end].args[0].args == subterm:
# for cases like a*b*a*b*(a*b)**2*a*b
p += args[end].args[1]
end += 1
else:
break
# see if another match can be made, e.g.
# for b*a**2 in b*a**2*b*a**3 or a*b in
# a**2*b*a*b
pre_exp = 0
pre_arg = 1
if start - l >= 0 and args[start-l+1:start] == subterm[1:]:
if isinstance(subterm[0], _Pow):
pre_arg = subterm[0].args[0]
exp = subterm[0].args[1]
else:
pre_arg = subterm[0]
exp = 1
if isinstance(args[start-l], _Pow) and args[start-l].args[0] == pre_arg:
pre_exp = args[start-l].args[1] - exp
start -= l
p += 1
elif args[start-l] == pre_arg:
pre_exp = 1 - exp
start -= l
p += 1
post_exp = 0
post_arg = 1
if end + l - 1 < len(args) and args[end:end+l-1] == subterm[:-1]:
if isinstance(subterm[-1], _Pow):
post_arg = subterm[-1].args[0]
exp = subterm[-1].args[1]
else:
post_arg = subterm[-1]
exp = 1
if isinstance(args[end+l-1], _Pow) and args[end+l-1].args[0] == post_arg:
post_exp = args[end+l-1].args[1] - exp
end += l
p += 1
elif args[end+l-1] == post_arg:
post_exp = 1 - exp
end += l
p += 1
# Consider a*b*a**2*b*a**2*b*a:
# b*a**2 is explicitly repeated, but note
# that in this case a*b*a is also repeated
# so there are two possible simplifications:
# a*(b*a**2)**3*a**-1 or (a*b*a)**3
# The latter is obviously simpler.
# But in a*b*a**2*b**2*a**2 the simplifications are
# a*(b*a**2)**2 and (a*b*a)**3*a in which case
# it's better to stick with the shorter subterm
if post_exp and exp % 2 == 0 and start > 0:
exp = exp/2
_pre_exp = 1
_post_exp = 1
if isinstance(args[start-1], _Pow) and args[start-1].args[0] == post_arg:
_post_exp = post_exp + exp
_pre_exp = args[start-1].args[1] - exp
elif args[start-1] == post_arg:
_post_exp = post_exp + exp
_pre_exp = 1 - exp
if _pre_exp == 0 or _post_exp == 0:
if not pre_exp:
start -= 1
post_exp = _post_exp
pre_exp = _pre_exp
pre_arg = post_arg
subterm = (post_arg**exp,) + subterm[:-1] + (post_arg**exp,)
simp_coeff += end-start
if post_exp:
simp_coeff -= 1
if pre_exp:
simp_coeff -= 1
simps[subterm] = end
if simp_coeff > max_simp_coeff:
max_simp_coeff = simp_coeff
simp = (start, _Mul(*subterm), p, end, l)
pre = pre_arg**pre_exp
post = post_arg**post_exp
if simp:
subterm = _Pow(nc_simplify(simp[1], deep=deep), simp[2])
pre = nc_simplify(_Mul(*args[:simp[0]])*pre, deep=deep)
post = post*nc_simplify(_Mul(*args[simp[3]:]), deep=deep)
simp = pre*subterm*post
if pre != 1 or post != 1:
# new simplifications may be possible but no need
# to recurse over arguments
simp = nc_simplify(simp, deep=False)
else:
simp = _Mul(*args)
if invert:
simp = _Pow(simp, -1)
# see if factor_nc(expr) is simplified better
if not isinstance(expr, MatrixExpr):
f_expr = factor_nc(expr)
if f_expr != expr:
alt_simp = nc_simplify(f_expr, deep=deep)
simp = compare(simp, alt_simp)
else:
simp = simp.doit(inv_expand=False)
return simp
def dotprodsimp(expr, withsimp=False):
"""Simplification for a sum of products targeted at the kind of blowup that
occurs during summation of products. Intended to reduce expression blowup
during matrix multiplication or other similar operations. Only works with
    algebraic expressions and does not recurse into non-algebraic arguments.
Parameters
==========
withsimp : bool, optional
Specifies whether a flag should be returned along with the expression
to indicate roughly whether simplification was successful. It is used
in ``MatrixArithmetic._eval_pow_by_recursion`` to avoid attempting to
simplify an expression repetitively which does not simplify.
"""
def count_ops_alg(expr):
"""Optimized count algebraic operations with no recursion into
non-algebraic args that ``core.function.count_ops`` does. Also returns
whether rational functions may be present according to negative
exponents of powers or non-number fractions.
Returns
=======
ops, ratfunc : int, bool
``ops`` is the number of algebraic operations starting at the top
level expression (not recursing into non-alg children). ``ratfunc``
specifies whether the expression MAY contain rational functions
which ``cancel`` MIGHT optimize.
"""
ops = 0
args = [expr]
ratfunc = False
while args:
a = args.pop()
if not isinstance(a, Basic):
continue
if a.is_Rational:
if a is not S.One: # -1/3 = NEG + DIV
ops += bool(a.p < 0) + bool(a.q != 1)
elif a.is_Mul:
if a.could_extract_minus_sign():
ops += 1
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops += 1 + bool(n < 0)
args.append(d) # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ratfunc = True
ops += 1
args.append(n) # could be -Mul
else:
ops += len(a.args) - 1
args.extend(a.args)
elif a.is_Add:
laargs = len(a.args)
negs = 0
for ai in a.args:
if ai.could_extract_minus_sign():
negs += 1
ai = -ai
args.append(ai)
ops += laargs - (negs != laargs) # -x - y = NEG + SUB
elif a.is_Pow:
ops += 1
args.append(a.base)
if not ratfunc:
ratfunc = a.exp.is_negative is not False
return ops, ratfunc
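# Illustrative sketch (not part of the original source), with x, y symbols:
#   count_ops_alg(x*y + 3)  # expected (2, False): one Add op and one Mul op
#   count_ops_alg(1/x)      # expected (1, True): negative exponent flags ratfunc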
def nonalg_subs_dummies(expr, dummies):
"""Substitute dummy variables for non-algebraic expressions to avoid
evaluation of non-algebraic terms that ``polys.polytools.cancel`` does.
"""
if not expr.args:
return expr
if expr.is_Add or expr.is_Mul or expr.is_Pow:
args = None
for i, a in enumerate(expr.args):
c = nonalg_subs_dummies(a, dummies)
if c is a:
continue
if args is None:
args = list(expr.args)
args[i] = c
if args is None:
return expr
return expr.func(*args)
return dummies.setdefault(expr, Dummy())
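# Illustrative note (not part of the original source): a non-algebraic factor
# such as sin(x) in sin(x) + x would be replaced here by a Dummy symbol, so
# that the call to ``cancel`` below never evaluates it.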
simplified = False # doesn't really mean simplified, rather "can simplify again"
if isinstance(expr, Basic) and (expr.is_Add or expr.is_Mul or expr.is_Pow):
expr2 = expr.expand(deep=True, modulus=None, power_base=False,
power_exp=False, mul=True, log=False, multinomial=True, basic=False)
if expr2 != expr:
expr = expr2
simplified = True
exprops, ratfunc = count_ops_alg(expr)
if exprops >= 6: # empirically tested cutoff for expensive simplification
if ratfunc:
dummies = {}
expr2 = nonalg_subs_dummies(expr, dummies)
if expr2 is expr or count_ops_alg(expr2)[0] >= 6: # check again after substitution
expr3 = cancel(expr2)
if expr3 != expr2:
expr = expr3.subs([(d, e) for e, d in dummies.items()])
simplified = True
# very special case: x/(x-1) - 1/(x-1) -> 1
elif (exprops == 5 and expr.is_Add and expr.args[0].is_Mul and
expr.args[1].is_Mul and expr.args[0].args[-1].is_Pow and
expr.args[1].args[-1].is_Pow and
expr.args[0].args[-1].exp is S.NegativeOne and
expr.args[1].args[-1].exp is S.NegativeOne):
expr2 = together(expr)
expr2ops = count_ops_alg(expr2)[0]
if expr2ops < exprops:
expr = expr2
simplified = True
else:
simplified = True
return (expr, simplified) if withsimp else expr
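# Illustrative sketch (not part of the original source), exercising the special
# case noted in the comments above:
#   from sympy.abc import x
#   dotprodsimp(x/(x - 1) - 1/(x - 1))                 # expected -> 1
#   dotprodsimp(x/(x - 1) - 1/(x - 1), withsimp=True)  # expected -> (1, True)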
bottom_up = deprecated(
useinstead="sympy.core.traversal.bottom_up",
deprecated_since_version="1.10", issue=22288)(_bottom_up)
walk = deprecated(
useinstead="sympy.core.traversal.walk",
deprecated_since_version="1.10", issue=22288)(_walk)
|
307d6f9301ead09e6d366eecb3c57abfe9a3ac31945f07ceb1aae22307e2dfc7 | """This module helps convert SymPy expressions into shorter forms.
For example:
the expression E**(pi*I) will be converted into -1
the expression (x+x)**2 will be converted into 4*x**2
"""
from .simplify import (simplify, hypersimp, hypersimilar,
logcombine, separatevars, posify, besselsimp, kroneckersimp,
signsimp, nsimplify)
from .fu import FU, fu
from .sqrtdenest import sqrtdenest
from .cse_main import cse
from .epathtools import epath, EPath
from .hyperexpand import hyperexpand
from .radsimp import collect, rcollect, radsimp, collect_const, fraction, numer, denom
from .trigsimp import trigsimp, exptrigsimp
from .powsimp import powsimp, powdenest
from .combsimp import combsimp
from .gammasimp import gammasimp
from .ratsimp import ratsimp, ratsimpmodprime
__all__ = [
'simplify', 'hypersimp', 'hypersimilar', 'logcombine', 'separatevars',
'posify', 'besselsimp', 'kroneckersimp', 'signsimp',
'nsimplify',
'FU', 'fu',
'sqrtdenest',
'cse',
'epath', 'EPath',
'hyperexpand',
'collect', 'rcollect', 'radsimp', 'collect_const', 'fraction', 'numer',
'denom',
'trigsimp', 'exptrigsimp',
'powsimp', 'powdenest',
'combsimp',
'gammasimp',
'ratsimp', 'ratsimpmodprime',
]
|
33e8c6eb66183f7c57004b605d9117c38baf037342d1ea9ab1716c1c04b9902e | from sympy.core.traversal import use as _use
from sympy.utilities.decorator import deprecated
use = deprecated(
useinstead="sympy.core.traversal.use",
deprecated_since_version="1.10", issue=22288)(_use)
|
bd5d3a2d04acfe895ef267983ce93a2c6acabb1df28b012b34ae1ce26c22ca93 | from collections import defaultdict
from sympy import SYMPY_DEBUG
from sympy.core import sympify, S, Mul, Derivative, Pow
from sympy.core.add import _unevaluated_Add, Add
from sympy.core.assumptions import assumptions
from sympy.core.exprtools import Factors, gcd_terms
from sympy.core.function import _mexpand, expand_mul, expand_power_base
from sympy.core.mul import _keep_coeff, _unevaluated_Mul, _mulsort
from sympy.core.numbers import Rational, zoo, nan
from sympy.core.parameters import global_parameters
from sympy.core.sorting import ordered, default_sort_key
from sympy.core.symbol import Dummy, Wild, symbols
from sympy.functions import exp, sqrt, log
from sympy.functions.elementary.complexes import Abs
from sympy.polys import gcd
from sympy.simplify.sqrtdenest import sqrtdenest
from sympy.utilities.iterables import iterable, sift
def collect(expr, syms, func=None, evaluate=None, exact=False, distribute_order_term=True):
"""
Collect additive terms of an expression.
Explanation
===========
This function collects additive terms of an expression with respect
to a list of expressions up to powers with rational exponents. By the
term "symbol" here is meant an arbitrary expression, which can contain
powers, products, sums, etc. In other words, a symbol is a pattern which
will be searched for in the expression's terms.
The input expression is not expanded by :func:`collect`, so the user is
expected to provide an expression in an appropriate form. This makes
:func:`collect` more predictable as there is no magic happening behind the
scenes. However, it is important to note, that powers of products are
converted to products of powers using the :func:`~.expand_power_base`
function.
There are two possible types of output. First, if the ``evaluate`` flag is
set, this function will return an expression with collected terms or
else it will return a dictionary with expressions up to rational powers
as keys and collected coefficients as values.
Examples
========
>>> from sympy import S, collect, expand, factor, Wild
>>> from sympy.abc import a, b, c, x, y
This function can collect symbolic coefficients in polynomials or
rational expressions. It will manage to find all integer or rational
powers of collection variable::
>>> collect(a*x**2 + b*x**2 + a*x - b*x + c, x)
c + x**2*(a + b) + x*(a - b)
The same result can be achieved in dictionary form::
>>> d = collect(a*x**2 + b*x**2 + a*x - b*x + c, x, evaluate=False)
>>> d[x**2]
a + b
>>> d[x]
a - b
>>> d[S.One]
c
You can also work with multivariate polynomials. However, remember that
this function is greedy so it will care only about a single symbol at a time,
in specification order::
>>> collect(x**2 + y*x**2 + x*y + y + a*y, [x, y])
x**2*(y + 1) + x*y + y*(a + 1)
Also more complicated expressions can be used as patterns::
>>> from sympy import sin, log
>>> collect(a*sin(2*x) + b*sin(2*x), sin(2*x))
(a + b)*sin(2*x)
>>> collect(a*x*log(x) + b*(x*log(x)), x*log(x))
x*(a + b)*log(x)
You can use wildcards in the pattern::
>>> w = Wild('w1')
>>> collect(a*x**y - b*x**y, w**y)
x**y*(a - b)
It is also possible to work with symbolic powers, although it has more
complicated behavior, because in this case the power's base and the symbolic part
of the exponent are treated as a single symbol::
>>> collect(a*x**c + b*x**c, x)
a*x**c + b*x**c
>>> collect(a*x**c + b*x**c, x**c)
x**c*(a + b)
However, if you incorporate rationals into the exponents, then you will get
well-known behavior::
>>> collect(a*x**(2*c) + b*x**(2*c), x**c)
x**(2*c)*(a + b)
Note also that all previously stated facts about :func:`collect` function
apply to the exponential function, so you can get::
>>> from sympy import exp
>>> collect(a*exp(2*x) + b*exp(2*x), exp(x))
(a + b)*exp(2*x)
If you are interested only in collecting specific powers of some symbols
then set ``exact`` flag in arguments::
>>> collect(a*x**7 + b*x**7, x, exact=True)
a*x**7 + b*x**7
>>> collect(a*x**7 + b*x**7, x**7, exact=True)
x**7*(a + b)
You can also apply this function to differential equations, where
derivatives of arbitrary order can be collected. Note that if you
collect with respect to a function or a derivative of a function, all
derivatives of that function will also be collected. Use
``exact=True`` to prevent this from happening::
>>> from sympy import Derivative as D, collect, Function
>>> f = Function('f') (x)
>>> collect(a*D(f,x) + b*D(f,x), D(f,x))
(a + b)*Derivative(f(x), x)
>>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), f)
(a + b)*Derivative(f(x), (x, 2))
>>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), D(f,x), exact=True)
a*Derivative(f(x), (x, 2)) + b*Derivative(f(x), (x, 2))
>>> collect(a*D(f,x) + b*D(f,x) + a*f + b*f, f)
(a + b)*f(x) + (a + b)*Derivative(f(x), x)
Or you can even match both derivative order and exponent at the same time::
>>> collect(a*D(D(f,x),x)**2 + b*D(D(f,x),x)**2, D(f,x))
(a + b)*Derivative(f(x), (x, 2))**2
Finally, you can apply a function to each of the collected coefficients.
For example you can factorize symbolic coefficients of polynomial::
>>> f = expand((x + a + 1)**3)
>>> collect(f, x, factor)
x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + (a + 1)**3
.. note:: Arguments are expected to be in expanded form, so you might have
to call :func:`~.expand` prior to calling this function.
See Also
========
collect_const, collect_sqrt, rcollect
"""
expr = sympify(expr)
syms = [sympify(i) for i in (syms if iterable(syms) else [syms])]
# replace syms[i] with a Dummy unless it is a Symbol, a negated Symbol, or has Wild symbols
cond = lambda x: x.is_Symbol or (-x).is_Symbol or bool(
x.atoms(Wild))
_, nonsyms = sift(syms, cond, binary=True)
if nonsyms:
reps = dict(zip(nonsyms, [Dummy(**assumptions(i)) for i in nonsyms]))
syms = [reps.get(s, s) for s in syms]
rv = collect(expr.subs(reps), syms,
func=func, evaluate=evaluate, exact=exact,
distribute_order_term=distribute_order_term)
urep = {v: k for k, v in reps.items()}
if not isinstance(rv, dict):
return rv.xreplace(urep)
else:
return {urep.get(k, k).xreplace(urep): v.xreplace(urep)
for k, v in rv.items()}
if evaluate is None:
evaluate = global_parameters.evaluate
def make_expression(terms):
product = []
for term, rat, sym, deriv in terms:
if deriv is not None:
var, order = deriv
while order > 0:
term, order = Derivative(term, var), order - 1
if sym is None:
if rat is S.One:
product.append(term)
else:
product.append(Pow(term, rat))
else:
product.append(Pow(term, rat*sym))
return Mul(*product)
def parse_derivative(deriv):
# scan derivatives tower in the input expression and return
# underlying function and maximal differentiation order
expr, sym, order = deriv.expr, deriv.variables[0], 1
for s in deriv.variables[1:]:
if s == sym:
order += 1
else:
raise NotImplementedError(
'Improve MV Derivative support in collect')
while isinstance(expr, Derivative):
s0 = expr.variables[0]
for s in expr.variables:
if s != s0:
raise NotImplementedError(
'Improve MV Derivative support in collect')
if s0 == sym:
expr, order = expr.expr, order + len(expr.variables)
else:
break
return expr, (sym, Rational(order))
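# Illustrative note (not part of the original source): for f = Function('f')(x),
# parse_derivative(Derivative(f, x, x)) is expected to return (f, (x, 2)).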
def parse_term(expr):
"""Parses expression expr and outputs tuple (sexpr, rat_expo,
sym_expo, deriv)
where:
- sexpr is the base expression
- rat_expo is the rational exponent that sexpr is raised to
- sym_expo is the symbolic exponent that sexpr is raised to
- deriv contains the derivatives of the expression
For example, the output of x would be (x, 1, None, None);
the output of 2**x would be (2, 1, x, None).
"""
rat_expo, sym_expo = S.One, None
sexpr, deriv = expr, None
if expr.is_Pow:
if isinstance(expr.base, Derivative):
sexpr, deriv = parse_derivative(expr.base)
else:
sexpr = expr.base
if expr.base == S.Exp1:
arg = expr.exp
if arg.is_Rational:
sexpr, rat_expo = S.Exp1, arg
elif arg.is_Mul:
coeff, tail = arg.as_coeff_Mul(rational=True)
sexpr, rat_expo = exp(tail), coeff
elif expr.exp.is_Number:
rat_expo = expr.exp
else:
coeff, tail = expr.exp.as_coeff_Mul()
if coeff.is_Number:
rat_expo, sym_expo = coeff, tail
else:
sym_expo = expr.exp
elif isinstance(expr, exp):
arg = expr.exp
if arg.is_Rational:
sexpr, rat_expo = S.Exp1, arg
elif arg.is_Mul:
coeff, tail = arg.as_coeff_Mul(rational=True)
sexpr, rat_expo = exp(tail), coeff
elif isinstance(expr, Derivative):
sexpr, deriv = parse_derivative(expr)
return sexpr, rat_expo, sym_expo, deriv
def parse_expression(terms, pattern):
"""Parse terms searching for a pattern.
Terms is a list of tuples as returned by parse_term;
Pattern is an expression treated as a product of factors.
"""
pattern = Mul.make_args(pattern)
if len(terms) < len(pattern):
# pattern is longer than matched product
# so no chance for positive parsing result
return None
else:
pattern = [parse_term(elem) for elem in pattern]
terms = terms[:] # need a copy
elems, common_expo, has_deriv = [], None, False
for elem, e_rat, e_sym, e_ord in pattern:
if elem.is_Number and e_rat == 1 and e_sym is None:
# a constant is a match for everything
continue
for j in range(len(terms)):
if terms[j] is None:
continue
term, t_rat, t_sym, t_ord = terms[j]
# keeping track of whether one of the terms had
# a derivative or not as this will require rebuilding
# the expression later
if t_ord is not None:
has_deriv = True
if (term.match(elem) is not None and
(t_sym == e_sym or t_sym is not None and
e_sym is not None and
t_sym.match(e_sym) is not None)):
if exact is False:
# we don't have to be exact so find common exponent
# for both expression's term and pattern's element
expo = t_rat / e_rat
if common_expo is None:
# first time
common_expo = expo
else:
# common exponent was negotiated before so
# there is no chance for a pattern match unless
# common and current exponents are equal
if common_expo != expo:
common_expo = 1
else:
# we ought to be exact so all fields of
# interest must match in every detail
if e_rat != t_rat or e_ord != t_ord:
continue
# found common term so remove it from the expression
# and try to match next element in the pattern
elems.append(terms[j])
terms[j] = None
break
else:
# pattern element not found
return None
return [_f for _f in terms if _f], elems, common_expo, has_deriv
if evaluate:
if expr.is_Add:
o = expr.getO() or 0
expr = expr.func(*[
collect(a, syms, func, True, exact, distribute_order_term)
for a in expr.args if a != o]) + o
elif expr.is_Mul:
return expr.func(*[
collect(term, syms, func, True, exact, distribute_order_term)
for term in expr.args])
elif expr.is_Pow:
b = collect(
expr.base, syms, func, True, exact, distribute_order_term)
return Pow(b, expr.exp)
syms = [expand_power_base(i, deep=False) for i in syms]
order_term = None
if distribute_order_term:
order_term = expr.getO()
if order_term is not None:
if order_term.has(*syms):
order_term = None
else:
expr = expr.removeO()
summa = [expand_power_base(i, deep=False) for i in Add.make_args(expr)]
collected, disliked = defaultdict(list), S.Zero
for product in summa:
c, nc = product.args_cnc(split_1=False)
args = list(ordered(c)) + nc
terms = [parse_term(i) for i in args]
small_first = True
for symbol in syms:
if SYMPY_DEBUG:
print("DEBUG: parsing of expression %s with symbol %s " % (
str(terms), str(symbol))
)
if isinstance(symbol, Derivative) and small_first:
terms = list(reversed(terms))
small_first = not small_first
result = parse_expression(terms, symbol)
if SYMPY_DEBUG:
print("DEBUG: returned %s" % str(result))
if result is not None:
if not symbol.is_commutative:
raise AttributeError("Can not collect noncommutative symbol")
terms, elems, common_expo, has_deriv = result
# when there was derivative in current pattern we
# will need to rebuild its expression from scratch
if not has_deriv:
margs = []
for elem in elems:
if elem[2] is None:
e = elem[1]
else:
e = elem[1]*elem[2]
margs.append(Pow(elem[0], e))
index = Mul(*margs)
else:
index = make_expression(elems)
terms = expand_power_base(make_expression(terms), deep=False)
index = expand_power_base(index, deep=False)
collected[index].append(terms)
break
else:
# none of the patterns matched
disliked += product
# add terms now for each key
collected = {k: Add(*v) for k, v in collected.items()}
if disliked is not S.Zero:
collected[S.One] = disliked
if order_term is not None:
for key, val in collected.items():
collected[key] = val + order_term
if func is not None:
collected = {
key: func(val) for key, val in collected.items()}
if evaluate:
return Add(*[key*val for key, val in collected.items()])
else:
return collected
def rcollect(expr, *vars):
"""
Recursively collect sums in an expression.
Examples
========
>>> from sympy.simplify import rcollect
>>> from sympy.abc import x, y
>>> expr = (x**2*y + x*y + x + y)/(x + y)
>>> rcollect(expr, y)
(x + y*(x**2 + x + 1))/(x + y)
See Also
========
collect, collect_const, collect_sqrt
"""
if expr.is_Atom or not expr.has(*vars):
return expr
else:
expr = expr.__class__(*[rcollect(arg, *vars) for arg in expr.args])
if expr.is_Add:
return collect(expr, vars)
else:
return expr
def collect_sqrt(expr, evaluate=None):
"""Return expr with terms having common square roots collected together.
If ``evaluate`` is False a count indicating the number of sqrt-containing
terms will be returned and, if non-zero, the terms of the Add will be
returned, else the expression itself will be returned as a single term.
If ``evaluate`` is True, the expression with any collected terms will be
returned.
Note: since I = sqrt(-1), it is collected, too.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import collect_sqrt
>>> from sympy.abc import a, b
>>> r2, r3, r5 = [sqrt(i) for i in [2, 3, 5]]
>>> collect_sqrt(a*r2 + b*r2)
sqrt(2)*(a + b)
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r3)
sqrt(2)*(a + b) + sqrt(3)*(a + b)
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5)
sqrt(3)*a + sqrt(5)*b + sqrt(2)*(a + b)
If evaluate is False then the arguments will be sorted and
returned as a list and a count of the number of sqrt-containing
terms will be returned:
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5, evaluate=False)
((sqrt(3)*a, sqrt(5)*b, sqrt(2)*(a + b)), 3)
>>> collect_sqrt(a*sqrt(2) + b, evaluate=False)
((b, sqrt(2)*a), 1)
>>> collect_sqrt(a + b, evaluate=False)
((a + b,), 0)
See Also
========
collect, collect_const, rcollect
"""
if evaluate is None:
evaluate = global_parameters.evaluate
# this step will help to standardize any complex arguments
# of sqrts
coeff, expr = expr.as_content_primitive()
vars = set()
for a in Add.make_args(expr):
for m in a.args_cnc()[0]:
if m.is_number and (
m.is_Pow and m.exp.is_Rational and m.exp.q == 2 or
m is S.ImaginaryUnit):
vars.add(m)
# we only want radicals, so exclude Number handling; in this case
# d will be evaluated
d = collect_const(expr, *vars, Numbers=False)
hit = expr != d
if not evaluate:
nrad = 0
# make the evaluated args canonical
args = list(ordered(Add.make_args(d)))
for i, m in enumerate(args):
c, nc = m.args_cnc()
for ci in c:
# XXX should this be restricted to ci.is_number as above?
if ci.is_Pow and ci.exp.is_Rational and ci.exp.q == 2 or \
ci is S.ImaginaryUnit:
nrad += 1
break
args[i] *= coeff
if not (hit or nrad):
args = [Add(*args)]
return tuple(args), nrad
return coeff*d
def collect_abs(expr):
"""Return ``expr`` with arguments of multiple Abs in a term collected
under a single instance.
Examples
========
>>> from sympy.simplify.radsimp import collect_abs
>>> from sympy.abc import x
>>> collect_abs(abs(x + 1)/abs(x**2 - 1))
Abs((x + 1)/(x**2 - 1))
>>> collect_abs(abs(1/x))
Abs(1/x)
"""
def _abs(mul):
c, nc = mul.args_cnc()
a = []
o = []
for i in c:
if isinstance(i, Abs):
a.append(i.args[0])
elif isinstance(i, Pow) and isinstance(i.base, Abs) and i.exp.is_real:
a.append(i.base.args[0]**i.exp)
else:
o.append(i)
if len(a) < 2 and not any(i.exp.is_negative for i in a if isinstance(i, Pow)):
return mul
absarg = Mul(*a)
A = Abs(absarg)
args = [A]
args.extend(o)
if not A.has(Abs):
args.extend(nc)
return Mul(*args)
if not isinstance(A, Abs):
# reevaluate and make it unevaluated
A = Abs(absarg, evaluate=False)
args[0] = A
_mulsort(args)
args.extend(nc) # nc always go last
return Mul._from_args(args, is_commutative=not nc)
return expr.replace(
lambda x: isinstance(x, Mul),
lambda x: _abs(x)).replace(
lambda x: isinstance(x, Pow),
lambda x: _abs(x))
def collect_const(expr, *vars, Numbers=True):
"""A non-greedy collection of terms with similar number coefficients in
an Add expr. If ``vars`` is given then only those constants will be
targeted. Although any Number can also be targeted, if this is not
desired set ``Numbers=False`` and no Float or Rational will be collected.
Parameters
==========
expr : SymPy expression
This parameter defines the expression from which
terms with similar coefficients are to be collected. A non-Add
expression is returned as it is.
vars : variable length collection of Numbers, optional
Specifies the constants to target for collection. Can be multiple in
number.
Numbers : bool
Specifies whether to target all instances of the
:class:`sympy.core.numbers.Number` class. If ``Numbers=False``, then
no Float or Rational will be collected.
Returns
=======
expr : Expr
Returns an expression with similar coefficient terms collected.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import s, x, y, z
>>> from sympy.simplify.radsimp import collect_const
>>> collect_const(sqrt(3) + sqrt(3)*(1 + sqrt(2)))
sqrt(3)*(sqrt(2) + 2)
>>> collect_const(sqrt(3)*s + sqrt(7)*s + sqrt(3) + sqrt(7))
(sqrt(3) + sqrt(7))*(s + 1)
>>> s = sqrt(2) + 2
>>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7))
(sqrt(2) + 3)*(sqrt(3) + sqrt(7))
>>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7), sqrt(3))
sqrt(7) + sqrt(3)*(sqrt(2) + 3) + sqrt(7)*(sqrt(2) + 2)
The collection is sign-sensitive, giving higher precedence to the
unsigned values:
>>> collect_const(x - y - z)
x - (y + z)
>>> collect_const(-y - z)
-(y + z)
>>> collect_const(2*x - 2*y - 2*z, 2)
2*(x - y - z)
>>> collect_const(2*x - 2*y - 2*z, -2)
2*x - 2*(y + z)
See Also
========
collect, collect_sqrt, rcollect
"""
if not expr.is_Add:
return expr
recurse = False
if not vars:
recurse = True
vars = set()
for a in expr.args:
for m in Mul.make_args(a):
if m.is_number:
vars.add(m)
else:
vars = sympify(vars)
if not Numbers:
vars = [v for v in vars if not v.is_Number]
vars = list(ordered(vars))
for v in vars:
terms = defaultdict(list)
Fv = Factors(v)
for m in Add.make_args(expr):
f = Factors(m)
q, r = f.div(Fv)
if r.is_one:
# only accept this as a true factor if
# it didn't change an exponent from an Integer
# to a non-Integer, e.g. 2/sqrt(2) -> sqrt(2)
# -- we aren't looking for this sort of change
fwas = f.factors.copy()
fnow = q.factors
if not any(k in fwas and fwas[k].is_Integer and not
fnow[k].is_Integer for k in fnow):
terms[v].append(q.as_expr())
continue
terms[S.One].append(m)
args = []
hit = False
uneval = False
for k in ordered(terms):
v = terms[k]
if k is S.One:
args.extend(v)
continue
if len(v) > 1:
v = Add(*v)
hit = True
if recurse and v != expr:
vars.append(v)
else:
v = v[0]
# be careful not to let uneval become True unless
# it must be because it's going to be more expensive
# to rebuild the expression as an unevaluated one
if Numbers and k.is_Number and v.is_Add:
args.append(_keep_coeff(k, v, sign=True))
uneval = True
else:
args.append(k*v)
if hit:
if uneval:
expr = _unevaluated_Add(*args)
else:
expr = Add(*args)
if not expr.is_Add:
break
return expr
def radsimp(expr, symbolic=True, max_terms=4):
r"""
Rationalize the denominator by removing square roots.
Explanation
===========
The expression returned from radsimp must be used with caution
since if the denominator contains symbols, it will be possible to make
substitutions that violate the assumptions of the simplification process:
that for a denominator matching a + b*sqrt(c), a != +/-b*sqrt(c). (If
there are no symbols, this assumption is made valid by collecting terms
of sqrt(c) so the match variable ``a`` does not contain ``sqrt(c)``.) If
you do not want the simplification to occur for symbolic denominators, set
``symbolic`` to False.
If there are more than ``max_terms`` radical terms then the expression is
returned unchanged.
Examples
========
>>> from sympy import radsimp, sqrt, Symbol, pprint
>>> from sympy import factor_terms, fraction, signsimp
>>> from sympy.simplify.radsimp import collect_sqrt
>>> from sympy.abc import a, b, c
>>> radsimp(1/(2 + sqrt(2)))
(2 - sqrt(2))/2
>>> x,y = map(Symbol, 'xy')
>>> e = ((2 + 2*sqrt(2))*x + (2 + sqrt(8))*y)/(2 + sqrt(2))
>>> radsimp(e)
sqrt(2)*(x + y)
No simplification beyond removal of the gcd is done. One might
want to polish the result a little, however, by collecting
square root terms:
>>> r2 = sqrt(2)
>>> r5 = sqrt(5)
>>> ans = radsimp(1/(y*r2 + x*r2 + a*r5 + b*r5)); pprint(ans)
___ ___ ___ ___
\/ 5 *a + \/ 5 *b - \/ 2 *x - \/ 2 *y
------------------------------------------
2 2 2 2
5*a + 10*a*b + 5*b - 2*x - 4*x*y - 2*y
>>> n, d = fraction(ans)
>>> pprint(factor_terms(signsimp(collect_sqrt(n))/d, radical=True))
___ ___
\/ 5 *(a + b) - \/ 2 *(x + y)
------------------------------------------
2 2 2 2
5*a + 10*a*b + 5*b - 2*x - 4*x*y - 2*y
If radicals in the denominator cannot be removed or there is no denominator,
the original expression will be returned.
>>> radsimp(sqrt(2)*x + sqrt(2))
sqrt(2)*x + sqrt(2)
Results with symbols will not always be valid for all substitutions:
>>> eq = 1/(a + b*sqrt(c))
>>> eq.subs(a, b*sqrt(c))
1/(2*b*sqrt(c))
>>> radsimp(eq).subs(a, b*sqrt(c))
nan
If ``symbolic=False``, symbolic denominators will not be transformed (but
numeric denominators will still be processed):
>>> radsimp(eq, symbolic=False)
1/(a + b*sqrt(c))
"""
from sympy.simplify.simplify import signsimp
syms = symbols("a:d A:D")
def _num(rterms):
# return the multiplier that will simplify the expression described
# by rterms [(sqrt arg, coeff), ... ]
a, b, c, d, A, B, C, D = syms
if len(rterms) == 2:
reps = dict(list(zip([A, a, B, b], [j for i in rterms for j in i])))
return (
sqrt(A)*a - sqrt(B)*b).xreplace(reps)
if len(rterms) == 3:
reps = dict(list(zip([A, a, B, b, C, c], [j for i in rterms for j in i])))
return (
(sqrt(A)*a + sqrt(B)*b - sqrt(C)*c)*(2*sqrt(A)*sqrt(B)*a*b - A*a**2 -
B*b**2 + C*c**2)).xreplace(reps)
elif len(rterms) == 4:
reps = dict(list(zip([A, a, B, b, C, c, D, d], [j for i in rterms for j in i])))
return ((sqrt(A)*a + sqrt(B)*b - sqrt(C)*c - sqrt(D)*d)*(2*sqrt(A)*sqrt(B)*a*b
- A*a**2 - B*b**2 - 2*sqrt(C)*sqrt(D)*c*d + C*c**2 +
D*d**2)*(-8*sqrt(A)*sqrt(B)*sqrt(C)*sqrt(D)*a*b*c*d + A**2*a**4 -
2*A*B*a**2*b**2 - 2*A*C*a**2*c**2 - 2*A*D*a**2*d**2 + B**2*b**4 -
2*B*C*b**2*c**2 - 2*B*D*b**2*d**2 + C**2*c**4 - 2*C*D*c**2*d**2 +
D**2*d**4)).xreplace(reps)
elif len(rterms) == 1:
return sqrt(rterms[0][0])
else:
raise NotImplementedError
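# Illustrative note (not part of the original source): for two radical terms,
# e.g. rterms = [(2, x), (3, y)] describing x*sqrt(2) + y*sqrt(3), _num is
# expected to return the conjugate multiplier sqrt(2)*x - sqrt(3)*y.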
def ispow2(d, log2=False):
if not d.is_Pow:
return False
e = d.exp
if e.is_Rational and e.q == 2 or symbolic and denom(e) == 2:
return True
if log2:
q = 1
if e.is_Rational:
q = e.q
elif symbolic:
d = denom(e)
if d.is_Integer:
q = d
if q != 1 and log(q, 2).is_Integer:
return True
return False
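# Illustrative note (not part of the original source):
#   ispow2(sqrt(2))                       # True  (exponent with q == 2)
#   ispow2(x**Rational(1, 4))             # False without log2
#   ispow2(x**Rational(1, 4), log2=True)  # True  (log(4, 2) is an integer)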
def handle(expr):
# Handle first reduces to the case
# expr = 1/d, where d is an add, or d is base**p/2.
# We do this by recursively calling handle on each piece.
from sympy.simplify.simplify import nsimplify
n, d = fraction(expr)
if expr.is_Atom or (d.is_Atom and n.is_Atom):
return expr
elif not n.is_Atom:
n = n.func(*[handle(a) for a in n.args])
return _unevaluated_Mul(n, handle(1/d))
elif n is not S.One:
return _unevaluated_Mul(n, handle(1/d))
elif d.is_Mul:
return _unevaluated_Mul(*[handle(1/d) for d in d.args])
# By this step, expr is 1/d, and d is not a mul.
if not symbolic and d.free_symbols:
return expr
if ispow2(d):
d2 = sqrtdenest(sqrt(d.base))**numer(d.exp)
if d2 != d:
return handle(1/d2)
elif d.is_Pow and (d.exp.is_integer or d.base.is_positive):
# (1/d**i) = (1/d)**i
return handle(1/d.base)**d.exp
if not (d.is_Add or ispow2(d)):
return 1/d.func(*[handle(a) for a in d.args])
# handle 1/d treating d as an Add (though it may not be)
keep = True # keep changes that are made
# flatten it and collect radicals after checking for special
# conditions
d = _mexpand(d)
# did it change?
if d.is_Atom:
return 1/d
# is it a number that might be handled easily?
if d.is_number:
_d = nsimplify(d)
if _d.is_Number and _d.equals(d):
return 1/_d
while True:
# collect similar terms
collected = defaultdict(list)
for m in Add.make_args(d): # d might have become non-Add
p2 = []
other = []
for i in Mul.make_args(m):
if ispow2(i, log2=True):
p2.append(i.base if i.exp is S.Half else i.base**(2*i.exp))
elif i is S.ImaginaryUnit:
p2.append(S.NegativeOne)
else:
other.append(i)
collected[tuple(ordered(p2))].append(Mul(*other))
rterms = list(ordered(list(collected.items())))
rterms = [(Mul(*i), Add(*j)) for i, j in rterms]
nrad = len(rterms) - (1 if rterms[0][0] is S.One else 0)
if nrad < 1:
break
elif nrad > max_terms:
# there may have been invalid operations leading to this point
# so don't keep changes, e.g. this expression is troublesome
# in collecting terms so as not to raise the issue of 2834:
# r = sqrt(sqrt(5) + 5)
# eq = 1/(sqrt(5)*r + 2*sqrt(5)*sqrt(-sqrt(5) + 5) + 5*r)
keep = False
break
if len(rterms) > 4:
# in general, only 4 terms can be removed with repeated squaring
# but other considerations can guide selection of radical terms
# so that radicals are removed
if all(x.is_Integer and (y**2).is_Rational for x, y in rterms):
nd, d = rad_rationalize(S.One, Add._from_args(
[sqrt(x)*y for x, y in rterms]))
n *= nd
else:
# is there anything else that might be attempted?
keep = False
break
from sympy.simplify.powsimp import powsimp, powdenest
num = powsimp(_num(rterms))
n *= num
d *= num
d = powdenest(_mexpand(d), force=symbolic)
if d.has(S.Zero, nan, zoo):
return expr
if d.is_Atom:
break
if not keep:
return expr
return _unevaluated_Mul(n, 1/d)
coeff, expr = expr.as_coeff_Add()
expr = expr.normal()
old = fraction(expr)
n, d = fraction(handle(expr))
if old != (n, d):
if not d.is_Atom:
was = (n, d)
n = signsimp(n, evaluate=False)
d = signsimp(d, evaluate=False)
u = Factors(_unevaluated_Mul(n, 1/d))
u = _unevaluated_Mul(*[k**v for k, v in u.factors.items()])
n, d = fraction(u)
if old == (n, d):
n, d = was
n = expand_mul(n)
if d.is_Number or d.is_Add:
n2, d2 = fraction(gcd_terms(_unevaluated_Mul(n, 1/d)))
if d2.is_Number or (d2.count_ops() <= d.count_ops()):
n, d = [signsimp(i) for i in (n2, d2)]
if n.is_Mul and n.args[0].is_Number:
n = n.func(*n.args)
return coeff + _unevaluated_Mul(n, 1/d)
def rad_rationalize(num, den):
"""
Rationalize ``num/den`` by removing square roots in the denominator;
num and den are sums of terms whose squares are positive rationals.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import rad_rationalize
>>> rad_rationalize(sqrt(3), 1 + sqrt(2)/3)
(-sqrt(3) + sqrt(6)/3, -7/9)
"""
if not den.is_Add:
return num, den
g, a, b = split_surds(den)
a = a*sqrt(g)
num = _mexpand((a - b)*num)
den = _mexpand(a**2 - b**2)
return rad_rationalize(num, den)
def fraction(expr, exact=False):
"""Returns a pair with expression's numerator and denominator.
If the given expression is not a fraction then this function
will return the tuple (expr, 1).
This function will not make any attempt to simplify nested
fractions or to do any term rewriting at all.
If only one of the numerator/denominator pair is needed then
use numer(expr) or denom(expr) functions respectively.
>>> from sympy import fraction, Rational, Symbol
>>> from sympy.abc import x, y
>>> fraction(x/y)
(x, y)
>>> fraction(x)
(x, 1)
>>> fraction(1/y**2)
(1, y**2)
>>> fraction(x*y/2)
(x*y, 2)
>>> fraction(Rational(1, 2))
(1, 2)
This function will also work fine with assumptions:
>>> k = Symbol('k', negative=True)
>>> fraction(x * y**k)
(x, y**(-k))
If we know nothing about the sign of some exponent and the ``exact``
flag is unset, then the structure of this exponent will be
analyzed and a pretty fraction will be returned:
>>> from sympy import exp, Mul
>>> fraction(2*x**(-y))
(2, x**y)
>>> fraction(exp(-x))
(1, exp(x))
>>> fraction(exp(-x), exact=True)
(exp(-x), 1)
The ``exact`` flag will also keep any unevaluated Muls from
being evaluated:
>>> u = Mul(2, x + 1, evaluate=False)
>>> fraction(u)
(2*x + 2, 1)
>>> fraction(u, exact=True)
(2*(x + 1), 1)
"""
expr = sympify(expr)
numer, denom = [], []
for term in Mul.make_args(expr):
if term.is_commutative and (term.is_Pow or isinstance(term, exp)):
b, ex = term.as_base_exp()
if ex.is_negative:
if ex is S.NegativeOne:
denom.append(b)
elif exact:
if ex.is_constant():
denom.append(Pow(b, -ex))
else:
numer.append(term)
else:
denom.append(Pow(b, -ex))
elif ex.is_positive:
numer.append(term)
elif not exact and ex.is_Mul:
n, d = term.as_numer_denom()
if n != 1:
numer.append(n)
denom.append(d)
else:
numer.append(term)
elif term.is_Rational and not term.is_Integer:
if term.p != 1:
numer.append(term.p)
denom.append(term.q)
else:
numer.append(term)
return Mul(*numer, evaluate=not exact), Mul(*denom, evaluate=not exact)
def numer(expr):
return fraction(expr)[0]
def denom(expr):
return fraction(expr)[1]
def fraction_expand(expr, **hints):
return expr.expand(frac=True, **hints)
def numer_expand(expr, **hints):
a, b = fraction(expr)
return a.expand(numer=True, **hints) / b
def denom_expand(expr, **hints):
a, b = fraction(expr)
return a / b.expand(denom=True, **hints)
expand_numer = numer_expand
expand_denom = denom_expand
expand_fraction = fraction_expand
def split_surds(expr):
"""
Split an expression with terms whose squares are positive rationals
into a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms whose surds squared are coprime to ``g``.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import split_surds
>>> split_surds(3*sqrt(3) + sqrt(5)/7 + sqrt(6) + sqrt(10) + sqrt(15))
(3, sqrt(2) + sqrt(5) + 3, sqrt(5)/7 + sqrt(10))
"""
args = sorted(expr.args, key=default_sort_key)
coeff_muls = [x.as_coeff_Mul() for x in args]
surds = [x[1]**2 for x in coeff_muls if x[1].is_Pow]
surds.sort(key=default_sort_key)
g, b1, b2 = _split_gcd(*surds)
g2 = g
if not b2 and len(b1) >= 2:
b1n = [x/g for x in b1]
b1n = [x for x in b1n if x != 1]
# only a common factor has been factored; split again
g1, b1n, b2 = _split_gcd(*b1n)
g2 = g*g1
a1v, a2v = [], []
for c, s in coeff_muls:
if s.is_Pow and s.exp == S.Half:
s1 = s.base
if s1 in b1:
a1v.append(c*sqrt(s1/g2))
else:
a2v.append(c*s)
else:
a2v.append(c*s)
a = Add(*a1v)
b = Add(*a2v)
return g2, a, b
def _split_gcd(*a):
"""
Split the list of integers ``a`` into a list of integers, ``a1`` having
``g = gcd(a1)``, and a list ``a2`` whose elements are not divisible by
``g``. Returns ``g, a1, a2``.
Examples
========
>>> from sympy.simplify.radsimp import _split_gcd
>>> _split_gcd(55, 35, 22, 14, 77, 10)
(5, [55, 35, 10], [22, 14, 77])
"""
g = a[0]
b1 = [g]
b2 = []
for x in a[1:]:
g1 = gcd(g, x)
if g1 == 1:
b2.append(x)
else:
g = g1
b1.append(x)
return g, b1, b2
|
604792e5a998b1e4f59c2e1b1eacb27a6ae3004d6a9615b3d08497264e48f17a | """Tools for manipulation of expressions using paths. """
from sympy.core import Basic
class EPath:
r"""
Manipulate expressions using paths.
EPath grammar in EBNF notation::
literal ::= /[A-Za-z_][A-Za-z_0-9]*/
number ::= /-?\d+/
type ::= literal
attribute ::= literal "?"
all ::= "*"
slice ::= "[" number? (":" number? (":" number?)?)? "]"
range ::= all | slice
query ::= (type | attribute) ("|" (type | attribute))*
selector ::= range | query range?
path ::= "/" selector ("/" selector)*
See the docstring of the epath() function.
"""
__slots__ = ("_path", "_epath")
def __new__(cls, path):
"""Construct new EPath. """
if isinstance(path, EPath):
return path
if not path:
raise ValueError("empty EPath")
_path = path
if path[0] == '/':
path = path[1:]
else:
raise NotImplementedError("non-root EPath")
epath = []
for selector in path.split('/'):
selector = selector.strip()
if not selector:
raise ValueError("empty selector")
index = 0
for c in selector:
if c.isalnum() or c in ('_', '|', '?'):
index += 1
else:
break
attrs = []
types = []
if index:
elements = selector[:index]
selector = selector[index:]
for element in elements.split('|'):
element = element.strip()
if not element:
raise ValueError("empty element")
if element.endswith('?'):
attrs.append(element[:-1])
else:
types.append(element)
span = None
if selector == '*':
pass
else:
if selector.startswith('['):
try:
i = selector.index(']')
except ValueError:
raise ValueError("expected ']', got EOL")
_span, span = selector[1:i], []
if ':' not in _span:
span = int(_span)
else:
for elt in _span.split(':', 3):
if not elt:
span.append(None)
else:
span.append(int(elt))
span = slice(*span)
selector = selector[i + 1:]
if selector:
raise ValueError("trailing characters in selector")
epath.append((attrs, types, span))
obj = object.__new__(cls)
obj._path = _path
obj._epath = epath
return obj
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._path)
def _get_ordered_args(self, expr):
"""Sort ``expr.args`` using printing order. """
if expr.is_Add:
return expr.as_ordered_terms()
elif expr.is_Mul:
return expr.as_ordered_factors()
else:
return expr.args
def _hasattrs(self, expr, attrs):
"""Check if ``expr`` has any of ``attrs``. """
for attr in attrs:
if not hasattr(expr, attr):
return False
return True
def _hastypes(self, expr, types):
"""Check if ``expr`` is any of ``types``. """
_types = [ cls.__name__ for cls in expr.__class__.mro() ]
return bool(set(_types).intersection(types))
def _has(self, expr, attrs, types):
"""Apply ``_hasattrs`` and ``_hastypes`` to ``expr``. """
if not (attrs or types):
return True
if attrs and self._hasattrs(expr, attrs):
return True
if types and self._hastypes(expr, types):
return True
return False
def apply(self, expr, func, args=None, kwargs=None):
"""
Modify parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.apply(expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.apply(expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
def _apply(path, expr, func):
if not path:
return func(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
if not expr.is_Atom:
args, basic = self._get_ordered_args(expr), True
else:
return expr
elif hasattr(expr, '__iter__'):
args, basic = expr, False
else:
return expr
args = list(args)
if span is not None:
if isinstance(span, slice):
indices = range(*span.indices(len(args)))
else:
indices = [span]
else:
indices = range(len(args))
for i in indices:
try:
arg = args[i]
except IndexError:
continue
if self._has(arg, attrs, types):
args[i] = _apply(path, arg, func)
if basic:
return expr.func(*args)
else:
return expr.__class__(args)
_args, _kwargs = args or (), kwargs or {}
_func = lambda expr: func(expr, *_args, **_kwargs)
return _apply(self._epath, expr, _func)
def select(self, expr):
"""
Retrieve parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.select(expr)
[x, y]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.select(expr)
[x, x, y]
"""
result = []
def _select(path, expr):
if not path:
result.append(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
args = self._get_ordered_args(expr)
elif hasattr(expr, '__iter__'):
args = expr
else:
return
if span is not None:
if isinstance(span, slice):
args = args[span]
else:
try:
args = [args[span]]
except IndexError:
return
for arg in args:
if self._has(arg, attrs, types):
_select(path, arg)
_select(self._epath, expr)
return result
def epath(path, expr=None, func=None, args=None, kwargs=None):
r"""
Manipulate parts of an expression selected by a path.
Explanation
===========
This function allows manipulating large nested expressions in a single
line of code, utilizing techniques similar to those applied in XML
processing standards (e.g. XPath).
If ``func`` is ``None``, :func:`epath` retrieves elements selected by
the ``path``. Otherwise it applies ``func`` to each matching element.
Note that it is more efficient to create an EPath object and use the select
and apply methods of that object, since this will compile the path string
only once. This function should only be used as a convenient shortcut for
interactive use.
This is the supported syntax:
* select all: ``/*``
Equivalent of ``for arg in args:``.
* select slice: ``/[0]`` or ``/[1:5]`` or ``/[1:5:2]``
Supports standard Python's slice syntax.
* select by type: ``/list`` or ``/list|tuple``
Emulates ``isinstance()``.
* select by attribute: ``/__iter__?``
Emulates ``hasattr()``.
Parameters
==========
path : str | EPath
A path as a string or a compiled EPath.
expr : Basic | iterable
An expression or a container of expressions.
func : callable (optional)
A callable that will be applied to matching parts.
args : tuple (optional)
Additional positional arguments to ``func``.
kwargs : dict (optional)
Additional keyword arguments to ``func``.
Examples
========
>>> from sympy.simplify.epathtools import epath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = "/*/[0]/Symbol"
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> epath(path, expr)
[x, y]
>>> epath(path, expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = "/*/*/Symbol"
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> epath(path, expr)
[x, x, y]
>>> epath(path, expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
_epath = EPath(path)
if expr is None:
return _epath
if func is None:
return _epath.select(expr)
else:
return _epath.apply(expr, func, args, kwargs)
|
5d93b23fcab1a30b1735baed7548a29d415351abb80d9be2be677ef82c3c2679 | from sympy.core import Mul
from sympy.core.function import count_ops
from sympy.core.traversal import preorder_traversal, bottom_up
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.functions import gamma
from sympy.simplify.gammasimp import gammasimp, _gammasimp
from sympy.utilities.timeutils import timethis
@timethis('combsimp')
def combsimp(expr):
r"""
Simplify combinatorial expressions.
Explanation
===========
This function takes as input an expression containing factorials,
binomials, Pochhammer symbols and other "combinatorial" functions,
and tries to minimize the number of those functions and reduce
the size of their arguments.
The algorithm works by rewriting all combinatorial functions as
gamma functions and applying gammasimp() except simplification
steps that may make an integer argument non-integer. See docstring
of gammasimp for more information.
Then it rewrites the expression in terms of factorials and binomials by
rewriting gammas as factorials and converting (a+b)!/a!b! into
binomials.
If the expression has gamma functions or combinatorial functions
with a non-integer argument, it is automatically passed to gammasimp.
Examples
========
>>> from sympy.simplify import combsimp
>>> from sympy import factorial, binomial, symbols
>>> n, k = symbols('n k', integer = True)
>>> combsimp(factorial(n)/factorial(n - 3))
n*(n - 2)*(n - 1)
>>> combsimp(binomial(n+1, k+1)/binomial(n, k))
(n + 1)/(k + 1)
"""
expr = expr.rewrite(gamma, piecewise=False)
if any(isinstance(node, gamma) and not node.args[0].is_integer
for node in preorder_traversal(expr)):
return gammasimp(expr)
expr = _gammasimp(expr, as_comb=True)
expr = _gamma_as_comb(expr)
return expr
def _gamma_as_comb(expr):
"""
Helper function for combsimp.
Rewrites the expression in terms of factorials and binomials.
"""
expr = expr.rewrite(factorial)
def f(rv):
if not rv.is_Mul:
return rv
rvd = rv.as_powers_dict()
nd_fact_args = [[], []] # numerator, denominator
for k in rvd:
if isinstance(k, factorial) and rvd[k].is_Integer:
if rvd[k].is_positive:
nd_fact_args[0].extend([k.args[0]]*rvd[k])
else:
nd_fact_args[1].extend([k.args[0]]*-rvd[k])
rvd[k] = 0
if not nd_fact_args[0] or not nd_fact_args[1]:
return rv
hit = False
for m in range(2):
i = 0
while i < len(nd_fact_args[m]):
ai = nd_fact_args[m][i]
for j in range(i + 1, len(nd_fact_args[m])):
aj = nd_fact_args[m][j]
sum = ai + aj
if sum in nd_fact_args[1 - m]:
hit = True
nd_fact_args[1 - m].remove(sum)
del nd_fact_args[m][j]
del nd_fact_args[m][i]
rvd[binomial(sum, ai if count_ops(ai) <
count_ops(aj) else aj)] += (
-1 if m == 0 else 1)
break
else:
i += 1
if hit:
return Mul(*([k**rvd[k] for k in rvd] + [factorial(k)
for k in nd_fact_args[0]]))/Mul(*[factorial(k)
for k in nd_fact_args[1]])
return rv
return bottom_up(expr, f)
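# Illustrative sketch (not part of the original source): with integer symbols
# n, k, _gamma_as_comb is expected to recognize the binomial pattern, e.g.
#   _gamma_as_comb(factorial(n)/(factorial(k)*factorial(n - k)))  # -> binomial(n, k)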
|
53884d63e147bc110df02ac2881b5671c416de7cfafc748bdca82cc40cfe7f22 | from sympy.core import Add, Expr, Mul, S, sympify
from sympy.core.function import _mexpand, count_ops, expand_mul
from sympy.core.sorting import default_sort_key
from sympy.core.symbol import Dummy
from sympy.functions import root, sign, sqrt
from sympy.polys import Poly, PolynomialError
def is_sqrt(expr):
"""Return True if expr is a sqrt, otherwise False."""
return expr.is_Pow and expr.exp.is_Rational and abs(expr.exp) is S.Half
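# Illustrative note (not part of the original source):
#   is_sqrt(sqrt(2))    # True  (exponent 1/2)
#   is_sqrt(1/sqrt(2))  # True  (exponent -1/2; its absolute value is 1/2)
#   is_sqrt(x**2)       # False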
def sqrt_depth(p):
"""Return the maximum depth of any square root argument of p.
>>> from sympy.functions.elementary.miscellaneous import sqrt
>>> from sympy.simplify.sqrtdenest import sqrt_depth
Neither of these square roots contains any other square roots
so the depth is 1:
>>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3)))
1
The sqrt(3) is contained within a square root so the depth is
2:
>>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3)))
2
"""
if p is S.ImaginaryUnit:
return 1
if p.is_Atom:
return 0
elif p.is_Add or p.is_Mul:
return max([sqrt_depth(x) for x in p.args], key=default_sort_key)
elif is_sqrt(p):
return sqrt_depth(p.base) + 1
else:
return 0
def is_algebraic(p):
"""Return True if p is comprised of only Rationals or square roots
of Rationals and algebraic operations.
Examples
========
>>> from sympy.functions.elementary.miscellaneous import sqrt
>>> from sympy.simplify.sqrtdenest import is_algebraic
>>> from sympy import cos
>>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2))))
True
>>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2))))
False
"""
if p.is_Rational:
return True
elif p.is_Atom:
return False
elif is_sqrt(p) or p.is_Pow and p.exp.is_Integer:
return is_algebraic(p.base)
elif p.is_Add or p.is_Mul:
return all(is_algebraic(x) for x in p.args)
else:
return False
def _subsets(n):
"""
Returns all possible subsets of the set (0, 1, ..., n-1) except the
empty set, listed in reversed lexicographical order according to binary
representation, so that the case of the fourth root is treated last.
Examples
========
>>> from sympy.simplify.sqrtdenest import _subsets
>>> _subsets(2)
[[1, 0], [0, 1], [1, 1]]
"""
if n == 1:
a = [[1]]
elif n == 2:
a = [[1, 0], [0, 1], [1, 1]]
elif n == 3:
a = [[1, 0, 0], [0, 1, 0], [1, 1, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
else:
b = _subsets(n - 1)
a0 = [x + [0] for x in b]
a1 = [x + [1] for x in b]
a = a0 + [[0]*(n - 1) + [1]] + a1
return a
def sqrtdenest(expr, max_iter=3):
"""Denests sqrts in an expression that contain other square roots
if possible, otherwise returns the expr unchanged. This is based on the
algorithms of [1].
Examples
========
>>> from sympy.simplify.sqrtdenest import sqrtdenest
>>> from sympy import sqrt
>>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))
sqrt(2) + sqrt(3)
See Also
========
sympy.solvers.solvers.unrad
References
==========
.. [1] http://researcher.watson.ibm.com/researcher/files/us-fagin/symb85.pdf
.. [2] D. J. Jeffrey and A. D. Rich, 'Simplifying Square Roots of Square Roots
by Denesting' (available at http://www.cybertester.com/data/denest.pdf)
"""
expr = expand_mul(sympify(expr))
for i in range(max_iter):
z = _sqrtdenest0(expr)
if expr == z:
return expr
expr = z
return expr
def _sqrt_match(p):
"""Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to
matching, sqrt(r) also has the maximal sqrt_depth among the addends of p.
Examples
========
>>> from sympy.functions.elementary.miscellaneous import sqrt
>>> from sympy.simplify.sqrtdenest import _sqrt_match
>>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) + 2*sqrt(1+sqrt(5)))
[1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)]
"""
from sympy.simplify.radsimp import split_surds
p = _mexpand(p)
if p.is_Number:
res = (p, S.Zero, S.Zero)
elif p.is_Add:
pargs = sorted(p.args, key=default_sort_key)
sqargs = [x**2 for x in pargs]
if all(sq.is_Rational and sq.is_positive for sq in sqargs):
r, b, a = split_surds(p)
res = a, b, r
return list(res)
# to make the process canonical, the argument is included in the tuple
# so when the max is selected, it will be the largest arg having a
# given depth
v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)]
nmax = max(v, key=default_sort_key)
if nmax[0] == 0:
res = []
else:
# select r
depth, _, i = nmax
r = pargs.pop(i)
v.pop(i)
b = S.One
if r.is_Mul:
bv = []
rv = []
for x in r.args:
if sqrt_depth(x) < depth:
bv.append(x)
else:
rv.append(x)
b = Mul._from_args(bv)
r = Mul._from_args(rv)
# collect terms containing r
a1 = []
b1 = [b]
for x in v:
if x[0] < depth:
a1.append(x[1])
else:
x1 = x[1]
if x1 == r:
b1.append(1)
else:
if x1.is_Mul:
x1args = list(x1.args)
if r in x1args:
x1args.remove(r)
b1.append(Mul(*x1args))
else:
a1.append(x[1])
else:
a1.append(x[1])
a = Add(*a1)
b = Add(*b1)
res = (a, b, r**2)
else:
b, r = p.as_coeff_Mul()
if is_sqrt(r):
res = (S.Zero, b, r**2)
else:
res = []
return list(res)
class SqrtdenestStopIteration(StopIteration):
pass
def _sqrtdenest0(expr):
"""Returns expr after denesting its arguments."""
if is_sqrt(expr):
n, d = expr.as_numer_denom()
if d is S.One: # n is a square root
if n.base.is_Add:
args = sorted(n.base.args, key=default_sort_key)
if len(args) > 2 and all((x**2).is_Integer for x in args):
try:
return _sqrtdenest_rec(n)
except SqrtdenestStopIteration:
pass
expr = sqrt(_mexpand(Add(*[_sqrtdenest0(x) for x in args])))
return _sqrtdenest1(expr)
else:
n, d = [_sqrtdenest0(i) for i in (n, d)]
return n/d
if isinstance(expr, Add):
cs = []
args = []
for arg in expr.args:
c, a = arg.as_coeff_Mul()
cs.append(c)
args.append(a)
if all(c.is_Rational for c in cs) and all(is_sqrt(arg) for arg in args):
return _sqrt_ratcomb(cs, args)
if isinstance(expr, Expr):
args = expr.args
if args:
return expr.func(*[_sqrtdenest0(a) for a in args])
return expr
def _sqrtdenest_rec(expr):
"""Helper that denests the square root of three or more surds.
Explanation
===========
It returns the denested expression; if it cannot be denested it
throws SqrtdenestStopIteration
Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k));
split expr.base = a + b*sqrt(r_k), where `a` and `b` are on
Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is
on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on.
See [1], section 6.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec
>>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498))
-sqrt(10) + sqrt(2) + 9 + 9*sqrt(5)
>>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65
>>> _sqrtdenest_rec(sqrt(w))
-sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5)
"""
from sympy.simplify.radsimp import radsimp, rad_rationalize, split_surds
if not expr.is_Pow:
return sqrtdenest(expr)
if expr.base < 0:
return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base))
g, a, b = split_surds(expr.base)
a = a*sqrt(g)
if a < b:
a, b = b, a
c2 = _mexpand(a**2 - b**2)
if len(c2.args) > 2:
g, a1, b1 = split_surds(c2)
a1 = a1*sqrt(g)
if a1 < b1:
a1, b1 = b1, a1
c2_1 = _mexpand(a1**2 - b1**2)
c_1 = _sqrtdenest_rec(sqrt(c2_1))
d_1 = _sqrtdenest_rec(sqrt(a1 + c_1))
num, den = rad_rationalize(b1, d_1)
c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2)))
else:
c = _sqrtdenest1(sqrt(c2))
if sqrt_depth(c) > 1:
raise SqrtdenestStopIteration
ac = a + c
if len(ac.args) >= len(expr.args):
if count_ops(ac) >= count_ops(expr.base):
raise SqrtdenestStopIteration
d = sqrtdenest(sqrt(ac))
if sqrt_depth(d) > 1:
raise SqrtdenestStopIteration
num, den = rad_rationalize(b, d)
r = d/sqrt(2) + num/(den*sqrt(2))
r = radsimp(r)
return _mexpand(r)
def _sqrtdenest1(expr, denester=True):
"""Return denested expr after denesting with simpler methods or, that
failing, using the denester."""
from sympy.simplify.simplify import radsimp
if not is_sqrt(expr):
return expr
a = expr.base
if a.is_Atom:
return expr
val = _sqrt_match(a)
if not val:
return expr
a, b, r = val
# try a quick numeric denesting
d2 = _mexpand(a**2 - b**2*r)
if d2.is_Rational:
if d2.is_positive:
z = _sqrt_numeric_denest(a, b, r, d2)
if z is not None:
return z
else:
# fourth root case
# sqrtdenest(sqrt(3 + 2*sqrt(3))) =
# sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2
dr2 = _mexpand(-d2*r)
dr = sqrt(dr2)
if dr.is_Rational:
z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2)
if z is not None:
return z/root(r, 4)
else:
z = _sqrt_symbolic_denest(a, b, r)
if z is not None:
return z
if not denester or not is_algebraic(expr):
return expr
res = sqrt_biquadratic_denest(expr, a, b, r, d2)
if res:
return res
# now call to the denester
av0 = [a, b, r, d2]
z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr))[0]
if av0[1] is None:
return expr
if z is not None:
if sqrt_depth(z) == sqrt_depth(expr) and count_ops(z) > count_ops(expr):
return expr
return z
return expr
def _sqrt_symbolic_denest(a, b, r):
"""Given an expression, sqrt(a + b*sqrt(b)), return the denested
expression or None.
Explanation
===========
If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with
(y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and
(cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as
sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2).
Examples
========
>>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest
>>> from sympy import sqrt, Symbol
>>> from sympy.abc import x
>>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
>>> _sqrt_symbolic_denest(a, b, r)
sqrt(11 - 2*sqrt(29)) + sqrt(5)
If the expression is numeric, it will be simplified:
>>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2)
>>> sqrtdenest(sqrt((w**2).expand()))
1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3)))
Otherwise, it will only be simplified if assumptions allow:
>>> w = w.subs(sqrt(3), sqrt(x + 3))
>>> sqrtdenest(sqrt((w**2).expand()))
sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2)
Notice that the argument of the sqrt is a square. If x is made positive
then the sqrt of the square is resolved:
>>> _.subs(x, Symbol('x', positive=True))
sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2)
"""
a, b, r = map(sympify, (a, b, r))
rval = _sqrt_match(r)
if not rval:
return None
ra, rb, rr = rval
if rb:
y = Dummy('y', positive=True)
try:
newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y)
except PolynomialError:
return None
if newa.degree() == 2:
ca, cb, cc = newa.all_coeffs()
cb += b
if _mexpand(cb**2 - 4*ca*cc).equals(0):
z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2)
if z.is_number:
z = _mexpand(Mul._from_args(z.as_content_primitive()))
return z
def _sqrt_numeric_denest(a, b, r, d2):
r"""Helper that denest
$\sqrt{a + b \sqrt{r}}, d^2 = a^2 - b^2 r > 0$
If it cannot be denested, it returns ``None``.
"""
d = sqrt(d2)
s = a + d
# sqrt_depth(res) <= sqrt_depth(s) + 1
# sqrt_depth(expr) = sqrt_depth(r) + 2
# there is denesting if sqrt_depth(s) + 1 < sqrt_depth(r) + 2
# if s**2 is Number there is a fourth root
if sqrt_depth(s) < sqrt_depth(r) + 1 or (s**2).is_Rational:
s1, s2 = sign(s), sign(b)
if s1 == s2 == -1:
s1 = s2 = 1
res = (s1 * sqrt(a + d) + s2 * sqrt(a - d)) * sqrt(2) / 2
return res.expand()
def sqrt_biquadratic_denest(expr, a, b, r, d2):
"""denest expr = sqrt(a + b*sqrt(r))
where a, b, r are linear combinations of square roots of
positive rationals on the rationals (SQRR) and r > 0, b != 0,
d2 = a**2 - b**2*r > 0
If it cannot denest it returns None.
Explanation
===========
Search for a solution A of type SQRR of the biquadratic equation
4*A**4 - 4*a*A**2 + b**2*r = 0 (1)
sqd = sqrt(a**2 - b**2*r)
Choosing the sqrt to be positive, the possible solutions are
A = sqrt(a/2 +/- sqd/2)
Since a, b, r are SQRR, then a**2 - b**2*r is a SQRR,
so if sqd can be denested, it is done by
_sqrtdenest_rec, and the result is a SQRR.
Similarly for A.
Examples of solutions (in both cases a and sqd are positive):
Example of expr with solution sqrt(a/2 + sqd/2) but not
solution sqrt(a/2 - sqd/2):
expr = sqrt(-sqrt(15) - sqrt(2)*sqrt(-sqrt(5) + 5) - sqrt(3) + 8)
a = -sqrt(15) - sqrt(3) + 8; sqd = -2*sqrt(5) - 2 + 4*sqrt(3)
Example of expr with solution sqrt(a/2 - sqd/2) but not
solution sqrt(a/2 + sqd/2):
w = 2 + r2 + r3 + (1 + r3)*sqrt(2 + r2 + 5*r3)
expr = sqrt((w**2).expand())
a = 4*sqrt(6) + 8*sqrt(2) + 47 + 28*sqrt(3)
sqd = 29 + 20*sqrt(3)
Define B = b/2*A; eq.(1) implies a = A**2 + B**2*r; then
expr**2 = a + b*sqrt(r) = (A + B*sqrt(r))**2
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.sqrtdenest import _sqrt_match, sqrt_biquadratic_denest
>>> z = sqrt((2*sqrt(2) + 4)*sqrt(2 + sqrt(2)) + 5*sqrt(2) + 8)
>>> a, b, r = _sqrt_match(z**2)
>>> d2 = a**2 - b**2*r
>>> sqrt_biquadratic_denest(z, a, b, r, d2)
sqrt(2) + sqrt(sqrt(2) + 2) + 2
"""
from sympy.simplify.radsimp import radsimp, rad_rationalize
if r <= 0 or d2 < 0 or not b or sqrt_depth(expr.base) < 2:
return None
for x in (a, b, r):
for y in x.args:
y2 = y**2
if not y2.is_Integer or not y2.is_positive:
return None
sqd = _mexpand(sqrtdenest(sqrt(radsimp(d2))))
if sqrt_depth(sqd) > 1:
return None
x1, x2 = [a/2 + sqd/2, a/2 - sqd/2]
# look for a solution A with depth 1
for x in (x1, x2):
A = sqrtdenest(sqrt(x))
if sqrt_depth(A) > 1:
continue
Bn, Bd = rad_rationalize(b, _mexpand(2*A))
B = Bn/Bd
z = A + B*sqrt(r)
if z < 0:
z = -z
return _mexpand(z)
return None
def _denester(nested, av0, h, max_depth_level):
"""Denests a list of expressions that contain nested square roots.
Explanation
===========
Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>.
It is assumed that all of the elements of 'nested' share the same
bottom-level radicand. (This is stated in the paper, on page 177, in
the paragraph immediately preceding the algorithm.)
When evaluating all of the arguments in parallel, the bottom-level
radicand only needs to be denested once. This means that calling
_denester with x arguments results in a recursive invocation with x+1
arguments; hence _denester has polynomial complexity.
However, if the arguments were evaluated separately, each call would
result in two recursive invocations, and the algorithm would have
exponential complexity.
This is discussed in the paper in the middle paragraph of page 179.
"""
from sympy.simplify.simplify import radsimp
if h > max_depth_level:
return None, None
if av0[1] is None:
return None, None
if (av0[0] is None and
all(n.is_Number for n in nested)): # no arguments are nested
for f in _subsets(len(nested)): # test subset 'f' of nested
p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]]))
if f.count(1) > 1 and f[-1]:
p = -p
sqp = sqrt(p)
if sqp.is_Rational:
return sqp, f # got a perfect square so return its square root.
# Otherwise, return the radicand from the previous invocation.
return sqrt(nested[-1]), [0]*len(nested)
else:
R = None
if av0[0] is not None:
values = [av0[:2]]
R = av0[2]
nested2 = [av0[3], R]
av0[0] = None
else:
values = list(filter(None, [_sqrt_match(expr) for expr in nested]))
for v in values:
if v[2]: # Since if b=0, r is not defined
if R is not None:
if R != v[2]:
av0[1] = None
return None, None
else:
R = v[2]
if R is None:
# return the radicand from the previous invocation
return sqrt(nested[-1]), [0]*len(nested)
nested2 = [_mexpand(v[0]**2) -
_mexpand(R*v[1]**2) for v in values] + [R]
d, f = _denester(nested2, av0, h + 1, max_depth_level)
if not f:
return None, None
if not any(f[i] for i in range(len(nested))):
v = values[-1]
return sqrt(v[0] + _mexpand(v[1]*d)), f
else:
p = Mul(*[nested[i] for i in range(len(nested)) if f[i]])
v = _sqrt_match(p)
if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]:
v[0] = -v[0]
v[1] = -v[1]
if not f[len(nested)]: # Solution denests with square roots
vad = _mexpand(v[0] + d)
if vad <= 0:
# return the radicand from the previous invocation.
return sqrt(nested[-1]), [0]*len(nested)
if not(sqrt_depth(vad) <= sqrt_depth(R) + 1 or
(vad**2).is_Number):
av0[1] = None
return None, None
sqvad = _sqrtdenest1(sqrt(vad), denester=False)
if not (sqrt_depth(sqvad) <= sqrt_depth(R) + 1):
av0[1] = None
return None, None
sqvad1 = radsimp(1/sqvad)
res = _mexpand(sqvad/sqrt(2) + (v[1]*sqrt(R)*sqvad1/sqrt(2)))
return res, f
# sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f
else: # Solution requires a fourth root
s2 = _mexpand(v[1]*R) + d
if s2 <= 0:
return sqrt(nested[-1]), [0]*len(nested)
FR, s = root(_mexpand(R), 4), sqrt(s2)
return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f
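# Illustrative sketch (not part of the original module): _denester is the
# fallback used by _sqrtdenest1 when the direct numeric/biquadratic paths do
# not apply. A small example of the kind of denesting the overall pipeline
# performs (simple cases are caught earlier; multi-surd radicands fall
# through to _denester):
# >>> from sympy import sqrt, sqrtdenest
# >>> sqrtdenest(sqrt(5 + 2*sqrt(6)))
# sqrt(2) + sqrt(3)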
def _sqrt_ratcomb(cs, args):
"""Denest rational combinations of radicals.
Based on section 5 of [1].
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.sqrtdenest import sqrtdenest
>>> z = sqrt(1+sqrt(3)) + sqrt(3+3*sqrt(3)) - sqrt(10+6*sqrt(3))
>>> sqrtdenest(z)
0
"""
from sympy.simplify.radsimp import radsimp
# check if there exists a pair of sqrt that can be denested
def find(a):
n = len(a)
for i in range(n - 1):
for j in range(i + 1, n):
s1 = a[i].base
s2 = a[j].base
p = _mexpand(s1 * s2)
s = sqrtdenest(sqrt(p))
if s != sqrt(p):
return s, i, j
indices = find(args)
if indices is None:
return Add(*[c * arg for c, arg in zip(cs, args)])
s, i1, i2 = indices
c2 = cs.pop(i2)
args.pop(i2)
a1 = args[i1]
# replace a2 by s/a1
cs[i1] += radsimp(c2 * s / a1.base)
return _sqrt_ratcomb(cs, args)
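# Worked illustration (not part of the original module) of the pair search in
# `find` above, using the docstring example: with args
# [sqrt(1 + sqrt(3)), sqrt(3 + 3*sqrt(3)), sqrt(10 + 6*sqrt(3))], the product
# of the first two radicands is (1 + sqrt(3))*(3 + 3*sqrt(3)) =
# 3*(1 + sqrt(3))**2 = 12 + 6*sqrt(3), whose square root denests to
# 3 + sqrt(3). That pair is therefore combined into a single term and the
# recursion repeats until the whole sum collapses to 0.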
|
37fd91640bcf1161321ad0bb2c7f411bcec26e4293c625a1e1b4801f2f7f845f | from collections import defaultdict
from functools import reduce
from sympy.core.function import expand_log, count_ops
from sympy.core import sympify, Basic, Dummy, S, Add, Mul, Pow, expand_mul, factor_terms
from sympy.core.sorting import ordered, default_sort_key
from sympy.core.numbers import Integer, Rational
from sympy.core.mul import prod, _keep_coeff
from sympy.core.rules import Transform
from sympy.functions import exp_polar, exp, log, root, polarify, unpolarify
from sympy.polys import lcm, gcd
from sympy.ntheory.factor_ import multiplicity
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
"""
reduces expression by combining powers with similar bases and exponents.
Explanation
===========
If ``deep`` is ``True`` then powsimp() will also simplify arguments of
functions. By default ``deep`` is set to ``False``.
If ``force`` is ``True`` then bases will be combined without checking for
assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
if x and y are both negative.
You can make powsimp() only combine bases or only combine exponents by
changing combine='base' or combine='exp'. By default, combine='all',
which does both. combine='base' will only combine
x**a * y**a => (x*y)**a, as well as things like 2**(2*x) => 4**x,
and combine='exp' will only combine
x**a * x**b => x**(a + b).
combine='exp' will strictly only combine exponents in the way that used
to be automatic. Also use deep=True if you need the old behavior.
When combine='all', 'exp' is evaluated first. Consider the first
example below for when there could be an ambiguity relating to this.
This is done so things like the second example can be completely
combined. If you want 'base' combined first, do something like
powsimp(powsimp(expr, combine='base'), combine='exp').
Examples
========
>>> from sympy import powsimp, exp, log, symbols
>>> from sympy.abc import x, y, z, n
>>> powsimp(x**y*x**z*y**z, combine='all')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='exp')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='base', force=True)
x**y*(x*y)**z
>>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
(n*x)**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
n**(y + z)*x**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
(n*x)**y*(n*x)**z
>>> x, y = symbols('x y', positive=True)
>>> powsimp(log(exp(x)*exp(y)))
log(exp(x)*exp(y))
>>> powsimp(log(exp(x)*exp(y)), deep=True)
x + y
Radicals with Mul bases will be combined if combine='exp'
>>> from sympy import sqrt
>>> x, y = symbols('x y')
Two radicals are automatically joined through Mul:
>>> a=sqrt(x*sqrt(y))
>>> a*a**3 == a**4
True
But if an integer power of that radical has been
autoexpanded then Mul does not join the resulting factors:
>>> a**4 # auto expands to a Mul, no longer a Pow
x**2*y
>>> _*a # so Mul doesn't combine them
x**2*y*sqrt(x*sqrt(y))
>>> powsimp(_) # but powsimp will
(x*sqrt(y))**(5/2)
>>> powsimp(x*y*a) # but won't when doing so would violate assumptions
x*y*sqrt(x*sqrt(y))
"""
from sympy.matrices.expressions.matexpr import MatrixSymbol
def recurse(arg, **kwargs):
_deep = kwargs.get('deep', deep)
_combine = kwargs.get('combine', combine)
_force = kwargs.get('force', force)
_measure = kwargs.get('measure', measure)
return powsimp(arg, _deep, _combine, _force, _measure)
expr = sympify(expr)
if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or (
expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))):
return expr
if deep or expr.is_Add or expr.is_Mul and _y not in expr.args:
expr = expr.func(*[recurse(w) for w in expr.args])
if expr.is_Pow:
return recurse(expr*_y, deep=False)/_y
if not expr.is_Mul:
return expr
# handle the Mul
if combine in ('exp', 'all'):
# Collect base/exp data, while maintaining order in the
# non-commutative parts of the product
c_powers = defaultdict(list)
nc_part = []
newexpr = []
coeff = S.One
for term in expr.args:
if term.is_Rational:
coeff *= term
continue
if term.is_Pow:
term = _denest_pow(term)
if term.is_commutative:
b, e = term.as_base_exp()
if deep:
b, e = [recurse(i) for i in [b, e]]
if b.is_Pow or isinstance(b, exp):
# don't let something like sqrt(x**a) split into x**a, 1/2
# or else it will be joined as x**(a/2) later
b, e = b**e, S.One
c_powers[b].append(e)
else:
# This is the logic that combines exponents for equal,
# but non-commutative bases: A**x*A**y == A**(x+y).
if nc_part:
b1, e1 = nc_part[-1].as_base_exp()
b2, e2 = term.as_base_exp()
if (b1 == b2 and
e1.is_commutative and e2.is_commutative):
nc_part[-1] = Pow(b1, Add(e1, e2))
continue
nc_part.append(term)
# add up exponents of common bases
for b, e in ordered(iter(c_powers.items())):
# allow 2**x/4 -> 2**(x - 2); don't do this when b and e are
# Numbers since autoevaluation will undo it, e.g.
# 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4
if (b and b.is_Rational and not all(ei.is_Number for ei in e) and \
coeff is not S.One and
b not in (S.One, S.NegativeOne)):
m = multiplicity(abs(b), abs(coeff))
if m:
e.append(m)
coeff /= b**m
c_powers[b] = Add(*e)
if coeff is not S.One:
if coeff in c_powers:
c_powers[coeff] += S.One
else:
c_powers[coeff] = S.One
# convert to plain dictionary
c_powers = dict(c_powers)
# check for base and inverted base pairs
be = list(c_powers.items())
skip = set() # skip if we already saw them
for b, e in be:
if b in skip:
continue
bpos = b.is_positive or b.is_polar
if bpos:
binv = 1/b
if b != binv and binv in c_powers:
if b.as_numer_denom()[0] is S.One:
c_powers.pop(b)
c_powers[binv] -= e
else:
skip.add(binv)
e = c_powers.pop(binv)
c_powers[b] -= e
# check for base and negated base pairs
be = list(c_powers.items())
_n = S.NegativeOne
for b, e in be:
if (b.is_Symbol or b.is_Add) and -b in c_powers and b in c_powers:
if (b.is_positive is not None or e.is_integer):
if e.is_integer or b.is_negative:
c_powers[-b] += c_powers.pop(b)
else: # (-b).is_positive so use its e
e = c_powers.pop(-b)
c_powers[b] += e
if _n in c_powers:
c_powers[_n] += e
else:
c_powers[_n] = e
# filter c_powers and convert to a list
c_powers = [(b, e) for b, e in c_powers.items() if e]
# ==============================================================
# check for Mul bases of Rational powers that can be combined with
# separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) ->
# (x*sqrt(x*y))**(3/2)
# ---------------- helper functions
def ratq(x):
'''Return Rational part of x's exponent as it appears in the bkey.
'''
return bkey(x)[0][1]
def bkey(b, e=None):
'''Return (b**s, c.q), c.p where e -> c*s. If e is not given then
it will be taken by using as_base_exp() on the input b.
e.g.
x**(3/2) -> (x, 2), 3
x**y -> (x**y, 1), 1
x**(2*y/3) -> (x**y, 3), 2
exp(x/2) -> (exp(x), 2), 1
'''
if e is not None: # coming from c_powers or from below
if e.is_Integer:
return (b, S.One), e
elif e.is_Rational:
return (b, Integer(e.q)), Integer(e.p)
else:
c, m = e.as_coeff_Mul(rational=True)
if c is not S.One:
if m.is_integer:
return (b, Integer(c.q)), m*Integer(c.p)
return (b**m, Integer(c.q)), Integer(c.p)
else:
return (b**e, S.One), S.One
else:
return bkey(*b.as_base_exp())
def update(b):
'''Decide what to do with base, b. If its exponent is now an
integer multiple of the Rational denominator, then remove it
and put the factors of its base in the common_b dictionary or
update the existing bases if necessary. If it has been zeroed
out, simply remove the base.
'''
newe, r = divmod(common_b[b], b[1])
if not r:
common_b.pop(b)
if newe:
for m in Mul.make_args(b[0]**newe):
b, e = bkey(m)
if b not in common_b:
common_b[b] = 0
common_b[b] += e
if b[1] != 1:
bases.append(b)
# ---------------- end of helper functions
# assemble a dictionary of the factors having a Rational power
common_b = {}
done = []
bases = []
for b, e in c_powers:
b, e = bkey(b, e)
if b in common_b:
common_b[b] = common_b[b] + e
else:
common_b[b] = e
if b[1] != 1 and b[0].is_Mul:
bases.append(b)
bases.sort(key=default_sort_key) # this makes tie-breaking canonical
bases.sort(key=measure, reverse=True) # handle longest first
for base in bases:
if base not in common_b: # it may have been removed already
continue
b, exponent = base
last = False # True when no factor of base is a radical
qlcm = 1 # the lcm of the radical denominators
while True:
bstart = b
qstart = qlcm
bb = [] # list of factors
ee = [] # (factor's exponent and its current value in common_b)
for bi in Mul.make_args(b):
bib, bie = bkey(bi)
if bib not in common_b or common_b[bib] < bie:
ee = bb = [] # failed
break
ee.append([bie, common_b[bib]])
bb.append(bib)
if ee:
# find the number of integral extractions possible
# e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1
min1 = ee[0][1]//ee[0][0]
for i in range(1, len(ee)):
rat = ee[i][1]//ee[i][0]
if rat < 1:
break
min1 = min(min1, rat)
else:
# update base factor counts
# e.g. if ee = [(2, 5), (3, 6)] then min1 = 2
# and the new base counts will be 5-2*2 and 6-2*3
for i in range(len(bb)):
common_b[bb[i]] -= min1*ee[i][0]
update(bb[i])
# update the count of the base
# e.g. x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y)
# will increase by 4 to give bkey (x*sqrt(y), 2, 5)
common_b[base] += min1*qstart*exponent
if (last # no more radicals in base
or len(common_b) == 1 # nothing left to join with
or all(k[1] == 1 for k in common_b) # no rad's in common_b
):
break
# see what we can exponentiate base by to remove any radicals
# so we know what to search for
# e.g. if base were x**(1/2)*y**(1/3) then we should
# exponentiate by 6 and look for powers of x and y in the ratio
# of 2 to 3
qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)])
if qlcm == 1:
break # we are done
b = bstart**qlcm
qlcm *= qstart
if all(ratq(bi) == 1 for bi in Mul.make_args(b)):
last = True # we are going to be done after this next pass
# this base no longer can find anything to join with and
# since it was longer than any other we are done with it
b, q = base
done.append((b, common_b.pop(base)*Rational(1, q)))
# update c_powers and get ready to continue with powsimp
c_powers = done
# there may be terms still in common_b that were bases that were
# identified as needing processing, so remove those, too
for (b, q), e in common_b.items():
if (b.is_Pow or isinstance(b, exp)) and \
q is not S.One and not b.exp.is_Rational:
b, be = b.as_base_exp()
b = b**(be/q)
else:
b = root(b, q)
c_powers.append((b, e))
check = len(c_powers)
c_powers = dict(c_powers)
assert len(c_powers) == check # there should have been no duplicates
# ==============================================================
# rebuild the expression
newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()]))
if combine == 'exp':
return expr.func(newexpr, expr.func(*nc_part))
else:
return recurse(expr.func(*nc_part), combine='base') * \
recurse(newexpr, combine='base')
elif combine == 'base':
# Build c_powers and nc_part. These must both be lists not
# dicts because exp's are not combined.
c_powers = []
nc_part = []
for term in expr.args:
if term.is_commutative:
c_powers.append(list(term.as_base_exp()))
else:
nc_part.append(term)
# Pull out numerical coefficients from exponent if assumptions allow
# e.g., 2**(2*x) => 4**x
for i in range(len(c_powers)):
b, e = c_powers[i]
if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar):
continue
exp_c, exp_t = e.as_coeff_Mul(rational=True)
if exp_c is not S.One and exp_t is not S.One:
c_powers[i] = [Pow(b, exp_c), exp_t]
# Combine bases whenever they have the same exponent and
# assumptions allow
# first gather the potential bases under the common exponent
c_exp = defaultdict(list)
for b, e in c_powers:
if deep:
e = recurse(e)
c_exp[e].append(b)
del c_powers
# Merge back in the results of the above to form a new product
c_powers = defaultdict(list)
for e in c_exp:
bases = c_exp[e]
# calculate the new base for e
if len(bases) == 1:
new_base = bases[0]
elif e.is_integer or force:
new_base = expr.func(*bases)
else:
# see which ones can be joined
unk = []
nonneg = []
neg = []
for bi in bases:
if bi.is_negative:
neg.append(bi)
elif bi.is_nonnegative:
nonneg.append(bi)
elif bi.is_polar:
nonneg.append(
bi) # polar can be treated like non-negative
else:
unk.append(bi)
if len(unk) == 1 and not neg or len(neg) == 1 and not unk:
# a single neg or a single unk can join the rest
nonneg.extend(unk + neg)
unk = neg = []
elif neg:
# their negative signs cancel in groups of 2*q if we know
# that e = p/q else we have to treat them as unknown
israt = False
if e.is_Rational:
israt = True
else:
p, d = e.as_numer_denom()
if p.is_integer and d.is_integer:
israt = True
if israt:
neg = [-w for w in neg]
unk.extend([S.NegativeOne]*len(neg))
else:
unk.extend(neg)
neg = []
del israt
# these shouldn't be joined
for b in unk:
c_powers[b].append(e)
# here is a new joined base
new_base = expr.func(*(nonneg + neg))
# if there are positive parts they will just get separated
# again unless some change is made
def _terms(e):
# return the number of terms of this expression
# when multiplied out -- assuming no joining of terms
if e.is_Add:
return sum([_terms(ai) for ai in e.args])
if e.is_Mul:
return prod([_terms(mi) for mi in e.args])
return 1
xnew_base = expand_mul(new_base, deep=False)
if len(Add.make_args(xnew_base)) < _terms(new_base):
new_base = factor_terms(xnew_base)
c_powers[new_base].append(e)
# break out the powers from c_powers now
c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e]
# we're done
return expr.func(*(c_part + nc_part))
else:
raise ValueError("combine must be one of ('all', 'exp', 'base').")
def powdenest(eq, force=False, polar=False):
r"""
Collect exponents on powers as assumptions allow.
Explanation
===========
Given ``(bb**be)**e``, this can be simplified as follows:
* if ``bb`` is positive, or
* ``e`` is an integer, or
* ``|be| < 1`` then this simplifies to ``bb**(be*e)``
Given a product of powers raised to a power, ``(bb1**be1 *
bb2**be2...)**e``, simplification can be done as follows:
- if e is positive, the gcd of all bei can be joined with e;
- all non-negative bb can be separated from those that are negative
and their gcd can be joined with e; autosimplification already
handles this separation.
- integer factors from powers that have integers in the denominator
of the exponent can be removed from any term and the gcd of such
integers can be joined with e
Setting ``force`` to ``True`` will make symbols that are not explicitly
negative behave as though they are positive, resulting in more
denesting.
Setting ``polar`` to ``True`` will do simplifications on the Riemann surface of
the logarithm, also resulting in more denestings.
When there are sums of logs in exp() then a product of powers may be
obtained, e.g. ``exp(3*(log(a) + 2*log(b)))`` -> ``a**3*b**6``.
Examples
========
>>> from sympy.abc import a, b, x, y, z
>>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest
>>> powdenest((x**(2*a/3))**(3*x))
(x**(2*a/3))**(3*x)
>>> powdenest(exp(3*x*log(2)))
2**(3*x)
Assumptions may prevent expansion:
>>> powdenest(sqrt(x**2))
sqrt(x**2)
>>> p = symbols('p', positive=True)
>>> powdenest(sqrt(p**2))
p
No other expansion is done.
>>> i, j = symbols('i,j', integer=True)
>>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j
x**(x*(i + j))
But exp() will be denested by moving all non-log terms outside of
the function; this may result in the collapsing of the exp to a power
with a different base:
>>> powdenest(exp(3*y*log(x)))
x**(3*y)
>>> powdenest(exp(y*(log(a) + log(b))))
(a*b)**y
>>> powdenest(exp(3*(log(a) + log(b))))
a**3*b**3
If assumptions allow, symbols can also be moved to the outermost exponent:
>>> i = Symbol('i', integer=True)
>>> powdenest(((x**(2*i))**(3*y))**x)
((x**(2*i))**(3*y))**x
>>> powdenest(((x**(2*i))**(3*y))**x, force=True)
x**(6*i*x*y)
>>> powdenest(((x**(2*a/3))**(3*y/i))**x)
((x**(2*a/3))**(3*y/i))**x
>>> powdenest((x**(2*i)*y**(4*i))**z, force=True)
(x*y**2)**(2*i*z)
>>> n = Symbol('n', negative=True)
>>> powdenest((x**i)**y, force=True)
x**(i*y)
>>> powdenest((n**i)**x, force=True)
(n**i)**x
"""
from sympy.simplify.simplify import posify
if force:
def _denest(b, e):
if not isinstance(b, (Pow, exp)):
return b.is_positive, Pow(b, e, evaluate=False)
return _denest(b.base, b.exp*e)
reps = []
for p in eq.atoms(Pow, exp):
if isinstance(p.base, (Pow, exp)):
ok, dp = _denest(*p.args)
if ok is not False:
reps.append((p, dp))
if reps:
eq = eq.subs(reps)
eq, reps = posify(eq)
return powdenest(eq, force=False, polar=polar).xreplace(reps)
if polar:
eq, rep = polarify(eq)
return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep)
new = powsimp(sympify(eq))
return new.xreplace(Transform(
_denest_pow, filter=lambda m: m.is_Pow or isinstance(m, exp)))
_y = Dummy('y')
def _denest_pow(eq):
"""
Denest powers.
This is a helper function for powdenest that performs the actual
transformation.
"""
from sympy.simplify.simplify import logcombine
b, e = eq.as_base_exp()
if b.is_Pow or isinstance(b, exp) and e != 1:
new = b._eval_power(e)
if new is not None:
eq = new
b, e = new.as_base_exp()
# denest exp with log terms in exponent
if b is S.Exp1 and e.is_Mul:
logs = []
other = []
for ei in e.args:
if any(isinstance(ai, log) for ai in Add.make_args(ei)):
logs.append(ei)
else:
other.append(ei)
logs = logcombine(Mul(*logs))
return Pow(exp(logs), Mul(*other))
_, be = b.as_base_exp()
if be is S.One and not (b.is_Mul or
b.is_Rational and b.q != 1 or
b.is_positive):
return eq
# denest eq which is either pos**e or Pow**e or Mul**e or
# Mul(b1**e1, b2**e2)
# handle polar numbers specially
polars, nonpolars = [], []
for bb in Mul.make_args(b):
if bb.is_polar:
polars.append(bb.as_base_exp())
else:
nonpolars.append(bb)
if len(polars) == 1 and not polars[0][0].is_Mul:
return Pow(polars[0][0], polars[0][1]*e)*powdenest(Mul(*nonpolars)**e)
elif polars:
return Mul(*[powdenest(bb**(ee*e)) for (bb, ee) in polars]) \
*powdenest(Mul(*nonpolars)**e)
if b.is_Integer:
# use log to see if there is a power here
logb = expand_log(log(b))
if logb.is_Mul:
c, logb = logb.args
e *= c
base = logb.args[0]
return Pow(base, e)
# if b is not a Mul or any factor is an atom then there is nothing to do
if not b.is_Mul or any(s.is_Atom for s in Mul.make_args(b)):
return eq
# let log handle the case of the base of the argument being a Mul, e.g.
# sqrt(x**(2*i)*y**(6*i)) -> x**i*y**(3*i) if x and y are positive; we
# will take the log, expand it, and then factor out the common powers that
# now appear as coefficient. We do this manually since terms_gcd pulls out
# fractions, terms_gcd(x+x*y/2) -> x*(y + 2)/2 and we don't want the 1/2;
# gcd won't pull out numerators from a fraction: gcd(3*x, 9*x/2) -> x but
# we want 3*x. Neither work with noncommutatives.
def nc_gcd(aa, bb):
a, b = [i.as_coeff_Mul() for i in [aa, bb]]
c = gcd(a[0], b[0]).as_numer_denom()[0]
g = Mul(*(a[1].args_cnc(cset=True)[0] & b[1].args_cnc(cset=True)[0]))
return _keep_coeff(c, g)
glogb = expand_log(log(b))
if glogb.is_Add:
args = glogb.args
g = reduce(nc_gcd, args)
if g != 1:
cg, rg = g.as_coeff_Mul()
glogb = _keep_coeff(cg, rg*Add(*[a/g for a in args]))
# now put the log back together again
if isinstance(glogb, log) or not glogb.is_Mul:
if glogb.args[0].is_Pow or isinstance(glogb.args[0], exp):
glogb = _denest_pow(glogb.args[0])
if (abs(glogb.exp) < 1) == True:
return Pow(glogb.base, glogb.exp*e)
return eq
# the log(b) was a Mul so join any adds with logcombine
add = []
other = []
for a in glogb.args:
if a.is_Add:
add.append(a)
else:
other.append(a)
return Pow(exp(logcombine(Mul(*add))), e*Mul(*other))
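# Illustrative sketch (not part of the original module): the exp-with-logs
# branch above is what powdenest relies on for inputs like the docstring
# example exp(3*(log(a) + log(b))). By the same pattern (an assumed, not a
# separately documented, case) one would expect:
# >>> from sympy import exp, log, powdenest
# >>> from sympy.abc import a, b
# >>> powdenest(exp(2*(log(a) + log(b))))
# a**2*b**2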
|
8aa3b3f324858dc331f231c40ee35cda87be6d60ca6cb0f1e1284ce82d15c6f6 | from sympy.core import Function, S, Mul, Pow, Add
from sympy.core.sorting import ordered, default_sort_key
from sympy.core.function import count_ops, expand_func
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions import gamma, sqrt, sin
from sympy.polys import factor, cancel
from sympy.utilities.iterables import sift, uniq
def gammasimp(expr):
r"""
Simplify expressions with gamma functions.
Explanation
===========
This function takes as input an expression containing gamma
functions or functions that can be rewritten in terms of gamma
functions and tries to minimize the number of those functions and
reduce the size of their arguments.
The algorithm works by rewriting all gamma functions as expressions
involving rising factorials (Pochhammer symbols) and applying
recurrence relations and other transformations applicable to rising
factorials, to reduce their arguments, possibly letting the resulting
rising factorial cancel. Rising factorials with the second argument
being an integer are expanded into polynomial forms and finally all
other rising factorials are rewritten in terms of gamma functions.
Then the following two steps are performed.
1. Reduce the number of gammas by applying the reflection theorem
gamma(x)*gamma(1-x) == pi/sin(pi*x).
2. Reduce the number of gammas by applying the multiplication theorem
gamma(x)*gamma(x+1/n)*...*gamma(x+(n-1)/n) == C*gamma(n*x).
It then reduces the number of prefactors by absorbing them into gammas
where possible and expands gammas with rational argument.
All transformation rules can be found (or were derived from) here:
.. [1] http://functions.wolfram.com/GammaBetaErf/Pochhammer/17/01/02/
.. [2] http://functions.wolfram.com/GammaBetaErf/Pochhammer/27/01/0005/
Examples
========
>>> from sympy.simplify import gammasimp
>>> from sympy import gamma, Symbol
>>> from sympy.abc import x
>>> n = Symbol('n', integer = True)
>>> gammasimp(gamma(x)/gamma(x - 3))
(x - 3)*(x - 2)*(x - 1)
>>> gammasimp(gamma(n + 3))
gamma(n + 3)
"""
expr = expr.rewrite(gamma)
return _gammasimp(expr, as_comb = False)
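# Illustrative sketch (not part of the original module): step 1 of the
# procedure described above (the reflection theorem) in action:
# >>> from sympy import gamma, gammasimp, Symbol
# >>> x = Symbol('x')
# >>> gammasimp(gamma(x)*gamma(1 - x))
# pi/sin(pi*x)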
def _gammasimp(expr, as_comb):
"""
Helper function for gammasimp and combsimp.
Explanation
===========
Simplifies expressions written in terms of gamma function. If
as_comb is True, it tries to preserve integer arguments. See
docstring of gammasimp for more information. This was part of
combsimp() in combsimp.py.
"""
expr = expr.replace(gamma,
lambda n: _rf(1, (n - 1).expand()))
if as_comb:
expr = expr.replace(_rf,
lambda a, b: gamma(b + 1))
else:
expr = expr.replace(_rf,
lambda a, b: gamma(a + b)/gamma(a))
def rule(n, k):
coeff, rewrite = S.One, False
cn, _n = n.as_coeff_Add()
if _n and cn.is_Integer and cn:
coeff *= _rf(_n + 1, cn)/_rf(_n - k + 1, cn)
rewrite = True
n = _n
# this sort of binomial has already been removed by
# rising factorials but is left here in case the order
# of rule application is changed
if k.is_Add:
ck, _k = k.as_coeff_Add()
if _k and ck.is_Integer and ck:
coeff *= _rf(n - ck - _k + 1, ck)/_rf(_k + 1, ck)
rewrite = True
k = _k
if count_ops(k) > count_ops(n - k):
rewrite = True
k = n - k
if rewrite:
return coeff*binomial(n, k)
expr = expr.replace(binomial, rule)
def rule_gamma(expr, level=0):
""" Simplify products of gamma functions further. """
if expr.is_Atom:
return expr
def gamma_rat(x):
# helper to simplify ratios of gammas
was = x.count(gamma)
xx = x.replace(gamma, lambda n: _rf(1, (n - 1).expand()
).replace(_rf, lambda a, b: gamma(a + b)/gamma(a)))
if xx.count(gamma) < was:
x = xx
return x
def gamma_factor(x):
# return True if there is a gamma factor in shallow args
if isinstance(x, gamma):
return True
if x.is_Add or x.is_Mul:
return any(gamma_factor(xi) for xi in x.args)
if x.is_Pow and (x.exp.is_integer or x.base.is_positive):
return gamma_factor(x.base)
return False
# recursion step
if level == 0:
expr = expr.func(*[rule_gamma(x, level + 1) for x in expr.args])
level += 1
if not expr.is_Mul:
return expr
# non-commutative step
if level == 1:
args, nc = expr.args_cnc()
if not args:
return expr
if nc:
return rule_gamma(Mul._from_args(args), level + 1)*Mul._from_args(nc)
level += 1
# pure gamma handling, not factor absorption
if level == 2:
T, F = sift(expr.args, gamma_factor, binary=True)
gamma_ind = Mul(*F)
d = Mul(*T)
nd, dd = d.as_numer_denom()
for ipass in range(2):
args = list(ordered(Mul.make_args(nd)))
for i, ni in enumerate(args):
if ni.is_Add:
ni, dd = Add(*[
rule_gamma(gamma_rat(a/dd), level + 1) for a in ni.args]
).as_numer_denom()
args[i] = ni
if not dd.has(gamma):
break
nd = Mul(*args)
if ipass == 0 and not gamma_factor(nd):
break
nd, dd = dd, nd # now process in reversed order
expr = gamma_ind*nd/dd
if not (expr.is_Mul and (gamma_factor(dd) or gamma_factor(nd))):
return expr
level += 1
# iteration until constant
if level == 3:
while True:
was = expr
expr = rule_gamma(expr, 4)
if expr == was:
return expr
numer_gammas = []
denom_gammas = []
numer_others = []
denom_others = []
def explicate(p):
if p is S.One:
return None, []
b, e = p.as_base_exp()
if e.is_Integer:
if isinstance(b, gamma):
return True, [b.args[0]]*e
else:
return False, [b]*e
else:
return False, [p]
newargs = list(ordered(expr.args))
while newargs:
n, d = newargs.pop().as_numer_denom()
isg, l = explicate(n)
if isg:
numer_gammas.extend(l)
elif isg is False:
numer_others.extend(l)
isg, l = explicate(d)
if isg:
denom_gammas.extend(l)
elif isg is False:
denom_others.extend(l)
# =========== level 2 work: pure gamma manipulation =========
if not as_comb:
# Try to reduce the number of gamma factors by applying the
# reflection formula gamma(x)*gamma(1-x) = pi/sin(pi*x)
for gammas, numer, denom in [(
numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
new = []
while gammas:
g1 = gammas.pop()
if g1.is_integer:
new.append(g1)
continue
for i, g2 in enumerate(gammas):
n = g1 + g2 - 1
if not n.is_Integer:
continue
numer.append(S.Pi)
denom.append(sin(S.Pi*g1))
gammas.pop(i)
if n > 0:
for k in range(n):
numer.append(1 - g1 + k)
elif n < 0:
for k in range(-n):
denom.append(-g1 - k)
break
else:
new.append(g1)
# /!\ updating IN PLACE
gammas[:] = new
# Try to reduce the number of gammas by using the duplication
# theorem to cancel an upper and lower: gamma(2*s)/gamma(s) =
# 2**(2*s + 1)/(4*sqrt(pi))*gamma(s + 1/2). Although this could
# be done with higher argument ratios like gamma(3*x)/gamma(x),
# this would not reduce the number of gammas as in this case.
for ng, dg, no, do in [(numer_gammas, denom_gammas, numer_others,
denom_others),
(denom_gammas, numer_gammas, denom_others,
numer_others)]:
while True:
for x in ng:
for y in dg:
n = x - 2*y
if n.is_Integer:
break
else:
continue
break
else:
break
ng.remove(x)
dg.remove(y)
if n > 0:
for k in range(n):
no.append(2*y + k)
elif n < 0:
for k in range(-n):
do.append(2*y - 1 - k)
ng.append(y + S.Half)
no.append(2**(2*y - 1))
do.append(sqrt(S.Pi))
# Try to reduce the number of gamma factors by applying the
# multiplication theorem (used when n gammas with args differing
# by 1/n mod 1 are encountered).
#
# run of 2 with args differing by 1/2
#
# >>> gammasimp(gamma(x)*gamma(x+S.Half))
# 2*sqrt(2)*2**(-2*x - 1/2)*sqrt(pi)*gamma(2*x)
#
# run of 3 args differing by 1/3 (mod 1)
#
# >>> gammasimp(gamma(x)*gamma(x+S(1)/3)*gamma(x+S(2)/3))
# 6*3**(-3*x - 1/2)*pi*gamma(3*x)
# >>> gammasimp(gamma(x)*gamma(x+S(1)/3)*gamma(x+S(5)/3))
# 2*3**(-3*x - 1/2)*pi*(3*x + 2)*gamma(3*x)
#
def _run(coeffs):
# find runs in coeffs such that the difference in terms (mod 1)
# of t1, t2, ..., tn is 1/n
u = list(uniq(coeffs))
for i in range(len(u)):
dj = ([((u[j] - u[i]) % 1, j) for j in range(i + 1, len(u))])
for one, j in dj:
if one.p == 1 and one.q != 1:
n = one.q
got = [i]
get = list(range(1, n))
for d, j in dj:
m = n*d
if m.is_Integer and m in get:
get.remove(m)
got.append(j)
if not get:
break
else:
continue
for i, j in enumerate(got):
c = u[j]
coeffs.remove(c)
got[i] = c
return one.q, got[0], got[1:]
def _mult_thm(gammas, numer, denom):
# pull off and analyze the leading coefficient from each gamma arg
# looking for runs in those Rationals
# expr -> coeff + resid -> rats[resid] = coeff
rats = {}
for g in gammas:
c, resid = g.as_coeff_Add()
rats.setdefault(resid, []).append(c)
# look for runs in Rationals for each resid
keys = sorted(rats, key=default_sort_key)
for resid in keys:
coeffs = list(sorted(rats[resid]))
new = []
while True:
run = _run(coeffs)
if run is None:
break
# process the sequence that was found:
# 1) convert all the gamma functions to have the right
# argument (could be off by an integer)
# 2) append the factors corresponding to the theorem
# 3) append the new gamma function
n, ui, other = run
# (1)
for u in other:
con = resid + u - 1
for k in range(int(u - ui)):
numer.append(con - k)
con = n*(resid + ui) # for (2) and (3)
# (2)
numer.append((2*S.Pi)**(S(n - 1)/2)*
n**(S.Half - con))
# (3)
new.append(con)
# restore resid to coeffs
rats[resid] = [resid + c for c in coeffs] + new
# rebuild the gamma arguments
g = []
for resid in keys:
g += rats[resid]
# /!\ updating IN PLACE
gammas[:] = g
for l, numer, denom in [(numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
_mult_thm(l, numer, denom)
# =========== level >= 2 work: factor absorption =========
if level >= 2:
# Try to absorb factors into the gammas: x*gamma(x) -> gamma(x + 1)
# and gamma(x)/(x - 1) -> gamma(x - 1)
# This code (in particular repeated calls to find_fuzzy) can be very
# slow.
def find_fuzzy(l, x):
if not l:
return
S1, T1 = compute_ST(x)
for y in l:
S2, T2 = inv[y]
if T1 != T2 or (not S1.intersection(S2) and
(S1 != set() or S2 != set())):
continue
# XXX we want some simplification (e.g. cancel or
# simplify) but no matter what it's slow.
a = len(cancel(x/y).free_symbols)
b = len(x.free_symbols)
c = len(y.free_symbols)
# TODO is there a better heuristic?
if a == 0 and (b > 0 or c > 0):
return y
# We thus try to avoid expensive calls by building the following
# "invariants": For every factor or gamma function argument
# - the set of free symbols S
# - the set of functional components T
# We will only try to absorb if T1==T2 and (S1 intersect S2 != emptyset
# or S1 == S2 == emptyset)
inv = {}
def compute_ST(expr):
if expr in inv:
return inv[expr]
return (expr.free_symbols, expr.atoms(Function).union(
{e.exp for e in expr.atoms(Pow)}))
def update_ST(expr):
inv[expr] = compute_ST(expr)
for expr in numer_gammas + denom_gammas + numer_others + denom_others:
update_ST(expr)
for gammas, numer, denom in [(
numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
new = []
while gammas:
g = gammas.pop()
cont = True
while cont:
cont = False
y = find_fuzzy(numer, g)
if y is not None:
numer.remove(y)
if y != g:
numer.append(y/g)
update_ST(y/g)
g += 1
cont = True
y = find_fuzzy(denom, g - 1)
if y is not None:
denom.remove(y)
if y != g - 1:
numer.append((g - 1)/y)
update_ST((g - 1)/y)
g -= 1
cont = True
new.append(g)
# /!\ updating IN PLACE
gammas[:] = new
# =========== rebuild expr ==================================
return Mul(*[gamma(g) for g in numer_gammas]) \
/ Mul(*[gamma(g) for g in denom_gammas]) \
* Mul(*numer_others) / Mul(*denom_others)
# (for some reason we cannot use Basic.replace in this case)
was = factor(expr)
expr = rule_gamma(was)
if expr != was:
expr = factor(expr)
expr = expr.replace(gamma,
lambda n: expand_func(gamma(n)) if n.is_Rational else gamma(n))
return expr
class _rf(Function):
@classmethod
def eval(cls, a, b):
if b.is_Integer:
if not b:
return S.One
n, result = int(b), S.One
if n > 0:
for i in range(n):
result *= a + i
return result
elif n < 0:
for i in range(1, -n + 1):
result *= a - i
return 1/result
else:
if b.is_Add:
c, _b = b.as_coeff_Add()
if c.is_Integer:
if c > 0:
return _rf(a, _b)*_rf(a + _b, c)
elif c < 0:
return _rf(a, _b)/_rf(a + _b + c, -c)
if a.is_Add:
c, _a = a.as_coeff_Add()
if c.is_Integer:
if c > 0:
return _rf(_a, b)*_rf(_a + b, c)/_rf(_a, c)
elif c < 0:
return _rf(_a, b)*_rf(_a + c, -c)/_rf(_a + b + c, -c)
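# Illustrative sketch (not part of the original module): _rf(a, b) models the
# rising factorial a*(a + 1)*...*(a + b - 1) for integer b, e.g.
# >>> _rf(1, 3)   # 1*2*3
# 6
# >>> _rf(5, -2)  # 1/((5 - 1)*(5 - 2))
# 1/12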
|
b221d07fc3a8c1cde02b3553e570ff348483a20fde3f43ce1d7bf9f949fee009 | from collections import defaultdict
from functools import reduce
from sympy.core import (sympify, Basic, S, Expr, factor_terms,
Mul, Add, bottom_up)
from sympy.core.cache import cacheit
from sympy.core.function import (count_ops, _mexpand, FunctionClass, expand,
expand_mul, Derivative)
from sympy.core.numbers import I, Integer, igcd
from sympy.core.sorting import _nodes
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.external.gmpy import SYMPY_INTS
from sympy.functions import sin, cos, exp, cosh, tanh, sinh, tan, cot, coth
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys import Poly, factor, cancel, parallel_poly_from_expr
from sympy.polys.domains import ZZ
from sympy.polys.polyerrors import PolificationFailed
from sympy.polys.polytools import groebner
from sympy.simplify.cse_main import cse
from sympy.strategies.core import identity
from sympy.strategies.tree import greedy
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import debug
def trigsimp_groebner(expr, hints=[], quick=False, order="grlex",
polynomial=False):
"""
Simplify trigonometric expressions using a groebner basis algorithm.
Explanation
===========
This routine takes a fraction involving trigonometric or hyperbolic
expressions, and tries to simplify it. The primary metric is the
total degree. Some attempts are made to choose the simplest possible
expression of the minimal degree, but this is non-rigorous, and also
very slow (see the ``quick=True`` option).
If ``polynomial`` is set to True, instead of simplifying numerator and
denominator together, this function just brings numerator and denominator
into a canonical form. This is much faster, but has potentially worse
results. However, if the input is a polynomial, then the result is
guaranteed to be an equivalent polynomial of minimal degree.
The most important option is hints. Its entries can be any of the
following:
- a natural number
- a function
- an iterable of the form (func, var1, var2, ...)
- anything else, interpreted as a generator
A number is used to indicate that the search space should be increased.
A function is used to indicate that said function is likely to occur in a
simplified expression.
An iterable is used to indicate that func(var1 + var2 + ...) is likely to
occur in a simplified expression.
An additional generator also indicates that it is likely to occur.
(See examples below).
This routine carries out various computationally intensive algorithms.
The option ``quick=True`` can be used to suppress one particularly slow
step (at the expense of potentially more complicated results, but never at
the expense of increased total degree).
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, tan, cos, sinh, cosh, tanh
>>> from sympy.simplify.trigsimp import trigsimp_groebner
Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens:
>>> ex = sin(x)*cos(x)
>>> trigsimp_groebner(ex)
sin(x)*cos(x)
This is because ``trigsimp_groebner`` only looks for a simplification
involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try
``2*x`` by passing ``hints=[2]``:
>>> trigsimp_groebner(ex, hints=[2])
sin(2*x)/2
>>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2])
-cos(2*x)
Increasing the search space this way can quickly become expensive. A much
faster way is to give a specific expression that is likely to occur:
>>> trigsimp_groebner(ex, hints=[sin(2*x)])
sin(2*x)/2
Hyperbolic expressions are similarly supported:
>>> trigsimp_groebner(sinh(2*x)/sinh(x))
2*cosh(x)
Note how no hints had to be passed, since the expression already involved
``2*x``.
The tangent function is also supported. You can either pass ``tan`` in the
hints, to indicate that tan should be tried whenever cosine or sine are,
or you can pass a specific generator:
>>> trigsimp_groebner(sin(x)/cos(x), hints=[tan])
tan(x)
>>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)])
tanh(x)
Finally, you can use the iterable form to suggest that angle sum formulae
should be tried:
>>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y))
>>> trigsimp_groebner(ex, hints=[(tan, x, y)])
tan(x + y)
"""
# TODO
# - preprocess by replacing everything by funcs we can handle
# - optionally use cot instead of tan
# - more intelligent hinting.
# For example, if the ideal is small, and we have sin(x), sin(y),
# add sin(x + y) automatically... ?
# - algebraic numbers ...
# - expressions of lowest degree are not distinguished properly
# e.g. 1 - sin(x)**2
# - we could try to order the generators intelligently, so as to influence
# which monomials appear in the quotient basis
# THEORY
# ------
# Ratsimpmodprime above can be used to "simplify" a rational function
# modulo a prime ideal. "Simplify" mainly means finding an equivalent
# expression of lower total degree.
#
# We intend to use this to simplify trigonometric functions. To do that,
# we need to decide (a) which ring to use, and (b) modulo which ideal to
# simplify. In practice, (a) means settling on a list of "generators"
# a, b, c, ..., such that the fraction we want to simplify is a rational
# function in a, b, c, ..., with coefficients in ZZ (integers).
# (2) means that we have to decide what relations to impose on the
# generators. There are two practical problems:
# (1) The ideal has to be *prime* (a technical term).
# (2) The relations have to be polynomials in the generators.
#
# We typically have two kinds of generators:
# - trigonometric expressions, like sin(x), cos(5*x), etc
# - "everything else", like gamma(x), pi, etc.
#
# Since this function is trigsimp, we will concentrate on what to do with
# trigonometric expressions. We can also simplify hyperbolic expressions,
# but the extensions should be clear.
#
# One crucial point is that all *other* generators really should behave
# like indeterminates. In particular if (say) "I" is one of them, then
# in fact I**2 + 1 = 0 and we may and will compute non-sensical
# expressions. However, we can work with a dummy and add the relation
# I**2 + 1 = 0 to our ideal, then substitute back in the end.
#
# Now regarding trigonometric generators. We split them into groups,
# according to the argument of the trigonometric functions. We want to
# organise this in such a way that most trigonometric identities apply in
# the same group. For example, given sin(x), cos(2*x) and cos(y), we would
# group as [sin(x), cos(2*x)] and [cos(y)].
#
# Our prime ideal will be built in three steps:
# (1) For each group, compute a "geometrically prime" ideal of relations.
# Geometrically prime means that it generates a prime ideal in
# CC[gens], not just ZZ[gens].
# (2) Take the union of all the generators of the ideals for all groups.
# By the geometric primality condition, this is still prime.
# (3) Add further inter-group relations which preserve primality.
#
# Step (1) works as follows. We will isolate common factors in the
# argument, so that all our generators are of the form sin(n*x), cos(n*x)
# or tan(n*x), with n an integer. Suppose first there are no tan terms.
# The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since
# X**2 + Y**2 - 1 is irreducible over CC.
# Now, if we have a generator sin(n*x), then we can, using trig identities,
# express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this
# relation to the ideal, preserving geometric primality, since the quotient
# ring is unchanged.
# Thus we have treated all sin and cos terms.
# For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0.
# (This requires of course that we already have relations for cos(n*x) and
# sin(n*x).) It is not obvious, but it seems that this preserves geometric
# primality.
# XXX A real proof would be nice. HELP!
# Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of
# CC[S, C, T]:
# - it suffices to show that the projective closure in CP**3 is
# irreducible
# - using the half-angle substitutions, we can express sin(x), tan(x),
# cos(x) as rational functions in tan(x/2)
# - from this, we get a rational map from CP**1 to our curve
# - this is a morphism, hence the curve is prime
#
# Step (2) is trivial.
#
# Step (3) works by adding selected relations of the form
# sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is
# preserved by the same argument as before.
def parse_hints(hints):
"""Split hints into (n, funcs, iterables, gens)."""
n = 1
funcs, iterables, gens = [], [], []
for e in hints:
if isinstance(e, (SYMPY_INTS, Integer)):
n = e
elif isinstance(e, FunctionClass):
funcs.append(e)
elif iterable(e):
iterables.append((e[0], e[1:]))
# XXX sin(x+2y)?
# Note: we go through polys so e.g.
# sin(-x) -> -sin(x) -> sin(x)
gens.extend(parallel_poly_from_expr(
[e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens)
else:
gens.append(e)
return n, funcs, iterables, gens
def build_ideal(x, terms):
"""
Build generators for our ideal. ``Terms`` is an iterable with elements of
the form (fn, coeff), indicating that we have a generator fn(coeff*x).
If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed
to appear in terms. Similarly for hyperbolic functions. For tan(n*x),
sin(n*x) and cos(n*x) are guaranteed.
"""
I = []
y = Dummy('y')
for fn, coeff in terms:
for c, s, t, rel in (
[cos, sin, tan, cos(x)**2 + sin(x)**2 - 1],
[cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]):
if coeff == 1 and fn in [c, s]:
I.append(rel)
elif fn == t:
I.append(t(coeff*x)*c(coeff*x) - s(coeff*x))
elif fn in [c, s]:
cn = fn(coeff*y).expand(trig=True).subs(y, x)
I.append(fn(coeff*x) - cn)
return list(set(I))
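# Illustrative sketch (not part of the original module): for terms
# [(sin, 1), (cos, 1)] build_ideal emits the Pythagorean relation
# sin(x)**2 + cos(x)**2 - 1, and for a tan generator it adds
# tan(n*x)*cos(n*x) - sin(n*x), exactly as described in step (1) of the
# THEORY notes above.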
def analyse_gens(gens, hints):
"""
Analyse the generators ``gens``, using the hints ``hints``.
The meaning of ``hints`` is described in the main docstring.
Return a new list of generators, and also the ideal we should
work with.
"""
# First parse the hints
n, funcs, iterables, extragens = parse_hints(hints)
debug('n=%s' % n, 'funcs:', funcs, 'iterables:',
iterables, 'extragens:', extragens)
# We just add the extragens to gens and analyse them as before
gens = list(gens)
gens.extend(extragens)
# remove duplicates
funcs = list(set(funcs))
iterables = list(set(iterables))
gens = list(set(gens))
# all the functions we can do anything with
allfuncs = {sin, cos, tan, sinh, cosh, tanh}
# sin(3*x) -> ((3, x), sin)
trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens
if g.func in allfuncs]
# Our list of new generators - start with anything that we cannot
# work with (i.e. is not a trigonometric term)
freegens = [g for g in gens if g.func not in allfuncs]
newgens = []
trigdict = {}
for (coeff, var), fn in trigterms:
trigdict.setdefault(var, []).append((coeff, fn))
res = [] # the ideal
for key, val in trigdict.items():
# We have now assembled a dictionary. Its keys are common
# arguments in trigonometric expressions, and values are lists of
# pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we
# need to deal with fn(coeff*x0). We take the rational gcd of the
# coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol",
# all other arguments are integral multiples thereof.
# We will build an ideal which works with sin(x), cos(x).
# If hint tan is provided, also work with tan(x). Moreover, if
# n > 1, also work with sin(k*x) for k <= n, and similarly for cos
# (and tan if the hint is provided). Finally, any generators which
# the ideal does not work with but we need to accommodate (either
# because it was in expr or because it was provided as a hint)
# we also build into the ideal.
# This selection process is expressed in the list ``terms``.
# build_ideal then generates the actual relations in our ideal,
# from this list.
fns = [x[1] for x in val]
val = [x[0] for x in val]
gcd = reduce(igcd, val)
terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)]
fs = set(funcs + fns)
for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]):
if any(x in fs for x in (c, s, t)):
fs.add(c)
fs.add(s)
for fn in fs:
for k in range(1, n + 1):
terms.append((fn, k))
extra = []
for fn, v in terms:
if fn == tan:
extra.append((sin, v))
extra.append((cos, v))
if fn in [sin, cos] and tan in fs:
extra.append((tan, v))
if fn == tanh:
extra.append((sinh, v))
extra.append((cosh, v))
if fn in [sinh, cosh] and tanh in fs:
extra.append((tanh, v))
terms.extend(extra)
x = gcd*Mul(*key)
r = build_ideal(x, terms)
res.extend(r)
newgens.extend({fn(v*x) for fn, v in terms})
# Add generators for compound expressions from iterables
for fn, args in iterables:
if fn == tan:
# Tan expressions are recovered from sin and cos.
iterables.extend([(sin, args), (cos, args)])
elif fn == tanh:
# Tanh expressions are recovered from sinh and cosh.
iterables.extend([(sinh, args), (cosh, args)])
else:
dummys = symbols('d:%i' % len(args), cls=Dummy)
expr = fn( Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args)))
res.append(fn(Add(*args)) - expr)
if myI in gens:
res.append(myI**2 + 1)
freegens.remove(myI)
newgens.append(myI)
return res, freegens, newgens
myI = Dummy('I')
expr = expr.subs(S.ImaginaryUnit, myI)
subs = [(myI, S.ImaginaryUnit)]
num, denom = cancel(expr).as_numer_denom()
try:
(pnum, pdenom), opt = parallel_poly_from_expr([num, denom])
except PolificationFailed:
return expr
debug('initial gens:', opt.gens)
ideal, freegens, gens = analyse_gens(opt.gens, hints)
debug('ideal:', ideal)
debug('new gens:', gens, " -- len", len(gens))
debug('free gens:', freegens, " -- len", len(freegens))
# NOTE we force the domain to be ZZ to stop polys from injecting generators
# (which is usually a sign of a bug in the way we build the ideal)
if not gens:
return expr
G = groebner(ideal, order=order, gens=gens, domain=ZZ)
debug('groebner basis:', list(G), " -- len", len(G))
# If our fraction is a polynomial in the free generators, simplify all
# coefficients separately:
from sympy.simplify.ratsimp import ratsimpmodprime
if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)):
num = Poly(num, gens=gens+freegens).eject(*gens)
res = []
for monom, coeff in num.terms():
ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens)
# We compute the transitive closure of all generators that can
# be reached from our generators through relations in the ideal.
changed = True
while changed:
changed = False
for p in ideal:
p = Poly(p)
if not ourgens.issuperset(p.gens) and \
not p.has_only_gens(*set(p.gens).difference(ourgens)):
changed = True
ourgens.update(p.exclude().gens)
# NOTE preserve order!
realgens = [x for x in gens if x in ourgens]
# The generators of the ideal have now been (implicitly) split
# into two groups: those involving ourgens and those that don't.
# Since we took the transitive closure above, these two groups
# live in subrings generated by a *disjoint* set of variables.
# Any sensible groebner basis algorithm will preserve this disjoint
# structure (i.e. the elements of the groebner basis can be split
# similarly), and the two subsets of the groebner basis then
# form groebner bases by themselves. (For the smaller generating
# sets, of course.)
ourG = [g.as_expr() for g in G.polys if
g.has_only_gens(*ourgens.intersection(g.gens))]
res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, ourG, order=order,
gens=realgens, quick=quick, domain=ZZ,
polynomial=polynomial).subs(subs))
return Add(*res)
# NOTE The following is simpler and has less assumptions on the
# groebner basis algorithm. If the above turns out to be broken,
# use this.
return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, list(G), order=order,
gens=gens, quick=quick, domain=ZZ)
for monom, coeff in num.terms()])
else:
return ratsimpmodprime(
expr, list(G), order=order, gens=freegens+gens,
quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)
_trigs = (TrigonometricFunction, HyperbolicFunction)
def trigsimp(expr, **opts):
"""
reduces expression by using known trig identities
Explanation
===========
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', and 'fu'. If 'matching', simplify the
expression recursively by targeting common patterns. If 'groebner', apply
an experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring).
Examples
========
>>> from sympy import trigsimp, sin, cos, log
>>> from sympy.abc import x
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e)
2
Simplification occurs wherever trigonometric functions are located.
>>> trigsimp(log(e))
log(2)
Using `method="groebner"` (or `"combined"`) might lead to greater
simplification.
The old trigsimp routine can be accessed with method='old'.
>>> from sympy import coth, tanh
>>> t = 3*tanh(x)**7 - 2/coth(x)**7
>>> trigsimp(t, method='old') == t
True
>>> trigsimp(t)
tanh(x)**7
"""
from sympy.simplify.fu import fu
expr = sympify(expr)
_eval_trigsimp = getattr(expr, '_eval_trigsimp', None)
if _eval_trigsimp is not None:
return _eval_trigsimp(**opts)
old = opts.pop('old', False)
if not old:
opts.pop('deep', None)
opts.pop('recursive', None)
method = opts.pop('method', 'matching')
else:
method = 'old'
def groebnersimp(ex, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
new = traverse(ex)
if not isinstance(new, Expr):
return new
return trigsimp_groebner(new, **opts)
trigsimpfunc = {
'fu': (lambda x: fu(x, **opts)),
'matching': (lambda x: futrig(x)),
'groebner': (lambda x: groebnersimp(x, **opts)),
'combined': (lambda x: futrig(groebnersimp(x,
polynomial=True, hints=[2, tan]))),
'old': lambda x: trigsimp_old(x, **opts),
}[method]
return trigsimpfunc(expr)
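# Illustrative sketch (not part of the original module): the dispatch table
# above means that the default 'matching' method goes through futrig, so a
# quotient identity such as the following is simplified without invoking the
# groebner machinery:
# >>> from sympy import sin, cos, trigsimp
# >>> from sympy.abc import x
# >>> trigsimp(sin(x)/cos(x))
# tan(x)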
def exptrigsimp(expr):
"""
Simplifies exponential / trigonometric / hyperbolic functions.
Examples
========
>>> from sympy import exptrigsimp, exp, cosh, sinh
>>> from sympy.abc import z
>>> exptrigsimp(exp(z) + exp(-z))
2*cosh(z)
>>> exptrigsimp(cosh(z) - sinh(z))
exp(-z)
"""
from sympy.simplify.fu import hyper_as_trig, TR2i
def exp_trig(e):
# select the better of e, and e rewritten in terms of exp or trig
# functions
choices = [e]
if e.has(*_trigs):
choices.append(e.rewrite(exp))
choices.append(e.rewrite(cos))
return min(*choices, key=count_ops)
newexpr = bottom_up(expr, exp_trig)
def f(rv):
if not rv.is_Mul:
return rv
commutative_part, noncommutative_part = rv.args_cnc()
# Since as_powers_dict loses order information,
# if there is more than one noncommutative factor,
# it should only be used to simplify the commutative part.
if (len(noncommutative_part) > 1):
return f(Mul(*commutative_part))*Mul(*noncommutative_part)
rvd = rv.as_powers_dict()
newd = rvd.copy()
def signlog(expr, sign=S.One):
if expr is S.Exp1:
return sign, S.One
elif isinstance(expr, exp) or (expr.is_Pow and expr.base == S.Exp1):
return sign, expr.exp
elif sign is S.One:
return signlog(-expr, sign=-S.One)
else:
return None, None
ee = rvd[S.Exp1]
for k in rvd:
if k.is_Add and len(k.args) == 2:
# k == c*(1 + sign*E**x)
c = k.args[0]
sign, x = signlog(k.args[1]/c)
if not x:
continue
m = rvd[k]
newd[k] -= m
if ee == -x*m/2:
# sinh and cosh
newd[S.Exp1] -= ee
ee = 0
if sign == 1:
newd[2*c*cosh(x/2)] += m
else:
newd[-2*c*sinh(x/2)] += m
elif newd[1 - sign*S.Exp1**x] == -m:
# tanh
del newd[1 - sign*S.Exp1**x]
if sign == 1:
newd[-c/tanh(x/2)] += m
else:
newd[-c*tanh(x/2)] += m
else:
newd[1 + sign*S.Exp1**x] += m
newd[c] += m
return Mul(*[k**newd[k] for k in newd])
newexpr = bottom_up(newexpr, f)
# sin/cos and sinh/cosh ratios to tan and tanh, respectively
if newexpr.has(HyperbolicFunction):
e, f = hyper_as_trig(newexpr)
newexpr = f(TR2i(e))
if newexpr.has(TrigonometricFunction):
newexpr = TR2i(newexpr)
# can we ever generate an I where there was none previously?
if not (newexpr.has(I) and not expr.has(I)):
expr = newexpr
return expr
#-------------------- the old trigsimp routines ---------------------
def trigsimp_old(expr, *, first=True, **opts):
"""
Reduces expression by using known trig identities.
Notes
=====
deep:
- Apply trigsimp inside all objects with arguments
recursive:
- Use common subexpression elimination (cse()) and apply
trigsimp recursively (this is quite expensive if the
expression is large)
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', 'fu' and 'futrig'. If 'matching', simplify the
expression recursively by pattern matching. If 'groebner', apply an
experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring) while `futrig` runs a subset of Fu-transforms
that mimic the behavior of `trigsimp`.
compare:
- show input and output from `trigsimp` and `futrig` when different,
but returns the `trigsimp` value.
Examples
========
>>> from sympy import trigsimp, sin, cos, log, cot
>>> from sympy.abc import x
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e, old=True)
2
>>> trigsimp(log(e), old=True)
log(2*sin(x)**2 + 2*cos(x)**2)
>>> trigsimp(log(e), deep=True, old=True)
log(2)
Using `method="groebner"` (or `"combined"`) can sometimes lead to a lot
more simplification:
>>> e = (-sin(x) + 1)/cos(x) + cos(x)/(-sin(x) + 1)
>>> trigsimp(e, old=True)
(1 - sin(x))/cos(x) + cos(x)/(1 - sin(x))
>>> trigsimp(e, method="groebner", old=True)
2/cos(x)
>>> trigsimp(1/cot(x)**2, compare=True, old=True)
futrig: tan(x)**2
cot(x)**(-2)
"""
old = expr
if first:
if not expr.has(*_trigs):
return expr
trigsyms = set().union(*[t.free_symbols for t in expr.atoms(*_trigs)])
if len(trigsyms) > 1:
from sympy.simplify.simplify import separatevars
d = separatevars(expr)
if d.is_Mul:
d = separatevars(d, dict=True) or d
if isinstance(d, dict):
expr = 1
for k, v in d.items():
# remove hollow factoring
was = v
v = expand_mul(v)
opts['first'] = False
vnew = trigsimp(v, **opts)
if vnew == v:
vnew = was
expr *= vnew
old = expr
else:
if d.is_Add:
for s in trigsyms:
r, e = expr.as_independent(s)
if r:
opts['first'] = False
expr = r + trigsimp(e, **opts)
if not expr.is_Add:
break
old = expr
recursive = opts.pop('recursive', False)
deep = opts.pop('deep', False)
method = opts.pop('method', 'matching')
def groebnersimp(ex, deep, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
if deep:
ex = traverse(ex)
return trigsimp_groebner(ex, **opts)
trigsimpfunc = {
'matching': (lambda x, d: _trigsimp(x, d)),
'groebner': (lambda x, d: groebnersimp(x, d, **opts)),
'combined': (lambda x, d: _trigsimp(groebnersimp(x,
d, polynomial=True, hints=[2, tan]),
d))
}[method]
if recursive:
w, g = cse(expr)
g = trigsimpfunc(g[0], deep)
for sub in reversed(w):
g = g.subs(sub[0], sub[1])
g = trigsimpfunc(g, deep)
result = g
else:
result = trigsimpfunc(expr, deep)
if opts.get('compare', False):
f = futrig(old)
if f != result:
print('\tfutrig:', f)
return result
def _dotrig(a, b):
"""Helper to tell whether ``a`` and ``b`` have the same sorts
of symbols in them -- no need to test hyperbolic patterns against
expressions that have no hyperbolics in them."""
return a.func == b.func and (
a.has(TrigonometricFunction) and b.has(TrigonometricFunction) or
a.has(HyperbolicFunction) and b.has(HyperbolicFunction))
_trigpat = None
def _trigpats():
global _trigpat
a, b, c = symbols('a b c', cls=Wild)
d = Wild('d', commutative=False)
# for the simplifications like sinh/cosh -> tanh:
# DO NOT REORDER THE FIRST 14 since these are assumed to be in this
# order in _match_div_rewrite.
matchers_division = (
(a*sin(b)**c/cos(b)**c, a*tan(b)**c, sin(b), cos(b)),
(a*tan(b)**c*cos(b)**c, a*sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*sin(b)**c, a*cos(b)**c, sin(b), cos(b)),
(a*tan(b)**c/sin(b)**c, a/cos(b)**c, sin(b), cos(b)),
(a*cot(b)**c/cos(b)**c, a/sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*tan(b)**c, a, sin(b), cos(b)),
(a*(cos(b) + 1)**c*(cos(b) - 1)**c,
a*(-sin(b)**2)**c, cos(b) + 1, cos(b) - 1),
(a*(sin(b) + 1)**c*(sin(b) - 1)**c,
a*(-cos(b)**2)**c, sin(b) + 1, sin(b) - 1),
(a*sinh(b)**c/cosh(b)**c, a*tanh(b)**c, S.One, S.One),
(a*tanh(b)**c*cosh(b)**c, a*sinh(b)**c, S.One, S.One),
(a*coth(b)**c*sinh(b)**c, a*cosh(b)**c, S.One, S.One),
(a*tanh(b)**c/sinh(b)**c, a/cosh(b)**c, S.One, S.One),
(a*coth(b)**c/cosh(b)**c, a/sinh(b)**c, S.One, S.One),
(a*coth(b)**c*tanh(b)**c, a, S.One, S.One),
(c*(tanh(a) + tanh(b))/(1 + tanh(a)*tanh(b)),
tanh(a + b)*c, S.One, S.One),
)
matchers_add = (
(c*sin(a)*cos(b) + c*cos(a)*sin(b) + d, sin(a + b)*c + d),
(c*cos(a)*cos(b) - c*sin(a)*sin(b) + d, cos(a + b)*c + d),
(c*sin(a)*cos(b) - c*cos(a)*sin(b) + d, sin(a - b)*c + d),
(c*cos(a)*cos(b) + c*sin(a)*sin(b) + d, cos(a - b)*c + d),
(c*sinh(a)*cosh(b) + c*sinh(b)*cosh(a) + d, sinh(a + b)*c + d),
(c*cosh(a)*cosh(b) + c*sinh(a)*sinh(b) + d, cosh(a + b)*c + d),
)
# for cos(x)**2 + sin(x)**2 -> 1
matchers_identity = (
(a*sin(b)**2, a - a*cos(b)**2),
(a*tan(b)**2, a*(1/cos(b))**2 - a),
(a*cot(b)**2, a*(1/sin(b))**2 - a),
(a*sin(b + c), a*(sin(b)*cos(c) + sin(c)*cos(b))),
(a*cos(b + c), a*(cos(b)*cos(c) - sin(b)*sin(c))),
(a*tan(b + c), a*((tan(b) + tan(c))/(1 - tan(b)*tan(c)))),
(a*sinh(b)**2, a*cosh(b)**2 - a),
(a*tanh(b)**2, a - a*(1/cosh(b))**2),
(a*coth(b)**2, a + a*(1/sinh(b))**2),
(a*sinh(b + c), a*(sinh(b)*cosh(c) + sinh(c)*cosh(b))),
(a*cosh(b + c), a*(cosh(b)*cosh(c) + sinh(b)*sinh(c))),
(a*tanh(b + c), a*((tanh(b) + tanh(c))/(1 + tanh(b)*tanh(c)))),
)
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1-cos(x)**2 when sin(x)**2 was "simpler"
artifacts = (
(a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos),
(a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos),
(a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin),
(a - a*cosh(b)**2 + c, -a*sinh(b)**2 + c, cosh),
(a - a*(1/cosh(b))**2 + c, a*tanh(b)**2 + c, cosh),
(a + a*(1/sinh(b))**2 + c, a*coth(b)**2 + c, sinh),
# same as above but with noncommutative prefactor
(a*d - a*d*cos(b)**2 + c, a*d*sin(b)**2 + c, cos),
(a*d - a*d*(1/cos(b))**2 + c, -a*d*tan(b)**2 + c, cos),
(a*d - a*d*(1/sin(b))**2 + c, -a*d*cot(b)**2 + c, sin),
(a*d - a*d*cosh(b)**2 + c, -a*d*sinh(b)**2 + c, cosh),
(a*d - a*d*(1/cosh(b))**2 + c, a*d*tanh(b)**2 + c, cosh),
(a*d + a*d*(1/sinh(b))**2 + c, a*d*coth(b)**2 + c, sinh),
)
_trigpat = (a, b, c, d, matchers_division, matchers_add,
matchers_identity, artifacts)
return _trigpat
def _replace_mul_fpowxgpow(expr, f, g, rexp, h, rexph):
"""Helper for _match_div_rewrite.
Replace f(b_)**c_*g(b_)**(rexp(c_)) with h(b)**rexph(c) if f(b_)
and g(b_) are both positive or if c_ is an integer.
"""
# assert expr.is_Mul and expr.is_commutative and f != g
fargs = defaultdict(int)
gargs = defaultdict(int)
args = []
for x in expr.args:
if x.is_Pow or x.func in (f, g):
b, e = x.as_base_exp()
if b.is_positive or e.is_integer:
if b.func == f:
fargs[b.args[0]] += e
continue
elif b.func == g:
gargs[b.args[0]] += e
continue
args.append(x)
common = set(fargs) & set(gargs)
hit = False
while common:
key = common.pop()
fe = fargs.pop(key)
ge = gargs.pop(key)
if fe == rexp(ge):
args.append(h(key)**rexph(fe))
hit = True
else:
fargs[key] = fe
gargs[key] = ge
if not hit:
return expr
while fargs:
key, e = fargs.popitem()
args.append(f(key)**e)
while gargs:
key, e = gargs.popitem()
args.append(g(key)**e)
return Mul(*args)
_idn = lambda x: x
_midn = lambda x: -x
_one = lambda x: S.One
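# A minimal sketch of how the helper above is used by _match_div_rewrite:
# with f=sin, g=cos, rexp=_midn (negate) and h=tan, a product such as
#     sin(x)**2/cos(x)**2
# has exponents 2 and -2 satisfying fe == rexp(ge), so it is rewritten as
# tan(x)**2; factors whose exponents do not pair up are left untouched.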
def _match_div_rewrite(expr, i):
"""helper for __trigsimp"""
if i == 0:
expr = _replace_mul_fpowxgpow(expr, sin, cos,
_midn, tan, _idn)
elif i == 1:
expr = _replace_mul_fpowxgpow(expr, tan, cos,
_idn, sin, _idn)
elif i == 2:
expr = _replace_mul_fpowxgpow(expr, cot, sin,
_idn, cos, _idn)
elif i == 3:
expr = _replace_mul_fpowxgpow(expr, tan, sin,
_midn, cos, _midn)
elif i == 4:
expr = _replace_mul_fpowxgpow(expr, cot, cos,
_midn, sin, _midn)
elif i == 5:
expr = _replace_mul_fpowxgpow(expr, cot, tan,
_idn, _one, _idn)
# i in (6, 7) is skipped
elif i == 8:
expr = _replace_mul_fpowxgpow(expr, sinh, cosh,
_midn, tanh, _idn)
elif i == 9:
expr = _replace_mul_fpowxgpow(expr, tanh, cosh,
_idn, sinh, _idn)
elif i == 10:
expr = _replace_mul_fpowxgpow(expr, coth, sinh,
_idn, cosh, _idn)
elif i == 11:
expr = _replace_mul_fpowxgpow(expr, tanh, sinh,
_midn, cosh, _midn)
elif i == 12:
expr = _replace_mul_fpowxgpow(expr, coth, cosh,
_midn, sinh, _midn)
elif i == 13:
expr = _replace_mul_fpowxgpow(expr, coth, tanh,
_idn, _one, _idn)
else:
return None
return expr
def _trigsimp(expr, deep=False):
# protect the cache from non-trig patterns; we only allow
# trig patterns to enter the cache
if expr.has(*_trigs):
return __trigsimp(expr, deep)
return expr
@cacheit
def __trigsimp(expr, deep=False):
"""recursive helper for trigsimp"""
from sympy.simplify.fu import TR10i
if _trigpat is None:
_trigpats()
a, b, c, d, matchers_division, matchers_add, \
matchers_identity, artifacts = _trigpat
if expr.is_Mul:
# do some simplifications like sin/cos -> tan:
if not expr.is_commutative:
com, nc = expr.args_cnc()
expr = _trigsimp(Mul._from_args(com), deep)*Mul._from_args(nc)
else:
for i, (pattern, simp, ok1, ok2) in enumerate(matchers_division):
if not _dotrig(expr, pattern):
continue
newexpr = _match_div_rewrite(expr, i)
if newexpr is not None:
if newexpr != expr:
expr = newexpr
break
else:
continue
# use SymPy matching instead
res = expr.match(pattern)
if res and res.get(c, 0):
if not res[c].is_integer:
ok = ok1.subs(res)
if not ok.is_positive:
continue
ok = ok2.subs(res)
if not ok.is_positive:
continue
# if "a" contains any of trig or hyperbolic funcs with
# argument "b" then skip the simplification
if any(w.args[0] == res[b] for w in res[a].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
# simplify and finish:
expr = simp.subs(res)
break # process below
if expr.is_Add:
args = []
for term in expr.args:
if not term.is_commutative:
com, nc = term.args_cnc()
nc = Mul._from_args(nc)
term = Mul._from_args(com)
else:
nc = S.One
term = _trigsimp(term, deep)
for pattern, result in matchers_identity:
res = term.match(pattern)
if res is not None:
term = result.subs(res)
break
args.append(term*nc)
if args != expr.args:
expr = Add(*args)
expr = min(expr, expand(expr), key=count_ops)
if expr.is_Add:
for pattern, result in matchers_add:
if not _dotrig(expr, pattern):
continue
expr = TR10i(expr)
if expr.has(HyperbolicFunction):
res = expr.match(pattern)
# if "d" contains any trig or hyperbolic funcs with
# argument "a" or "b" then skip the simplification;
# this isn't perfect -- see tests
if res is None or not (a in res and b in res) or any(
w.args[0] in (res[a], res[b]) for w in res[d].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
expr = result.subs(res)
break
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1 - cos(x)**2 when sin(x)**2 was "simpler"
for pattern, result, ex in artifacts:
if not _dotrig(expr, pattern):
continue
# Substitute a new wild that excludes some function(s)
# to help influence a better match. This is because
# sometimes, for example, 'a' would match sec(x)**2
a_t = Wild('a', exclude=[ex])
pattern = pattern.subs(a, a_t)
result = result.subs(a, a_t)
m = expr.match(pattern)
was = None
while m and was != expr:
was = expr
if m[a_t] == 0 or \
-m[a_t] in m[c].args or m[a_t] + m[c] == 0:
break
if d in m and m[a_t]*m[d] + m[c] == 0:
break
expr = result.subs(m)
m = expr.match(pattern)
m.setdefault(c, S.Zero)
elif expr.is_Mul or expr.is_Pow or deep and expr.args:
expr = expr.func(*[_trigsimp(a, deep) for a in expr.args])
try:
if not expr.has(*_trigs):
raise TypeError
e = expr.atoms(exp)
new = expr.rewrite(exp, deep=deep)
if new == e:
raise TypeError
fnew = factor(new)
if fnew != new:
new = sorted([new, factor(new)], key=count_ops)[0]
# if all exp that were introduced disappeared then accept it
if not (new.atoms(exp) - e):
expr = new
except TypeError:
pass
return expr
#------------------- end of old trigsimp routines --------------------
def futrig(e, *, hyper=True, **kwargs):
"""Return simplified ``e`` using Fu-like transformations.
This is not the "Fu" algorithm. This is called by default
from ``trigsimp``. By default, hyperbolics subexpressions
will be simplified, but this can be disabled by setting
``hyper=False``.
Examples
========
>>> from sympy import trigsimp, tan, sinh, tanh
>>> from sympy.simplify.trigsimp import futrig
>>> from sympy.abc import x
>>> trigsimp(1/tan(x)**2)
tan(x)**(-2)
>>> futrig(sinh(x)/tanh(x))
cosh(x)
"""
from sympy.simplify.fu import hyper_as_trig
e = sympify(e)
if not isinstance(e, Basic):
return e
if not e.args:
return e
old = e
e = bottom_up(e, _futrig)
if hyper and e.has(HyperbolicFunction):
e, f = hyper_as_trig(e)
e = f(bottom_up(e, _futrig))
if e != old and e.is_Mul and e.args[0].is_Rational:
# redistribute leading coeff on 2-arg Add
e = Mul(*e.as_coeff_Mul())
return e
def _futrig(e):
"""Helper for futrig."""
from sympy.simplify.fu import (
TR1, TR2, TR3, TR2i, TR10, L, TR10i,
TR8, TR6, TR15, TR16, TR111, TR5, TRmorrie, TR11, _TR11, TR14, TR22,
TR12)
if not e.has(TrigonometricFunction):
return e
if e.is_Mul:
coeff, e = e.as_independent(TrigonometricFunction)
else:
coeff = None
Lops = lambda x: (L(x), x.count_ops(), _nodes(x), len(x.args), x.is_Add)
trigs = lambda x: x.has(TrigonometricFunction)
tree = [identity,
(
TR3, # canonical angles
TR1, # sec-csc -> cos-sin
TR12, # expand tan of sum
lambda x: _eapply(factor, x, trigs),
TR2, # tan-cot -> sin-cos
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR2i, # sin-cos ratio -> tan
lambda x: _eapply(lambda i: factor(i.normal()), x, trigs),
TR14, # factored identities
TR5, # sin-pow -> cos_pow
TR10, # sin-cos of sums -> sin-cos prod
TR11, _TR11, TR6, # reduce double angles and rewrite cos pows
lambda x: _eapply(factor, x, trigs),
TR14, # factored powers of identities
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR10i, # sin-cos products -> sin-cos of sums
TRmorrie,
[identity, TR8], # sin-cos products -> sin-cos of sums
[identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan
[
lambda x: _eapply(expand_mul, TR5(x), trigs),
lambda x: _eapply(
expand_mul, TR15(x), trigs)], # pos/neg powers of sin
[
lambda x: _eapply(expand_mul, TR6(x), trigs),
lambda x: _eapply(
expand_mul, TR16(x), trigs)], # pos/neg powers of cos
TR111, # tan, sin, cos to neg power -> cot, csc, sec
[identity, TR2i], # sin-cos ratio to tan
[identity, lambda x: _eapply(
expand_mul, TR22(x), trigs)], # tan-cot to sec-csc
TR1, TR2, TR2i,
[identity, lambda x: _eapply(
factor_terms, TR12(x), trigs)], # expand tan of sum
)]
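# In the strategy tree above, a tuple chains transformations in sequence,
# while a list is a set of alternatives from which ``greedy`` keeps the
# result minimizing the ``Lops`` objective; e.g. ``[identity, TR8]`` applies
# TR8 only when doing so lowers the objective.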
e = greedy(tree, objective=Lops)(e)
if coeff is not None:
e = coeff * e
return e
def _is_Expr(e):
"""_eapply helper to tell whether ``e`` and all its args
are Exprs."""
if isinstance(e, Derivative):
return _is_Expr(e.expr)
if not isinstance(e, Expr):
return False
return all(_is_Expr(i) for i in e.args)
def _eapply(func, e, cond=None):
"""Apply ``func`` to ``e`` if all args are Exprs else only
apply it to those args that *are* Exprs."""
if not isinstance(e, Expr):
return e
if _is_Expr(e) or not e.args:
return func(e)
return e.func(*[
_eapply(func, ei) if (cond is None or cond(ei)) else ei
for ei in e.args])
|
94342506c88ae32652b096178179633620aaabf1839b0965fdcc7301ed690756 | """ Tools for doing common subexpression elimination.
"""
from sympy.core import Basic, Mul, Add, Pow, sympify
from sympy.core.containers import Tuple, OrderedSet
from sympy.core.exprtools import factor_terms
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import symbols, Symbol
from sympy.utilities.iterables import numbered_symbols, sift, \
topological_sort, iterable
from . import cse_opts
# (preprocessor, postprocessor) pairs which are commonly useful. They should
# each take a SymPy expression and return a possibly transformed expression.
# When used in the function ``cse()``, the target expressions will be transformed
# by each of the preprocessor functions in order. After the common
# subexpressions are eliminated, each resulting expression will have the
# postprocessor functions transform it in *reverse* order to undo the
# transformation if necessary. This allows the algorithm to operate on
# a representation of the expressions that allows for more optimization
# opportunities.
# ``None`` can be used to specify no transformation for either the preprocessor or
# postprocessor.
basic_optimizations = [(cse_opts.sub_pre, cse_opts.sub_post),
(factor_terms, None)]
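# A minimal usage sketch (illustrative only; the custom pair chosen here is an
# arbitrary example, not a recommended default):
#
#     >>> from sympy import cse, factor_terms
#     >>> from sympy.abc import x, y
#     >>> reps, reduced = cse([x*(x + y), 3*x*(x + y)],
#     ...                     optimizations=[(factor_terms, None)])
#
# Passing optimizations='basic' instead selects the predefined pairs above;
# a ``None`` entry in a pair means that step is skipped.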
# sometimes we want the output in a different format; non-trivial
# transformations can be put here for users
# ===============================================================
def reps_toposort(r):
"""Sort replacements ``r`` so (k1, v1) appears before (k2, v2)
if k2 is in v1's free symbols. This orders items in the
way that cse returns its results (hence, in order to use the
replacements in a substitution operation it would make sense
to reverse the order).
Examples
========
>>> from sympy.simplify.cse_main import reps_toposort
>>> from sympy.abc import x, y
>>> from sympy import Eq
>>> for l, r in reps_toposort([(x, y + 1), (y, 2)]):
... print(Eq(l, r))
...
Eq(y, 2)
Eq(x, y + 1)
"""
r = sympify(r)
E = []
for c1, (k1, v1) in enumerate(r):
for c2, (k2, v2) in enumerate(r):
if k1 in v2.free_symbols:
E.append((c1, c2))
return [r[i] for i in topological_sort((range(len(r)), E))]
def cse_separate(r, e):
"""Move expressions that are in the form (symbol, expr) out of the
expressions and sort them into the replacements using the reps_toposort.
Examples
========
>>> from sympy.simplify.cse_main import cse_separate
>>> from sympy.abc import x, y, z
>>> from sympy import cos, exp, cse, Eq, symbols
>>> x0, x1 = symbols('x:2')
>>> eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
>>> cse([eq, Eq(x, z + 1), z - 2], postprocess=cse_separate) in [
... [[(x0, y + 1), (x, z + 1), (x1, x + 1)],
... [x1 + exp(x1/x0) + cos(x0), z - 2]],
... [[(x1, y + 1), (x, z + 1), (x0, x + 1)],
... [x0 + exp(x0/x1) + cos(x1), z - 2]]]
...
True
"""
d = sift(e, lambda w: w.is_Equality and w.lhs.is_Symbol)
r = r + [w.args for w in d[True]]
e = d[False]
return [reps_toposort(r), e]
def cse_release_variables(r, e):
"""
Return tuples giving ``(a, b)`` where ``a`` is a symbol and ``b`` is
either an expression or None. The value of None is used when a
symbol is no longer needed for subsequent expressions.
Use of such output can reduce the memory footprint of lambdified
expressions that contain large, repeated subexpressions.
Examples
========
>>> from sympy import cse
>>> from sympy.simplify.cse_main import cse_release_variables
>>> from sympy.abc import x, y
>>> eqs = [(x + y - 1)**2, x, x + y, (x + y)/(2*x + 1) + (x + y - 1)**2, (2*x + 1)**(x + y)]
>>> defs, rvs = cse_release_variables(*cse(eqs))
>>> for i in defs:
... print(i)
...
(x0, x + y)
(x1, (x0 - 1)**2)
(x2, 2*x + 1)
(_3, x0/x2 + x1)
(_4, x2**x0)
(x2, None)
(_0, x1)
(x1, None)
(_2, x0)
(x0, None)
(_1, x)
>>> print(rvs)
(_0, _1, _2, _3, _4)
"""
if not r:
return r, e
s, p = zip(*r)
esyms = symbols('_:%d' % len(e))
syms = list(esyms)
s = list(s)
in_use = set(s)
p = list(p)
# sort e so those with most sub-expressions appear first
e = [(e[i], syms[i]) for i in range(len(e))]
e, syms = zip(*sorted(e,
key=lambda x: -sum([p[s.index(i)].count_ops()
for i in x[0].free_symbols & in_use])))
syms = list(syms)
p += e
rv = []
i = len(p) - 1
while i >= 0:
_p = p.pop()
c = in_use & _p.free_symbols
if c: # sorting for canonical results
rv.extend([(s, None) for s in sorted(c, key=str)])
if i >= len(r):
rv.append((syms.pop(), _p))
else:
rv.append((s[i], _p))
in_use -= c
i -= 1
rv.reverse()
return rv, esyms
# ====end of cse postprocess idioms===========================
def preprocess_for_cse(expr, optimizations):
""" Preprocess an expression to optimize for common subexpression
elimination.
Parameters
==========
expr : SymPy expression
The target expression to optimize.
optimizations : list of (callable, callable) pairs
The (preprocessor, postprocessor) pairs.
Returns
=======
expr : SymPy expression
The transformed expression.
"""
for pre, post in optimizations:
if pre is not None:
expr = pre(expr)
return expr
def postprocess_for_cse(expr, optimizations):
"""Postprocess an expression after common subexpression elimination to
return the expression to canonical SymPy form.
Parameters
==========
expr : SymPy expression
The target expression to transform.
optimizations : list of (callable, callable) pairs, optional
The (preprocessor, postprocessor) pairs. The postprocessors will be
applied in reversed order to undo the effects of the preprocessors
correctly.
Returns
=======
expr : SymPy expression
The transformed expression.
"""
for pre, post in reversed(optimizations):
if post is not None:
expr = post(expr)
return expr
class FuncArgTracker:
"""
A class which manages a mapping from functions to arguments and an inverse
mapping from arguments to functions.
"""
def __init__(self, funcs):
# To minimize the number of symbolic comparisons, all function arguments
# get assigned a value number.
self.value_numbers = {}
self.value_number_to_value = []
# Both of these maps use integer indices for arguments / functions.
self.arg_to_funcset = []
self.func_to_argset = []
for func_i, func in enumerate(funcs):
func_argset = OrderedSet()
for func_arg in func.args:
arg_number = self.get_or_add_value_number(func_arg)
func_argset.add(arg_number)
self.arg_to_funcset[arg_number].add(func_i)
self.func_to_argset.append(func_argset)
def get_args_in_value_order(self, argset):
"""
Return the list of arguments in sorted order according to their value
numbers.
"""
return [self.value_number_to_value[argn] for argn in sorted(argset)]
def get_or_add_value_number(self, value):
"""
Return the value number for the given argument.
"""
nvalues = len(self.value_numbers)
value_number = self.value_numbers.setdefault(value, nvalues)
if value_number == nvalues:
self.value_number_to_value.append(value)
self.arg_to_funcset.append(OrderedSet())
return value_number
def stop_arg_tracking(self, func_i):
"""
Remove the function func_i from the argument to function mapping.
"""
for arg in self.func_to_argset[func_i]:
self.arg_to_funcset[arg].remove(func_i)
def get_common_arg_candidates(self, argset, min_func_i=0):
"""Return a dict whose keys are function numbers. The entries of the dict are
the number of arguments said function has in common with
``argset``. Only functions sharing at least 2 arguments with ``argset``
are included, and every key is at least ``min_func_i``.
"""
from collections import defaultdict
count_map = defaultdict(lambda: 0)
if not argset:
return count_map
funcsets = [self.arg_to_funcset[arg] for arg in argset]
# As an optimization below, we handle the largest funcset separately from
# the others.
largest_funcset = max(funcsets, key=len)
for funcset in funcsets:
if largest_funcset is funcset:
continue
for func_i in funcset:
if func_i >= min_func_i:
count_map[func_i] += 1
# We pick the smaller of the two containers (count_map, largest_funcset)
# to iterate over to reduce the number of iterations needed.
(smaller_funcs_container,
larger_funcs_container) = sorted(
[largest_funcset, count_map],
key=len)
for func_i in smaller_funcs_container:
# Not already in count_map? It can't possibly be in the output, so
# skip it.
if count_map[func_i] < 1:
continue
if func_i in larger_funcs_container:
count_map[func_i] += 1
return {k: v for k, v in count_map.items() if v >= 2}
def get_subset_candidates(self, argset, restrict_to_funcset=None):
"""
Return the set of functions whose argument lists contain
``argset``, optionally filtered only to contain functions in
``restrict_to_funcset``.
"""
iarg = iter(argset)
indices = OrderedSet(
fi for fi in self.arg_to_funcset[next(iarg)])
if restrict_to_funcset is not None:
indices &= restrict_to_funcset
for arg in iarg:
indices &= self.arg_to_funcset[arg]
return indices
def update_func_argset(self, func_i, new_argset):
"""
Update a function with a new set of arguments.
"""
new_args = OrderedSet(new_argset)
old_args = self.func_to_argset[func_i]
for deleted_arg in old_args - new_args:
self.arg_to_funcset[deleted_arg].remove(func_i)
for added_arg in new_args - old_args:
self.arg_to_funcset[added_arg].add(func_i)
self.func_to_argset[func_i].clear()
self.func_to_argset[func_i].update(new_args)
class Unevaluated:
def __init__(self, func, args):
self.func = func
self.args = args
def __str__(self):
return "Uneval<{}>({})".format(
self.func, ", ".join(str(a) for a in self.args))
def as_unevaluated_basic(self):
return self.func(*self.args, evaluate=False)
@property
def free_symbols(self):
return set().union(*[a.free_symbols for a in self.args])
__repr__ = __str__
def match_common_args(func_class, funcs, opt_subs):
"""
Recognize and extract common subexpressions of function arguments within a
set of function calls. For instance, for the following function calls::
x + z + y
sin(x + y)
this will extract a common subexpression of `x + y`::
w = x + y
w + z
sin(w)
The function we work with is assumed to be associative and commutative.
Parameters
==========
func_class: class
The function class (e.g. Add, Mul)
funcs: list of functions
A list of function calls.
opt_subs: dict
A dictionary of substitutions which this function may update.
"""
# Sort to ensure that whole-function subexpressions come before the items
# that use them.
funcs = sorted(funcs, key=lambda f: len(f.args))
arg_tracker = FuncArgTracker(funcs)
changed = OrderedSet()
for i in range(len(funcs)):
common_arg_candidates_counts = arg_tracker.get_common_arg_candidates(
arg_tracker.func_to_argset[i], min_func_i=i + 1)
# Sort the candidates in order of match size.
# This makes us try combining smaller matches first.
common_arg_candidates = OrderedSet(sorted(
common_arg_candidates_counts.keys(),
key=lambda k: (common_arg_candidates_counts[k], k)))
while common_arg_candidates:
j = common_arg_candidates.pop(last=False)
com_args = arg_tracker.func_to_argset[i].intersection(
arg_tracker.func_to_argset[j])
if len(com_args) <= 1:
# This may happen if a set of common arguments was already
# combined in a previous iteration.
continue
# For all sets, replace the common symbols by the function
# over them, to allow recursive matches.
diff_i = arg_tracker.func_to_argset[i].difference(com_args)
if diff_i:
# com_func needs to be unevaluated to allow for recursive matches.
com_func = Unevaluated(
func_class, arg_tracker.get_args_in_value_order(com_args))
com_func_number = arg_tracker.get_or_add_value_number(com_func)
arg_tracker.update_func_argset(i, diff_i | OrderedSet([com_func_number]))
changed.add(i)
else:
# Treat the whole expression as a CSE.
#
# The reason this needs to be done is somewhat subtle. Within
# tree_cse(), to_eliminate only contains expressions that are
# seen more than once. The problem is unevaluated expressions
# do not compare equal to the evaluated equivalent. So
# tree_cse() won't mark funcs[i] as a CSE if we use an
# unevaluated version.
com_func_number = arg_tracker.get_or_add_value_number(funcs[i])
diff_j = arg_tracker.func_to_argset[j].difference(com_args)
arg_tracker.update_func_argset(j, diff_j | OrderedSet([com_func_number]))
changed.add(j)
for k in arg_tracker.get_subset_candidates(
com_args, common_arg_candidates):
diff_k = arg_tracker.func_to_argset[k].difference(com_args)
arg_tracker.update_func_argset(k, diff_k | OrderedSet([com_func_number]))
changed.add(k)
if i in changed:
opt_subs[funcs[i]] = Unevaluated(func_class,
arg_tracker.get_args_in_value_order(arg_tracker.func_to_argset[i]))
arg_tracker.stop_arg_tracking(i)
def opt_cse(exprs, order='canonical'):
"""Find optimization opportunities in Adds, Muls, Pows and negative
coefficient Muls.
Parameters
==========
exprs : list of SymPy expressions
The expressions to optimize.
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. For large
expressions where speed is a concern, use the setting order='none'.
Returns
=======
opt_subs : dictionary of expression substitutions
The expression substitutions which can be useful to optimize CSE.
Examples
========
>>> from sympy.simplify.cse_main import opt_cse
>>> from sympy.abc import x
>>> opt_subs = opt_cse([x**-2])
>>> k, v = list(opt_subs.keys())[0], list(opt_subs.values())[0]
>>> print((k, v.as_unevaluated_basic()))
(x**(-2), 1/(x**2))
"""
from sympy.matrices.expressions import MatAdd, MatMul, MatPow
opt_subs = dict()
adds = OrderedSet()
muls = OrderedSet()
seen_subexp = set()
def _find_opts(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return
if expr.is_Atom or expr.is_Order:
return
if iterable(expr):
list(map(_find_opts, expr))
return
if expr in seen_subexp:
return expr
seen_subexp.add(expr)
list(map(_find_opts, expr.args))
if expr.could_extract_minus_sign():
neg_expr = -expr
if not neg_expr.is_Atom:
opt_subs[expr] = Unevaluated(Mul, (S.NegativeOne, neg_expr))
seen_subexp.add(neg_expr)
expr = neg_expr
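# e.g. -x - y is recorded as Mul(-1, x + y) so that x + y itself remains
# available for sharing with other occurrences of x + y.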
if isinstance(expr, (Mul, MatMul)):
muls.add(expr)
elif isinstance(expr, (Add, MatAdd)):
adds.add(expr)
elif isinstance(expr, (Pow, MatPow)):
base, exp = expr.base, expr.exp
if exp.could_extract_minus_sign():
opt_subs[expr] = Unevaluated(Pow, (Pow(base, -exp), -1))
for e in exprs:
if isinstance(e, (Basic, Unevaluated)):
_find_opts(e)
# split muls into commutative
commutative_muls = OrderedSet()
for m in muls:
c, nc = m.args_cnc(cset=False)
if c:
c_mul = m.func(*c)
if nc:
if c_mul == 1:
new_obj = m.func(*nc)
else:
new_obj = m.func(c_mul, m.func(*nc), evaluate=False)
opt_subs[m] = new_obj
if len(c) > 1:
commutative_muls.add(c_mul)
match_common_args(Add, adds, opt_subs)
match_common_args(Mul, commutative_muls, opt_subs)
return opt_subs
def tree_cse(exprs, symbols, opt_subs=None, order='canonical', ignore=()):
"""Perform raw CSE on expression tree, taking opt_subs into account.
Parameters
==========
exprs : list of SymPy expressions
The expressions to reduce.
symbols : infinite iterator yielding unique Symbols
The symbols used to label the common subexpressions which are pulled
out.
opt_subs : dictionary of expression substitutions
The expressions to be substituted before any CSE action is performed.
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. For large
expressions where speed is a concern, use the setting order='none'.
ignore : iterable of Symbols
Substitutions containing any Symbol from ``ignore`` will be ignored.
"""
from sympy.matrices.expressions import MatrixExpr, MatrixSymbol, MatMul, MatAdd
from sympy.polys.rootoftools import RootOf
if opt_subs is None:
opt_subs = dict()
## Find repeated sub-expressions
to_eliminate = set()
seen_subexp = set()
excluded_symbols = set()
def _find_repeated(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return
if isinstance(expr, RootOf):
return
if isinstance(expr, Basic) and (expr.is_Atom or expr.is_Order):
if expr.is_Symbol:
excluded_symbols.add(expr)
return
if iterable(expr):
args = expr
else:
if expr in seen_subexp:
for ign in ignore:
if ign in expr.free_symbols:
break
else:
to_eliminate.add(expr)
return
seen_subexp.add(expr)
if expr in opt_subs:
expr = opt_subs[expr]
args = expr.args
list(map(_find_repeated, args))
for e in exprs:
if isinstance(e, Basic):
_find_repeated(e)
## Rebuild tree
# Remove symbols from the generator that conflict with names in the expressions.
symbols = (symbol for symbol in symbols if symbol not in excluded_symbols)
replacements = []
subs = dict()
def _rebuild(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return expr
if not expr.args:
return expr
if iterable(expr):
new_args = [_rebuild(arg) for arg in expr]
return expr.func(*new_args)
if expr in subs:
return subs[expr]
orig_expr = expr
if expr in opt_subs:
expr = opt_subs[expr]
# If ordering is enabled, process Mul and Add arguments in canonical
# order so that the replacements do not depend on term hashes
if order != 'none':
if isinstance(expr, (Mul, MatMul)):
c, nc = expr.args_cnc()
if c == [1]:
args = nc
else:
args = list(ordered(c)) + nc
elif isinstance(expr, (Add, MatAdd)):
args = list(ordered(expr.args))
else:
args = expr.args
else:
args = expr.args
new_args = list(map(_rebuild, args))
if isinstance(expr, Unevaluated) or new_args != args:
new_expr = expr.func(*new_args)
else:
new_expr = expr
if orig_expr in to_eliminate:
try:
sym = next(symbols)
except StopIteration:
raise ValueError("Symbols iterator ran out of symbols.")
if isinstance(orig_expr, MatrixExpr):
sym = MatrixSymbol(sym.name, orig_expr.rows,
orig_expr.cols)
subs[orig_expr] = sym
replacements.append((sym, new_expr))
return sym
else:
return new_expr
reduced_exprs = []
for e in exprs:
if isinstance(e, Basic):
reduced_e = _rebuild(e)
else:
reduced_e = e
reduced_exprs.append(reduced_e)
return replacements, reduced_exprs
def cse(exprs, symbols=None, optimizations=None, postprocess=None,
order='canonical', ignore=(), list=True):
""" Perform common subexpression elimination on an expression.
Parameters
==========
exprs : list of SymPy expressions, or a single SymPy expression
The expressions to reduce.
symbols : infinite iterator yielding unique Symbols
The symbols used to label the common subexpressions which are pulled
out. The ``numbered_symbols`` generator is useful. The default is a
stream of symbols of the form "x0", "x1", etc. This must be an
infinite iterator.
optimizations : list of (callable, callable) pairs
The (preprocessor, postprocessor) pairs of external optimization
functions. Optionally 'basic' can be passed for a set of predefined
basic optimizations. Such 'basic' optimizations were used by default
in the old implementation, but they can be really slow on larger
expressions. Now, no pre or post optimizations are made by default.
postprocess : a function which accepts the two return values of cse and
returns the desired form of output from cse, e.g. if you want the
replacements reversed the function might be the following lambda:
lambda r, e: (reversed(r), e)
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. If set to
'canonical', arguments will be canonically ordered. If set to 'none',
ordering will be faster but dependent on expressions hashes, thus
machine dependent and variable. For large expressions where speed is a
concern, use the setting order='none'.
ignore : iterable of Symbols
Substitutions containing any Symbol from ``ignore`` will be ignored.
list : bool, (default True)
Returns expression in list or else with same type as input (when False).
Returns
=======
replacements : list of (Symbol, expression) pairs
All of the common subexpressions that were replaced. Subexpressions
earlier in this list might show up in subexpressions later in this
list.
reduced_exprs : list of SymPy expressions
The reduced expressions with all of the replacements above.
Examples
========
>>> from sympy import cse, SparseMatrix
>>> from sympy.abc import x, y, z, w
>>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3])
List of expressions with recursive substitutions:
>>> m = SparseMatrix([x + y, x + y + z])
>>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m])
([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([
[x0],
[x1]])])
Note: the type and mutability of input matrices is retained.
>>> isinstance(_[1][-1], SparseMatrix)
True
The user may disallow substitutions containing certain symbols:
>>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,))
([(x0, x + 1)], [x0*y**2, 3*x0*y**2])
The default return value for the reduced expression(s) is a list, even if there is only
one expression. The `list` flag preserves the type of the input in the output:
>>> cse(x)
([], [x])
>>> cse(x, list=False)
([], x)
"""
from sympy.matrices import (MatrixBase, Matrix, ImmutableMatrix,
SparseMatrix, ImmutableSparseMatrix)
if not list:
return _cse_homogeneous(exprs,
symbols=symbols, optimizations=optimizations,
postprocess=postprocess, order=order, ignore=ignore)
if isinstance(exprs, (int, float)):
exprs = sympify(exprs)
# Handle the case if just one expression was passed.
if isinstance(exprs, (Basic, MatrixBase)):
exprs = [exprs]
copy = exprs
temp = []
for e in exprs:
if isinstance(e, (Matrix, ImmutableMatrix)):
temp.append(Tuple(*e.flat()))
elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
temp.append(Tuple(*e.todok().items()))
else:
temp.append(e)
exprs = temp
del temp
if optimizations is None:
optimizations = []
elif optimizations == 'basic':
optimizations = basic_optimizations
# Preprocess the expressions to give us better optimization opportunities.
reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs]
if symbols is None:
symbols = numbered_symbols(cls=Symbol)
else:
# In case we get passed an iterable with an __iter__ method instead of
# an actual iterator.
symbols = iter(symbols)
# Find other optimization opportunities.
opt_subs = opt_cse(reduced_exprs, order)
# Main CSE algorithm.
replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs,
order, ignore)
# Postprocess the expressions to return the expressions to canonical form.
exprs = copy
for i, (sym, subtree) in enumerate(replacements):
subtree = postprocess_for_cse(subtree, optimizations)
replacements[i] = (sym, subtree)
reduced_exprs = [postprocess_for_cse(e, optimizations)
for e in reduced_exprs]
# Get the matrices back
for i, e in enumerate(exprs):
if isinstance(e, (Matrix, ImmutableMatrix)):
reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i])
if isinstance(e, ImmutableMatrix):
reduced_exprs[i] = reduced_exprs[i].as_immutable()
elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
m = SparseMatrix(e.rows, e.cols, {})
for k, v in reduced_exprs[i]:
m[k] = v
if isinstance(e, ImmutableSparseMatrix):
m = m.as_immutable()
reduced_exprs[i] = m
if postprocess is None:
return replacements, reduced_exprs
return postprocess(replacements, reduced_exprs)
def _cse_homogeneous(exprs, **kwargs):
"""
Same as ``cse`` but the ``reduced_exprs`` are returned
with the same type as ``exprs`` or a sympified version of the same.
Parameters
==========
exprs : an Expr, iterable of Expr or dictionary with Expr values
the expressions in which repeated subexpressions will be identified
kwargs : additional arguments for the ``cse`` function
Returns
=======
replacements : list of (Symbol, expression) pairs
All of the common subexpressions that were replaced. Subexpressions
earlier in this list might show up in subexpressions later in this
list.
reduced_exprs : list of SymPy expressions
The reduced expressions with all of the replacements above.
Examples
========
>>> from sympy.simplify.cse_main import cse
>>> from sympy import cos, Tuple, Matrix
>>> from sympy.abc import x
>>> output = lambda x: type(cse(x, list=False)[1])
>>> output(1)
<class 'sympy.core.numbers.One'>
>>> output('cos(x)')
<class 'str'>
>>> output(cos(x))
cos
>>> output(Tuple(1, x))
<class 'sympy.core.containers.Tuple'>
>>> output(Matrix([[1,0], [0,1]]))
<class 'sympy.matrices.dense.MutableDenseMatrix'>
>>> output([1, x])
<class 'list'>
>>> output((1, x))
<class 'tuple'>
>>> output({1, x})
<class 'set'>
"""
if isinstance(exprs, str):
replacements, reduced_exprs = _cse_homogeneous(
sympify(exprs), **kwargs)
return replacements, repr(reduced_exprs)
if isinstance(exprs, (list, tuple, set)):
replacements, reduced_exprs = cse(exprs, **kwargs)
return replacements, type(exprs)(reduced_exprs)
if isinstance(exprs, dict):
keys = list(exprs.keys()) # In order to guarantee the order of the elements.
replacements, values = cse([exprs[k] for k in keys], **kwargs)
reduced_exprs = dict(zip(keys, values))
return replacements, reduced_exprs
try:
replacements, (reduced_exprs,) = cse(exprs, **kwargs)
except TypeError: # For example 'mpf' objects
return [], exprs
else:
return replacements, reduced_exprs
|
55fb7d2a4f4b56de8b5eb8eeff13e6da86e1ad73b2bfa2bdac8a534f3db0916d | from collections import defaultdict
from sympy.core.add import Add
from sympy.core.expr import Expr
from sympy.core.exprtools import Factors, gcd_terms, factor_terms
from sympy.core.function import expand_mul
from sympy.core.mul import Mul
from sympy.core.numbers import pi, I
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.core.traversal import bottom_up
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions.elementary.hyperbolic import (
cosh, sinh, tanh, coth, sech, csch, HyperbolicFunction)
from sympy.functions.elementary.trigonometric import (
cos, sin, tan, cot, sec, csc, sqrt, TrigonometricFunction)
from sympy.ntheory.factor_ import perfect_power
from sympy.polys.polytools import factor
from sympy.strategies.tree import greedy
from sympy.strategies.core import identity, debug
from sympy import SYMPY_DEBUG
# ================== Fu-like tools ===========================
def TR0(rv):
"""Simplification of rational polynomials, trying to simplify
the expression, e.g. combine things like 3*x + 2*x, etc....
"""
# although it would be nice to use cancel, it doesn't work
# with noncommutatives
return rv.normal().factor().expand()
def TR1(rv):
"""Replace sec, csc with 1/cos, 1/sin
Examples
========
>>> from sympy.simplify.fu import TR1, sec, csc
>>> from sympy.abc import x
>>> TR1(2*csc(x) + sec(x))
1/cos(x) + 2/sin(x)
"""
def f(rv):
if isinstance(rv, sec):
a = rv.args[0]
return S.One/cos(a)
elif isinstance(rv, csc):
a = rv.args[0]
return S.One/sin(a)
return rv
return bottom_up(rv, f)
def TR2(rv):
"""Replace tan and cot with sin/cos and cos/sin
Examples
========
>>> from sympy.simplify.fu import TR2
>>> from sympy.abc import x
>>> from sympy import tan, cot, sin, cos
>>> TR2(tan(x))
sin(x)/cos(x)
>>> TR2(cot(x))
cos(x)/sin(x)
>>> TR2(tan(tan(x) - sin(x)/cos(x)))
0
"""
def f(rv):
if isinstance(rv, tan):
a = rv.args[0]
return sin(a)/cos(a)
elif isinstance(rv, cot):
a = rv.args[0]
return cos(a)/sin(a)
return rv
return bottom_up(rv, f)
def TR2i(rv, half=False):
"""Converts ratios involving sin and cos as follows::
sin(x)/cos(x) -> tan(x)
sin(x)/(cos(x) + 1) -> tan(x/2) if half=True
Examples
========
>>> from sympy.simplify.fu import TR2i
>>> from sympy.abc import x, a
>>> from sympy import sin, cos
>>> TR2i(sin(x)/cos(x))
tan(x)
Powers of the numerator and denominator are also recognized
>>> TR2i(sin(x)**2/(cos(x) + 1)**2, half=True)
tan(x/2)**2
The transformation does not take place unless assumptions allow
(i.e. the base must be positive or the exponent must be an integer
for both numerator and denominator)
>>> TR2i(sin(x)**a/(cos(x) + 1)**a)
sin(x)**a/(cos(x) + 1)**a
"""
def f(rv):
if not rv.is_Mul:
return rv
n, d = rv.as_numer_denom()
if n.is_Atom or d.is_Atom:
return rv
def ok(k, e):
# initial filtering of factors
return (
(e.is_integer or k.is_positive) and (
k.func in (sin, cos) or (half and
k.is_Add and
len(k.args) >= 2 and
any(any(isinstance(ai, cos) or ai.is_Pow and ai.base is cos
for ai in Mul.make_args(a)) for a in k.args))))
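# ``ok`` screens factors: a base may participate only if its exponent is an
# integer or the base is known positive, and the base is a sin or cos (or,
# when half=True, an Add such as cos(x) + 1 that contains a cos term).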
n = n.as_powers_dict()
ndone = [(k, n.pop(k)) for k in list(n.keys()) if not ok(k, n[k])]
if not n:
return rv
d = d.as_powers_dict()
ddone = [(k, d.pop(k)) for k in list(d.keys()) if not ok(k, d[k])]
if not d:
return rv
# factoring if necessary
def factorize(d, ddone):
newk = []
for k in d:
if k.is_Add and len(k.args) > 1:
knew = factor(k) if half else factor_terms(k)
if knew != k:
newk.append((k, knew))
if newk:
for i, (k, knew) in enumerate(newk):
del d[k]
newk[i] = knew
newk = Mul(*newk).as_powers_dict()
for k in newk:
v = d[k] + newk[k]
if ok(k, v):
d[k] = v
else:
ddone.append((k, v))
del newk
factorize(n, ndone)
factorize(d, ddone)
# joining
t = []
for k in n:
if isinstance(k, sin):
a = cos(k.args[0], evaluate=False)
if a in d and d[a] == n[k]:
t.append(tan(k.args[0])**n[k])
n[k] = d[a] = None
elif half:
a1 = 1 + a
if a1 in d and d[a1] == n[k]:
t.append((tan(k.args[0]/2))**n[k])
n[k] = d[a1] = None
elif isinstance(k, cos):
a = sin(k.args[0], evaluate=False)
if a in d and d[a] == n[k]:
t.append(tan(k.args[0])**-n[k])
n[k] = d[a] = None
elif half and k.is_Add and k.args[0] is S.One and \
isinstance(k.args[1], cos):
a = sin(k.args[1].args[0], evaluate=False)
if a in d and d[a] == n[k] and (d[a].is_integer or \
a.is_positive):
t.append(tan(a.args[0]/2)**-n[k])
n[k] = d[a] = None
if t:
rv = Mul(*(t + [b**e for b, e in n.items() if e]))/\
Mul(*[b**e for b, e in d.items() if e])
rv *= Mul(*[b**e for b, e in ndone])/Mul(*[b**e for b, e in ddone])
return rv
return bottom_up(rv, f)
def TR3(rv):
"""Induced formula: example sin(-a) = -sin(a)
Examples
========
>>> from sympy.simplify.fu import TR3
>>> from sympy.abc import x, y
>>> from sympy import pi
>>> from sympy import cos
>>> TR3(cos(y - x*(y - x)))
cos(x*(x - y) + y)
>>> cos(pi/2 + x)
-sin(x)
>>> cos(30*pi/2 + x)
-cos(x)
"""
from sympy.simplify.simplify import signsimp
# Negative argument (already automatic for funcs like sin(-x) -> -sin(x)
# but more complicated expressions can use it, too). Also, trig angles
# between pi/4 and pi/2 are not reduced to an angle between 0 and pi/4.
# The following are automatically handled:
# Argument of type: pi/2 +/- angle
# Argument of type: pi +/- angle
# Argument of type : 2k*pi +/- angle
def f(rv):
if not isinstance(rv, TrigonometricFunction):
return rv
rv = rv.func(signsimp(rv.args[0]))
if not isinstance(rv, TrigonometricFunction):
return rv
if (rv.args[0] - S.Pi/4).is_positive is (S.Pi/2 - rv.args[0]).is_positive is True:
fmap = {cos: sin, sin: cos, tan: cot, cot: tan, sec: csc, csc: sec}
rv = fmap[rv.func](S.Pi/2 - rv.args[0])
return rv
return bottom_up(rv, f)
def TR4(rv):
"""Identify values of special angles.
a= 0 pi/6 pi/4 pi/3 pi/2
----------------------------------------------------
sin(a) 0 1/2 sqrt(2)/2 sqrt(3)/2 1
cos(a) 1 sqrt(3)/2 sqrt(2)/2 1/2 0
tan(a) 0 sqrt(3)/3 1 sqrt(3) --
Examples
========
>>> from sympy import pi
>>> from sympy import cos, sin, tan, cot
>>> for s in (0, pi/6, pi/4, pi/3, pi/2):
... print('%s %s %s %s' % (cos(s), sin(s), tan(s), cot(s)))
...
1 0 0 zoo
sqrt(3)/2 1/2 sqrt(3)/3 sqrt(3)
sqrt(2)/2 sqrt(2)/2 1 1
1/2 sqrt(3)/2 sqrt(3) sqrt(3)/3
0 1 zoo 0
"""
# special values at 0, pi/6, pi/4, pi/3, pi/2 already handled
return rv
def _TR56(rv, f, g, h, max, pow):
"""Helper for TR5 and TR6 to replace f**2 with h(g**2)
Options
=======
max : controls size of exponent that can appear on f
e.g. if max=4 then f**4 will be changed to h(g**2)**2.
pow : controls whether the exponent must be a perfect power of 2
e.g. if pow=True (and max >= 6) then f**6 will not be changed
but f**8 will be changed to h(g**2)**4
>>> from sympy.simplify.fu import _TR56 as T
>>> from sympy.abc import x
>>> from sympy import sin, cos
>>> h = lambda x: 1 - x
>>> T(sin(x)**3, sin, cos, h, 4, False)
(1 - cos(x)**2)*sin(x)
>>> T(sin(x)**6, sin, cos, h, 6, False)
(1 - cos(x)**2)**3
>>> T(sin(x)**6, sin, cos, h, 6, True)
sin(x)**6
>>> T(sin(x)**8, sin, cos, h, 10, True)
(1 - cos(x)**2)**4
"""
def _f(rv):
# I'm not sure if this transformation should target all even powers
# or only those expressible as powers of 2. Also, should it only
# make the changes in powers that appear in sums -- making an isolated
# change is not going to allow a simplification as far as I can tell.
if not (rv.is_Pow and rv.base.func == f):
return rv
if not rv.exp.is_real:
return rv
if (rv.exp < 0) == True:
return rv
if (rv.exp > max) == True:
return rv
if rv.exp == 1:
return rv
if rv.exp == 2:
return h(g(rv.base.args[0])**2)
else:
if rv.exp % 2 == 1:
e = rv.exp//2
return f(rv.base.args[0])*h(g(rv.base.args[0])**2)**e
elif rv.exp == 4:
e = 2
elif not pow:
if rv.exp % 2:
return rv
e = rv.exp//2
else:
p = perfect_power(rv.exp)
if not p:
return rv
e = rv.exp//2
return h(g(rv.base.args[0])**2)**e
return bottom_up(rv, _f)
def TR5(rv, max=4, pow=False):
"""Replacement of sin**2 with 1 - cos(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR5
>>> from sympy.abc import x
>>> from sympy import sin
>>> TR5(sin(x)**2)
1 - cos(x)**2
>>> TR5(sin(x)**-2) # unchanged
sin(x)**(-2)
>>> TR5(sin(x)**4)
(1 - cos(x)**2)**2
"""
return _TR56(rv, sin, cos, lambda x: 1 - x, max=max, pow=pow)
def TR6(rv, max=4, pow=False):
"""Replacement of cos**2 with 1 - sin(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR6
>>> from sympy.abc import x
>>> from sympy import cos
>>> TR6(cos(x)**2)
1 - sin(x)**2
>>> TR6(cos(x)**-2) #unchanged
cos(x)**(-2)
>>> TR6(cos(x)**4)
(1 - sin(x)**2)**2
"""
return _TR56(rv, cos, sin, lambda x: 1 - x, max=max, pow=pow)
def TR7(rv):
"""Lowering the degree of cos(x)**2.
Examples
========
>>> from sympy.simplify.fu import TR7
>>> from sympy.abc import x
>>> from sympy import cos
>>> TR7(cos(x)**2)
cos(2*x)/2 + 1/2
>>> TR7(cos(x)**2 + 1)
cos(2*x)/2 + 3/2
"""
def f(rv):
if not (rv.is_Pow and rv.base.func == cos and rv.exp == 2):
return rv
return (1 + cos(2*rv.base.args[0]))/2
return bottom_up(rv, f)
def TR8(rv, first=True):
"""Converting products of ``cos`` and/or ``sin`` to a sum or
difference of ``cos`` and or ``sin`` terms.
Examples
========
>>> from sympy.simplify.fu import TR8
>>> from sympy import cos, sin
>>> TR8(cos(2)*cos(3))
cos(5)/2 + cos(1)/2
>>> TR8(cos(2)*sin(3))
sin(5)/2 + sin(1)/2
>>> TR8(sin(2)*sin(3))
-cos(5)/2 + cos(1)/2
"""
def f(rv):
if not (
rv.is_Mul or
rv.is_Pow and
rv.base.func in (cos, sin) and
(rv.exp.is_integer or rv.base.is_positive)):
return rv
if first:
n, d = [expand_mul(i) for i in rv.as_numer_denom()]
newn = TR8(n, first=False)
newd = TR8(d, first=False)
if newn != n or newd != d:
rv = gcd_terms(newn/newd)
if rv.is_Mul and rv.args[0].is_Rational and \
len(rv.args) == 2 and rv.args[1].is_Add:
rv = Mul(*rv.as_coeff_Mul())
return rv
args = {cos: [], sin: [], None: []}
for a in ordered(Mul.make_args(rv)):
if a.func in (cos, sin):
args[a.func].append(a.args[0])
elif (a.is_Pow and a.exp.is_Integer and a.exp > 0 and \
a.base.func in (cos, sin)):
# XXX this is ok but pathological expressions could be handled
# more efficiently as in TRmorrie
args[a.base.func].extend([a.base.args[0]]*a.exp)
else:
args[None].append(a)
c = args[cos]
s = args[sin]
if not (c and s or len(c) > 1 or len(s) > 1):
return rv
args = args[None]
n = min(len(c), len(s))
for i in range(n):
a1 = s.pop()
a2 = c.pop()
args.append((sin(a1 + a2) + sin(a1 - a2))/2)
while len(c) > 1:
a1 = c.pop()
a2 = c.pop()
args.append((cos(a1 + a2) + cos(a1 - a2))/2)
if c:
args.append(cos(c.pop()))
while len(s) > 1:
a1 = s.pop()
a2 = s.pop()
args.append((-cos(a1 + a2) + cos(a1 - a2))/2)
if s:
args.append(sin(s.pop()))
return TR8(expand_mul(Mul(*args)))
return bottom_up(rv, f)
def TR9(rv):
"""Sum of ``cos`` or ``sin`` terms as a product of ``cos`` or ``sin``.
Examples
========
>>> from sympy.simplify.fu import TR9
>>> from sympy import cos, sin
>>> TR9(cos(1) + cos(2))
2*cos(1/2)*cos(3/2)
>>> TR9(cos(1) + 2*sin(1) + 2*sin(2))
cos(1) + 4*sin(3/2)*cos(1/2)
If no change is made by TR9, no re-arrangement of the
expression will be made. For example, though factoring
of common term is attempted, if the factored expression
wasn't changed, the original expression will be returned:
>>> TR9(cos(3) + cos(3)*cos(2))
cos(3) + cos(2)*cos(3)
"""
def f(rv):
if not rv.is_Add:
return rv
def do(rv, first=True):
# cos(a)+/-cos(b) can be combined into a product of cosines and
# sin(a)+/-sin(b) can be combined into a product of cosine and
# sine.
#
# If there are more than two args, the pairs which "work" will
# have a gcd extractable and the remaining two terms will have
# the above structure -- all pairs must be checked to find the
# ones that work. args that don't have a common set of symbols
# are skipped since this doesn't lead to a simpler formula and
# also has the arbitrariness of combining, for example, the x
# and y term instead of the y and z term in something like
# cos(x) + cos(y) + cos(z).
if not rv.is_Add:
return rv
args = list(ordered(rv.args))
if len(args) != 2:
hit = False
for i in range(len(args)):
ai = args[i]
if ai is None:
continue
for j in range(i + 1, len(args)):
aj = args[j]
if aj is None:
continue
was = ai + aj
new = do(was)
if new != was:
args[i] = new # update in place
args[j] = None
hit = True
break # go to next i
if hit:
rv = Add(*[_f for _f in args if _f])
if rv.is_Add:
rv = do(rv)
return rv
# two-arg Add
split = trig_split(*args)
if not split:
return rv
gcd, n1, n2, a, b, iscos = split
# application of rule if possible
if iscos:
if n1 == n2:
return gcd*n1*2*cos((a + b)/2)*cos((a - b)/2)
if n1 < 0:
a, b = b, a
return -2*gcd*sin((a + b)/2)*sin((a - b)/2)
else:
if n1 == n2:
return gcd*n1*2*sin((a + b)/2)*cos((a - b)/2)
if n1 < 0:
a, b = b, a
return 2*gcd*cos((a + b)/2)*sin((a - b)/2)
return process_common_addends(rv, do) # DON'T sift by free symbols
return bottom_up(rv, f)
def TR10(rv, first=True):
"""Separate sums in ``cos`` and ``sin``.
Examples
========
>>> from sympy.simplify.fu import TR10
>>> from sympy.abc import a, b, c
>>> from sympy import cos, sin
>>> TR10(cos(a + b))
-sin(a)*sin(b) + cos(a)*cos(b)
>>> TR10(sin(a + b))
sin(a)*cos(b) + sin(b)*cos(a)
>>> TR10(sin(a + b + c))
(-sin(a)*sin(b) + cos(a)*cos(b))*sin(c) + \
(sin(a)*cos(b) + sin(b)*cos(a))*cos(c)
"""
def f(rv):
if not rv.func in (cos, sin):
return rv
f = rv.func
arg = rv.args[0]
if arg.is_Add:
if first:
args = list(ordered(arg.args))
else:
args = list(arg.args)
a = args.pop()
b = Add._from_args(args)
if b.is_Add:
if f == sin:
return sin(a)*TR10(cos(b), first=False) + \
cos(a)*TR10(sin(b), first=False)
else:
return cos(a)*TR10(cos(b), first=False) - \
sin(a)*TR10(sin(b), first=False)
else:
if f == sin:
return sin(a)*cos(b) + cos(a)*sin(b)
else:
return cos(a)*cos(b) - sin(a)*sin(b)
return rv
return bottom_up(rv, f)
def TR10i(rv):
"""Sum of products to function of sum.
Examples
========
>>> from sympy.simplify.fu import TR10i
>>> from sympy import cos, sin, sqrt
>>> from sympy.abc import x
>>> TR10i(cos(1)*cos(3) + sin(1)*sin(3))
cos(2)
>>> TR10i(cos(1)*sin(3) + sin(1)*cos(3) + cos(3))
cos(3) + sin(4)
>>> TR10i(sqrt(2)*cos(x)*x + sqrt(6)*sin(x)*x)
2*sqrt(2)*x*sin(x + pi/6)
"""
global _ROOT2, _ROOT3, _invROOT3
if _ROOT2 is None:
_roots()
def f(rv):
if not rv.is_Add:
return rv
def do(rv, first=True):
# args which can be expressed as A*(cos(a)*cos(b)+/-sin(a)*sin(b))
# or B*(cos(a)*sin(b)+/-cos(b)*sin(a)) can be combined into
# A*f(a+/-b) where f is either sin or cos.
#
# If there are more than two args, the pairs which "work" will have
# a gcd extractable and the remaining two terms will have the above
# structure -- all pairs must be checked to find the ones that
# work.
if not rv.is_Add:
return rv
args = list(ordered(rv.args))
if len(args) != 2:
hit = False
for i in range(len(args)):
ai = args[i]
if ai is None:
continue
for j in range(i + 1, len(args)):
aj = args[j]
if aj is None:
continue
was = ai + aj
new = do(was)
if new != was:
args[i] = new # update in place
args[j] = None
hit = True
break # go to next i
if hit:
rv = Add(*[_f for _f in args if _f])
if rv.is_Add:
rv = do(rv)
return rv
# two-arg Add
split = trig_split(*args, two=True)
if not split:
return rv
gcd, n1, n2, a, b, same = split
# identify and get c1 to be cos then apply rule if possible
if same: # coscos, sinsin
gcd = n1*gcd
if n1 == n2:
return gcd*cos(a - b)
return gcd*cos(a + b)
else: #cossin, cossin
gcd = n1*gcd
if n1 == n2:
return gcd*sin(a + b)
return gcd*sin(b - a)
rv = process_common_addends(
rv, do, lambda x: tuple(ordered(x.free_symbols)))
# need to check for inducible pairs in ratio of sqrt(3):1 that
# appeared in different lists when sorting by coefficient
while rv.is_Add:
byrad = defaultdict(list)
for a in rv.args:
hit = 0
if a.is_Mul:
for ai in a.args:
if ai.is_Pow and ai.exp is S.Half and \
ai.base.is_Integer:
byrad[ai].append(a)
hit = 1
break
if not hit:
byrad[S.One].append(a)
# no need to check all pairs -- just check for the ones
# that have the right ratio
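# e.g. in the docstring example sqrt(2)*x*cos(x) + sqrt(6)*x*sin(x), the
# terms are keyed under sqrt(2) and sqrt(6); their ratio is sqrt(3), so the
# pair can still be combined into 2*sqrt(2)*x*sin(x + pi/6).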
args = []
for a in byrad:
for b in [_ROOT3*a, _invROOT3]:
if b in byrad:
for i in range(len(byrad[a])):
if byrad[a][i] is None:
continue
for j in range(len(byrad[b])):
if byrad[b][j] is None:
continue
was = Add(byrad[a][i] + byrad[b][j])
new = do(was)
if new != was:
args.append(new)
byrad[a][i] = None
byrad[b][j] = None
break
if args:
rv = Add(*(args + [Add(*[_f for _f in v if _f])
for v in byrad.values()]))
else:
rv = do(rv) # final pass to resolve any new inducible pairs
break
return rv
return bottom_up(rv, f)
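# Editor's sketch (not part of the original module): a small illustration of the
# pairing done in ``do`` above, including the sqrt(3):1 coefficient ratio that
# the ``byrad`` pass looks for.  The helper name and the expected result in the
# comment are assumptions for illustration, not a verified doctest.
def _tr10i_ratio_sketch():
    from sympy.abc import x
    # cos(x) and sqrt(3)*sin(x) land in different ``byrad`` buckets but have the
    # right ratio, so they should be combined into a single shifted sine.
    return TR10i(cos(x) + sqrt(3)*sin(x))  # expected: 2*sin(x + pi/6)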
def TR11(rv, base=None):
"""Function of double angle to product. The ``base`` argument can be used
    to indicate the un-doubled argument, e.g. if 3*pi/7 is the base
then cosine and sine functions with argument 6*pi/7 will be replaced.
Examples
========
>>> from sympy.simplify.fu import TR11
>>> from sympy import cos, sin, pi
>>> from sympy.abc import x
>>> TR11(sin(2*x))
2*sin(x)*cos(x)
>>> TR11(cos(2*x))
-sin(x)**2 + cos(x)**2
>>> TR11(sin(4*x))
4*(-sin(x)**2 + cos(x)**2)*sin(x)*cos(x)
>>> TR11(sin(4*x/3))
4*(-sin(x/3)**2 + cos(x/3)**2)*sin(x/3)*cos(x/3)
If the arguments are simply integers, no change is made
unless a base is provided:
>>> TR11(cos(2))
cos(2)
>>> TR11(cos(4), 2)
-sin(2)**2 + cos(2)**2
There is a subtle issue here in that autosimplification will convert
some higher angles to lower angles
>>> cos(6*pi/7) + cos(3*pi/7)
-cos(pi/7) + cos(3*pi/7)
The 6*pi/7 angle is now pi/7 but can be targeted with TR11 by supplying
the 3*pi/7 base:
>>> TR11(_, 3*pi/7)
-sin(3*pi/7)**2 + cos(3*pi/7)**2 + cos(3*pi/7)
"""
def f(rv):
        if rv.func not in (cos, sin):
return rv
if base:
f = rv.func
t = f(base*2)
co = S.One
if t.is_Mul:
co, t = t.as_coeff_Mul()
            if t.func not in (cos, sin):
return rv
if rv.args[0] == t.args[0]:
c = cos(base)
s = sin(base)
if f is cos:
return (c**2 - s**2)/co
else:
return 2*c*s/co
return rv
elif not rv.args[0].is_Number:
# make a change if the leading coefficient's numerator is
# divisible by 2
c, m = rv.args[0].as_coeff_Mul(rational=True)
if c.p % 2 == 0:
arg = c.p//2*m/c.q
c = TR11(cos(arg))
s = TR11(sin(arg))
if rv.func == sin:
rv = 2*s*c
else:
rv = c**2 - s**2
return rv
return bottom_up(rv, f)
def _TR11(rv):
"""
Helper for TR11 to find half-arguments for sin in factors of
num/den that appear in cos or sin factors in the den/num.
Examples
========
>>> from sympy.simplify.fu import TR11, _TR11
>>> from sympy import cos, sin
>>> from sympy.abc import x
>>> TR11(sin(x/3)/(cos(x/6)))
sin(x/3)/cos(x/6)
>>> _TR11(sin(x/3)/(cos(x/6)))
2*sin(x/6)
>>> TR11(sin(x/6)/(sin(x/3)))
sin(x/6)/sin(x/3)
>>> _TR11(sin(x/6)/(sin(x/3)))
1/(2*cos(x/6))
"""
def f(rv):
if not isinstance(rv, Expr):
return rv
def sincos_args(flat):
# find arguments of sin and cos that
            # appear as bases in args of flat
# and have Integer exponents
args = defaultdict(set)
for fi in Mul.make_args(flat):
b, e = fi.as_base_exp()
if e.is_Integer and e > 0:
if b.func in (cos, sin):
args[b.func].add(b.args[0])
return args
num_args, den_args = map(sincos_args, rv.as_numer_denom())
def handle_match(rv, num_args, den_args):
# for arg in sin args of num_args, look for arg/2
# in den_args and pass this half-angle to TR11
# for handling in rv
for narg in num_args[sin]:
half = narg/2
if half in den_args[cos]:
func = cos
elif half in den_args[sin]:
func = sin
else:
continue
rv = TR11(rv, half)
den_args[func].remove(half)
return rv
# sin in num, sin or cos in den
rv = handle_match(rv, num_args, den_args)
# sin in den, sin or cos in num
rv = handle_match(rv, den_args, num_args)
return rv
return bottom_up(rv, f)
def TR12(rv, first=True):
"""Separate sums in ``tan``.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import tan
>>> from sympy.simplify.fu import TR12
>>> TR12(tan(x + y))
(tan(x) + tan(y))/(-tan(x)*tan(y) + 1)
"""
def f(rv):
        if rv.func != tan:
return rv
arg = rv.args[0]
if arg.is_Add:
if first:
args = list(ordered(arg.args))
else:
args = list(arg.args)
a = args.pop()
b = Add._from_args(args)
if b.is_Add:
tb = TR12(tan(b), first=False)
else:
tb = tan(b)
return (tan(a) + tb)/(1 - tan(a)*tb)
return rv
return bottom_up(rv, f)
def TR12i(rv):
"""Combine tan arguments as
(tan(y) + tan(x))/(tan(x)*tan(y) - 1) -> -tan(x + y).
Examples
========
>>> from sympy.simplify.fu import TR12i
>>> from sympy import tan
>>> from sympy.abc import a, b, c
>>> ta, tb, tc = [tan(i) for i in (a, b, c)]
>>> TR12i((ta + tb)/(-ta*tb + 1))
tan(a + b)
>>> TR12i((ta + tb)/(ta*tb - 1))
-tan(a + b)
>>> TR12i((-ta - tb)/(ta*tb - 1))
tan(a + b)
>>> eq = (ta + tb)/(-ta*tb + 1)**2*(-3*ta - 3*tc)/(2*(ta*tc - 1))
>>> TR12i(eq.expand())
-3*tan(a + b)*tan(a + c)/(2*(tan(a) + tan(b) - 1))
"""
def f(rv):
if not (rv.is_Add or rv.is_Mul or rv.is_Pow):
return rv
n, d = rv.as_numer_denom()
if not d.args or not n.args:
return rv
dok = {}
def ok(di):
m = as_f_sign_1(di)
if m:
g, f, s = m
if s is S.NegativeOne and f.is_Mul and len(f.args) == 2 and \
all(isinstance(fi, tan) for fi in f.args):
return g, f
d_args = list(Mul.make_args(d))
for i, di in enumerate(d_args):
m = ok(di)
if m:
g, t = m
s = Add(*[_.args[0] for _ in t.args])
dok[s] = S.One
d_args[i] = g
continue
if di.is_Add:
di = factor(di)
if di.is_Mul:
d_args.extend(di.args)
d_args[i] = S.One
elif di.is_Pow and (di.exp.is_integer or di.base.is_positive):
m = ok(di.base)
if m:
g, t = m
s = Add(*[_.args[0] for _ in t.args])
dok[s] = di.exp
d_args[i] = g**di.exp
else:
di = factor(di)
if di.is_Mul:
d_args.extend(di.args)
d_args[i] = S.One
if not dok:
return rv
def ok(ni):
if ni.is_Add and len(ni.args) == 2:
a, b = ni.args
if isinstance(a, tan) and isinstance(b, tan):
return a, b
n_args = list(Mul.make_args(factor_terms(n)))
hit = False
for i, ni in enumerate(n_args):
m = ok(ni)
if not m:
m = ok(-ni)
if m:
n_args[i] = S.NegativeOne
else:
if ni.is_Add:
ni = factor(ni)
if ni.is_Mul:
n_args.extend(ni.args)
n_args[i] = S.One
continue
elif ni.is_Pow and (
ni.exp.is_integer or ni.base.is_positive):
m = ok(ni.base)
if m:
n_args[i] = S.One
else:
ni = factor(ni)
if ni.is_Mul:
n_args.extend(ni.args)
n_args[i] = S.One
continue
else:
continue
else:
n_args[i] = S.One
hit = True
s = Add(*[_.args[0] for _ in m])
ed = dok[s]
newed = ed.extract_additively(S.One)
if newed is not None:
if newed:
dok[s] = newed
else:
dok.pop(s)
n_args[i] *= -tan(s)
if hit:
rv = Mul(*n_args)/Mul(*d_args)/Mul(*[(Add(*[
tan(a) for a in i.args]) - 1)**e for i, e in dok.items()])
return rv
return bottom_up(rv, f)
def TR13(rv):
"""Change products of ``tan`` or ``cot``.
Examples
========
>>> from sympy.simplify.fu import TR13
>>> from sympy import tan, cot
>>> TR13(tan(3)*tan(2))
-tan(2)/tan(5) - tan(3)/tan(5) + 1
>>> TR13(cot(3)*cot(2))
cot(2)*cot(5) + 1 + cot(3)*cot(5)
"""
def f(rv):
if not rv.is_Mul:
return rv
# XXX handle products of powers? or let power-reducing handle it?
args = {tan: [], cot: [], None: []}
for a in ordered(Mul.make_args(rv)):
if a.func in (tan, cot):
args[a.func].append(a.args[0])
else:
args[None].append(a)
t = args[tan]
c = args[cot]
if len(t) < 2 and len(c) < 2:
return rv
args = args[None]
while len(t) > 1:
t1 = t.pop()
t2 = t.pop()
args.append(1 - (tan(t1)/tan(t1 + t2) + tan(t2)/tan(t1 + t2)))
if t:
args.append(tan(t.pop()))
while len(c) > 1:
t1 = c.pop()
t2 = c.pop()
args.append(1 + cot(t1)*cot(t1 + t2) + cot(t2)*cot(t1 + t2))
if c:
args.append(cot(c.pop()))
return Mul(*args)
return bottom_up(rv, f)
def TRmorrie(rv):
"""Returns cos(x)*cos(2*x)*...*cos(2**(k-1)*x) -> sin(2**k*x)/(2**k*sin(x))
Examples
========
>>> from sympy.simplify.fu import TRmorrie, TR8, TR3
>>> from sympy.abc import x
>>> from sympy import Mul, cos, pi
>>> TRmorrie(cos(x)*cos(2*x))
sin(4*x)/(4*sin(x))
>>> TRmorrie(7*Mul(*[cos(x) for x in range(10)]))
7*sin(12)*sin(16)*cos(5)*cos(7)*cos(9)/(64*sin(1)*sin(3))
    Sometimes autosimplification will prevent a power from being
    recognized. For example, in the following, cos(4*pi/7) automatically
    simplifies to -cos(3*pi/7) so only 2 of the 3 terms are
    recognized:
>>> TRmorrie(cos(pi/7)*cos(2*pi/7)*cos(4*pi/7))
-sin(3*pi/7)*cos(3*pi/7)/(4*sin(pi/7))
A touch by TR8 resolves the expression to a Rational
>>> TR8(_)
-1/8
In this case, if eq is unsimplified, the answer is obtained
directly:
>>> eq = cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9)
>>> TRmorrie(eq)
1/16
But if angles are made canonical with TR3 then the answer
is not simplified without further work:
>>> TR3(eq)
sin(pi/18)*cos(pi/9)*cos(2*pi/9)/2
>>> TRmorrie(_)
sin(pi/18)*sin(4*pi/9)/(8*sin(pi/9))
>>> TR8(_)
cos(7*pi/18)/(16*sin(pi/9))
>>> TR3(_)
1/16
    The original expression would have resolved to 1/16 directly with TR8,
however:
>>> TR8(eq)
1/16
References
==========
.. [1] https://en.wikipedia.org/wiki/Morrie%27s_law
"""
def f(rv, first=True):
if not rv.is_Mul:
return rv
if first:
n, d = rv.as_numer_denom()
return f(n, 0)/f(d, 0)
args = defaultdict(list)
coss = {}
other = []
for c in rv.args:
b, e = c.as_base_exp()
if e.is_Integer and isinstance(b, cos):
co, a = b.args[0].as_coeff_Mul()
args[a].append(co)
coss[b] = e
else:
other.append(c)
new = []
for a in args:
c = args[a]
c.sort()
while c:
k = 0
cc = ci = c[0]
while cc in c:
k += 1
cc *= 2
if k > 1:
newarg = sin(2**k*ci*a)/2**k/sin(ci*a)
# see how many times this can be taken
take = None
ccs = []
for i in range(k):
cc /= 2
key = cos(a*cc, evaluate=False)
ccs.append(cc)
take = min(coss[key], take or coss[key])
# update exponent counts
for i in range(k):
cc = ccs.pop()
key = cos(a*cc, evaluate=False)
coss[key] -= take
if not coss[key]:
c.remove(cc)
new.append(newarg**take)
else:
b = cos(c.pop(0)*a)
other.append(b**coss[b])
if new:
rv = Mul(*(new + other + [
cos(k*a, evaluate=False) for a in args for k in args[a]]))
return rv
return bottom_up(rv, f)
def TR14(rv, first=True):
"""Convert factored powers of sin and cos identities into simpler
expressions.
Examples
========
>>> from sympy.simplify.fu import TR14
>>> from sympy.abc import x, y
>>> from sympy import cos, sin
>>> TR14((cos(x) - 1)*(cos(x) + 1))
-sin(x)**2
>>> TR14((sin(x) - 1)*(sin(x) + 1))
-cos(x)**2
>>> p1 = (cos(x) + 1)*(cos(x) - 1)
>>> p2 = (cos(y) - 1)*2*(cos(y) + 1)
>>> p3 = (3*(cos(y) - 1))*(3*(cos(y) + 1))
>>> TR14(p1*p2*p3*(x - 1))
-18*(x - 1)*sin(x)**2*sin(y)**4
"""
def f(rv):
if not rv.is_Mul:
return rv
if first:
# sort them by location in numerator and denominator
# so the code below can just deal with positive exponents
n, d = rv.as_numer_denom()
if d is not S.One:
newn = TR14(n, first=False)
newd = TR14(d, first=False)
if newn != n or newd != d:
rv = newn/newd
return rv
other = []
process = []
for a in rv.args:
if a.is_Pow:
b, e = a.as_base_exp()
if not (e.is_integer or b.is_positive):
other.append(a)
continue
a = b
else:
e = S.One
m = as_f_sign_1(a)
if not m or m[1].func not in (cos, sin):
if e is S.One:
other.append(a)
else:
other.append(a**e)
continue
g, f, si = m
process.append((g, e.is_Number, e, f, si, a))
# sort them to get like terms next to each other
process = list(ordered(process))
# keep track of whether there was any change
nother = len(other)
# access keys
keys = (g, t, e, f, si, a) = list(range(6))
while process:
A = process.pop(0)
if process:
B = process[0]
if A[e].is_Number and B[e].is_Number:
# both exponents are numbers
if A[f] == B[f]:
if A[si] != B[si]:
B = process.pop(0)
take = min(A[e], B[e])
# reinsert any remainder
# the B will likely sort after A so check it first
if B[e] != take:
rem = [B[i] for i in keys]
rem[e] -= take
process.insert(0, rem)
elif A[e] != take:
rem = [A[i] for i in keys]
rem[e] -= take
process.insert(0, rem)
if isinstance(A[f], cos):
t = sin
else:
t = cos
other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take)
continue
elif A[e] == B[e]:
# both exponents are equal symbols
if A[f] == B[f]:
if A[si] != B[si]:
B = process.pop(0)
take = A[e]
if isinstance(A[f], cos):
t = sin
else:
t = cos
other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take)
continue
# either we are done or neither condition above applied
other.append(A[a]**A[e])
if len(other) != nother:
rv = Mul(*other)
return rv
return bottom_up(rv, f)
def TR15(rv, max=4, pow=False):
"""Convert sin(x)**-2 to 1 + cot(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR15
>>> from sympy.abc import x
>>> from sympy import sin
>>> TR15(1 - 1/sin(x)**2)
-cot(x)**2
"""
def f(rv):
if not (isinstance(rv, Pow) and isinstance(rv.base, sin)):
return rv
e = rv.exp
if e % 2 == 1:
return TR15(rv.base**(e + 1))/rv.base
ia = 1/rv
a = _TR56(ia, sin, cot, lambda x: 1 + x, max=max, pow=pow)
if a != ia:
rv = a
return rv
return bottom_up(rv, f)
def TR16(rv, max=4, pow=False):
"""Convert cos(x)**-2 to 1 + tan(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR16
>>> from sympy.abc import x
>>> from sympy import cos
>>> TR16(1 - 1/cos(x)**2)
-tan(x)**2
"""
def f(rv):
if not (isinstance(rv, Pow) and isinstance(rv.base, cos)):
return rv
e = rv.exp
if e % 2 == 1:
return TR15(rv.base**(e + 1))/rv.base
ia = 1/rv
a = _TR56(ia, cos, tan, lambda x: 1 + x, max=max, pow=pow)
if a != ia:
rv = a
return rv
return bottom_up(rv, f)
def TR111(rv):
"""Convert f(x)**-i to g(x)**i where either ``i`` is an integer
or the base is positive and f, g are: tan, cot; sin, csc; or cos, sec.
Examples
========
>>> from sympy.simplify.fu import TR111
>>> from sympy.abc import x
>>> from sympy import tan
>>> TR111(1 - 1/tan(x)**2)
1 - cot(x)**2
"""
def f(rv):
if not (
isinstance(rv, Pow) and
(rv.base.is_positive or rv.exp.is_integer and rv.exp.is_negative)):
return rv
if isinstance(rv.base, tan):
return cot(rv.base.args[0])**-rv.exp
elif isinstance(rv.base, sin):
return csc(rv.base.args[0])**-rv.exp
elif isinstance(rv.base, cos):
return sec(rv.base.args[0])**-rv.exp
return rv
return bottom_up(rv, f)
def TR22(rv, max=4, pow=False):
"""Convert tan(x)**2 to sec(x)**2 - 1 and cot(x)**2 to csc(x)**2 - 1.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR22
>>> from sympy.abc import x
>>> from sympy import tan, cot
>>> TR22(1 + tan(x)**2)
sec(x)**2
>>> TR22(1 + cot(x)**2)
csc(x)**2
"""
def f(rv):
if not (isinstance(rv, Pow) and rv.base.func in (cot, tan)):
return rv
rv = _TR56(rv, tan, sec, lambda x: x - 1, max=max, pow=pow)
rv = _TR56(rv, cot, csc, lambda x: x - 1, max=max, pow=pow)
return rv
return bottom_up(rv, f)
def TRpower(rv):
"""Convert sin(x)**n and cos(x)**n with positive n to sums.
Examples
========
>>> from sympy.simplify.fu import TRpower
>>> from sympy.abc import x
>>> from sympy import cos, sin
>>> TRpower(sin(x)**6)
-15*cos(2*x)/32 + 3*cos(4*x)/16 - cos(6*x)/32 + 5/16
>>> TRpower(sin(x)**3*cos(2*x)**4)
(3*sin(x)/4 - sin(3*x)/4)*(cos(4*x)/2 + cos(8*x)/8 + 3/8)
References
==========
.. [1] https://en.wikipedia.org/wiki/List_of_trigonometric_identities#Power-reduction_formulae
"""
def f(rv):
if not (isinstance(rv, Pow) and isinstance(rv.base, (sin, cos))):
return rv
b, n = rv.as_base_exp()
x = b.args[0]
if n.is_Integer and n.is_positive:
if n.is_odd and isinstance(b, cos):
rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x)
for k in range((n + 1)/2)])
elif n.is_odd and isinstance(b, sin):
rv = 2**(1-n)*S.NegativeOne**((n-1)/2)*Add(*[binomial(n, k)*
S.NegativeOne**k*sin((n - 2*k)*x) for k in range((n + 1)/2)])
elif n.is_even and isinstance(b, cos):
rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x)
for k in range(n/2)])
elif n.is_even and isinstance(b, sin):
rv = 2**(1-n)*S.NegativeOne**(n/2)*Add(*[binomial(n, k)*
S.NegativeOne**k*cos((n - 2*k)*x) for k in range(n/2)])
if n.is_even:
rv += 2**(-n)*binomial(n, n/2)
return rv
return bottom_up(rv, f)
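# Editor's sketch (not part of the original module): the even-power branch above
# applied to the smallest case; the expected value follows from the identity
# sin(x)**2 = (1 - cos(2*x))/2 and is an assumption, not a verified doctest.
def _trpower_sketch():
    from sympy.abc import x
    return TRpower(sin(x)**2)  # expected: 1/2 - cos(2*x)/2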
def L(rv):
"""Return count of trigonometric functions in expression.
Examples
========
>>> from sympy.simplify.fu import L
>>> from sympy.abc import x
>>> from sympy import cos, sin
>>> L(cos(x)+sin(x))
2
"""
return S(rv.count(TrigonometricFunction))
# ============== end of basic Fu-like tools =====================
if SYMPY_DEBUG:
(TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13,
TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22
)= list(map(debug,
(TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13,
TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22)))
# tuples are chains -- (f, g) -> lambda x: g(f(x))
# lists are choices -- [f, g] -> lambda x: min(f(x), g(x), key=objective)
CTR1 = [(TR5, TR0), (TR6, TR0), identity]
CTR2 = (TR11, [(TR5, TR0), (TR6, TR0), TR0])
CTR3 = [(TRmorrie, TR8, TR0), (TRmorrie, TR8, TR10i, TR0), identity]
CTR4 = [(TR4, TR10i), identity]
RL1 = (TR4, TR3, TR4, TR12, TR4, TR13, TR4, TR0)
# XXX it's a little unclear how this one is to be implemented
# see Fu paper of reference, page 7. What is the Union symbol referring to?
# The diagram shows all these as one chain of transformations, but the
# text refers to them being applied independently. Also, a break
# if L starts to increase has not been implemented.
RL2 = [
(TR4, TR3, TR10, TR4, TR3, TR11),
(TR5, TR7, TR11, TR4),
(CTR3, CTR1, TR9, CTR2, TR4, TR9, TR9, CTR4),
identity,
]
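# Editor's sketch (not part of the original module): how the tuple/list
# convention noted above is consumed by ``greedy``.  A tuple such as
# ``(TR5, TR0)`` behaves like ``lambda x: TR0(TR5(x))``, while a list such as
# ``CTR1`` keeps whichever candidate minimizes the objective; this mirrors the
# ``greedy(RL1, measure)`` calls made in ``fu`` below.
def _chain_choice_sketch(expr):
    objective = lambda e: (L(e), e.count_ops())
    chained = greedy((TR5, TR0), objective)(expr)  # TR5, then TR0
    chosen = greedy(CTR1, objective)(expr)         # best of CTR1's three branches
    return chained, chosen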
def fu(rv, measure=lambda x: (L(x), x.count_ops())):
"""Attempt to simplify expression by using transformation rules given
in the algorithm by Fu et al.
:func:`fu` will try to minimize the objective function ``measure``.
By default this first minimizes the number of trig terms and then minimizes
the number of total operations.
Examples
========
>>> from sympy.simplify.fu import fu
>>> from sympy import cos, sin, tan, pi, S, sqrt
>>> from sympy.abc import x, y, a, b
>>> fu(sin(50)**2 + cos(50)**2 + sin(pi/6))
3/2
>>> fu(sqrt(6)*cos(x) + sqrt(2)*sin(x))
2*sqrt(2)*sin(x + pi/3)
CTR1 example
>>> eq = sin(x)**4 - cos(y)**2 + sin(y)**2 + 2*cos(x)**2
>>> fu(eq)
cos(x)**4 - 2*cos(y)**2 + 2
CTR2 example
>>> fu(S.Half - cos(2*x)/2)
sin(x)**2
CTR3 example
>>> fu(sin(a)*(cos(b) - sin(b)) + cos(a)*(sin(b) + cos(b)))
sqrt(2)*sin(a + b + pi/4)
CTR4 example
>>> fu(sqrt(3)*cos(x)/2 + sin(x)/2)
sin(x + pi/3)
Example 1
>>> fu(1-sin(2*x)**2/4-sin(y)**2-cos(x)**4)
-cos(x)**2 + cos(y)**2
Example 2
>>> fu(cos(4*pi/9))
sin(pi/18)
>>> fu(cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9))
1/16
Example 3
>>> fu(tan(7*pi/18)+tan(5*pi/18)-sqrt(3)*tan(5*pi/18)*tan(7*pi/18))
-sqrt(3)
Objective function example
>>> fu(sin(x)/cos(x)) # default objective function
tan(x)
>>> fu(sin(x)/cos(x), measure=lambda x: -x.count_ops()) # maximize op count
sin(x)/cos(x)
References
==========
.. [1] https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.657.2478&rep=rep1&type=pdf
"""
fRL1 = greedy(RL1, measure)
fRL2 = greedy(RL2, measure)
was = rv
rv = sympify(rv)
if not isinstance(rv, Expr):
return rv.func(*[fu(a, measure=measure) for a in rv.args])
rv = TR1(rv)
if rv.has(tan, cot):
rv1 = fRL1(rv)
if (measure(rv1) < measure(rv)):
rv = rv1
if rv.has(tan, cot):
rv = TR2(rv)
if rv.has(sin, cos):
rv1 = fRL2(rv)
rv2 = TR8(TRmorrie(rv1))
rv = min([was, rv, rv1, rv2], key=measure)
return min(TR2i(rv), rv, key=measure)
def process_common_addends(rv, do, key2=None, key1=True):
"""Apply ``do`` to addends of ``rv`` that (if ``key1=True``) share at least
a common absolute value of their coefficient and the value of ``key2`` when
applied to the argument. If ``key1`` is False ``key2`` must be supplied and
will be the only key applied.
"""
# collect by absolute value of coefficient and key2
absc = defaultdict(list)
if key1:
for a in rv.args:
c, a = a.as_coeff_Mul()
if c < 0:
c = -c
a = -a # put the sign on `a`
absc[(c, key2(a) if key2 else 1)].append(a)
elif key2:
for a in rv.args:
absc[(S.One, key2(a))].append(a)
else:
raise ValueError('must have at least one key')
args = []
hit = False
for k in absc:
v = absc[k]
c, _ = k
if len(v) > 1:
e = Add(*v, evaluate=False)
new = do(e)
if new != e:
e = new
hit = True
args.append(c*e)
else:
args.append(c*v[0])
if hit:
rv = Add(*args)
return rv
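# Editor's sketch (not part of the original module): with the default
# ``key1=True`` the addends are grouped by the absolute value of their
# coefficients, so ``do`` is handed the sub-sums cos(x) + cos(y) and
# sin(x) + sin(y) rather than the whole expression.  The grouping described in
# the trailing comment is the expected behaviour, not a verified doctest.
def _process_common_addends_sketch():
    from sympy.abc import x, y
    seen = []
    def do(e):
        seen.append(e)  # record each grouped sub-sum handed to ``do``
        return e
    process_common_addends(cos(x) + cos(y) + 2*sin(x) + 2*sin(y), do)
    return seen  # expected: [cos(x) + cos(y), sin(x) + sin(y)] in some order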
fufuncs = '''
TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 TR8 TR9 TR10 TR10i TR11
TR12 TR13 L TR2i TRmorrie TR12i
TR14 TR15 TR16 TR111 TR22'''.split()
FU = dict(list(zip(fufuncs, list(map(locals().get, fufuncs)))))
def _roots():
global _ROOT2, _ROOT3, _invROOT3
_ROOT2, _ROOT3 = sqrt(2), sqrt(3)
_invROOT3 = 1/_ROOT3
_ROOT2 = None
def trig_split(a, b, two=False):
"""Return the gcd, s1, s2, a1, a2, bool where
If two is False (default) then::
a + b = gcd*(s1*f(a1) + s2*f(a2)) where f = cos if bool else sin
else:
if bool, a + b was +/- cos(a1)*cos(a2) +/- sin(a1)*sin(a2) and equals
n1*gcd*cos(a - b) if n1 == n2 else
n1*gcd*cos(a + b)
else a + b was +/- cos(a1)*sin(a2) +/- sin(a1)*cos(a2) and equals
        n1*gcd*sin(a + b) if n1 == n2 else
n1*gcd*sin(b - a)
Examples
========
>>> from sympy.simplify.fu import trig_split
>>> from sympy.abc import x, y, z
>>> from sympy import cos, sin, sqrt
>>> trig_split(cos(x), cos(y))
(1, 1, 1, x, y, True)
>>> trig_split(2*cos(x), -2*cos(y))
(2, 1, -1, x, y, True)
>>> trig_split(cos(x)*sin(y), cos(y)*sin(y))
(sin(y), 1, 1, x, y, True)
>>> trig_split(cos(x), -sqrt(3)*sin(x), two=True)
(2, 1, -1, x, pi/6, False)
>>> trig_split(cos(x), sin(x), two=True)
(sqrt(2), 1, 1, x, pi/4, False)
>>> trig_split(cos(x), -sin(x), two=True)
(sqrt(2), 1, -1, x, pi/4, False)
>>> trig_split(sqrt(2)*cos(x), -sqrt(6)*sin(x), two=True)
(2*sqrt(2), 1, -1, x, pi/6, False)
>>> trig_split(-sqrt(6)*cos(x), -sqrt(2)*sin(x), two=True)
(-2*sqrt(2), 1, 1, x, pi/3, False)
>>> trig_split(cos(x)/sqrt(6), sin(x)/sqrt(2), two=True)
(sqrt(6)/3, 1, 1, x, pi/6, False)
>>> trig_split(-sqrt(6)*cos(x)*sin(y), -sqrt(2)*sin(x)*sin(y), two=True)
(-2*sqrt(2)*sin(y), 1, 1, x, pi/3, False)
>>> trig_split(cos(x), sin(x))
>>> trig_split(cos(x), sin(z))
>>> trig_split(2*cos(x), -sin(x))
>>> trig_split(cos(x), -sqrt(3)*sin(x))
>>> trig_split(cos(x)*cos(y), sin(x)*sin(z))
>>> trig_split(cos(x)*cos(y), sin(x)*sin(y))
>>> trig_split(-sqrt(6)*cos(x), sqrt(2)*sin(x)*sin(y), two=True)
"""
global _ROOT2, _ROOT3, _invROOT3
if _ROOT2 is None:
_roots()
a, b = [Factors(i) for i in (a, b)]
ua, ub = a.normal(b)
gcd = a.gcd(b).as_expr()
n1 = n2 = 1
if S.NegativeOne in ua.factors:
ua = ua.quo(S.NegativeOne)
n1 = -n1
elif S.NegativeOne in ub.factors:
ub = ub.quo(S.NegativeOne)
n2 = -n2
a, b = [i.as_expr() for i in (ua, ub)]
def pow_cos_sin(a, two):
"""Return ``a`` as a tuple (r, c, s) such that
``a = (r or 1)*(c or 1)*(s or 1)``.
Three arguments are returned (radical, c-factor, s-factor) as
long as the conditions set by ``two`` are met; otherwise None is
returned. If ``two`` is True there will be one or two non-None
values in the tuple: c and s or c and r or s and r or s or c with c
being a cosine function (if possible) else a sine, and s being a sine
        function (if possible) else a cosine. If ``two`` is False then there
will only be a c or s term in the tuple.
        ``two`` also requires that either two cos and/or sin be present (with
the condition that if the functions are the same the arguments are
different or vice versa) or that a single cosine or a single sine
be present with an optional radical.
If the above conditions dictated by ``two`` are not met then None
is returned.
"""
c = s = None
co = S.One
if a.is_Mul:
co, a = a.as_coeff_Mul()
if len(a.args) > 2 or not two:
return None
if a.is_Mul:
args = list(a.args)
else:
args = [a]
a = args.pop(0)
if isinstance(a, cos):
c = a
elif isinstance(a, sin):
s = a
elif a.is_Pow and a.exp is S.Half: # autoeval doesn't allow -1/2
co *= a
else:
return None
if args:
b = args[0]
if isinstance(b, cos):
if c:
s = b
else:
c = b
elif isinstance(b, sin):
if s:
c = b
else:
s = b
elif b.is_Pow and b.exp is S.Half:
co *= b
else:
return None
return co if co is not S.One else None, c, s
elif isinstance(a, cos):
c = a
elif isinstance(a, sin):
s = a
if c is None and s is None:
return
co = co if co is not S.One else None
return co, c, s
# get the parts
m = pow_cos_sin(a, two)
if m is None:
return
coa, ca, sa = m
m = pow_cos_sin(b, two)
if m is None:
return
cob, cb, sb = m
# check them
if (not ca) and cb or ca and isinstance(ca, sin):
coa, ca, sa, cob, cb, sb = cob, cb, sb, coa, ca, sa
n1, n2 = n2, n1
if not two: # need cos(x) and cos(y) or sin(x) and sin(y)
c = ca or sa
s = cb or sb
if not isinstance(c, s.func):
return None
return gcd, n1, n2, c.args[0], s.args[0], isinstance(c, cos)
else:
if not coa and not cob:
if (ca and cb and sa and sb):
if isinstance(ca, sa.func) is not isinstance(cb, sb.func):
return
args = {j.args for j in (ca, sa)}
if not all(i.args in args for i in (cb, sb)):
return
return gcd, n1, n2, ca.args[0], sa.args[0], isinstance(ca, sa.func)
if ca and sa or cb and sb or \
two and (ca is None and sa is None or cb is None and sb is None):
return
c = ca or sa
s = cb or sb
if c.args != s.args:
return
if not coa:
coa = S.One
if not cob:
cob = S.One
if coa is cob:
gcd *= _ROOT2
return gcd, n1, n2, c.args[0], pi/4, False
elif coa/cob == _ROOT3:
gcd *= 2*cob
return gcd, n1, n2, c.args[0], pi/3, False
elif coa/cob == _invROOT3:
gcd *= 2*coa
return gcd, n1, n2, c.args[0], pi/6, False
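# Editor's sketch (not part of the original module): the two=False contract
# documented above can be checked by rebuilding the sum from the returned
# tuple; the zero difference is the expected outcome, not a verified doctest.
def _trig_split_reconstruct_sketch():
    from sympy import simplify
    from sympy.abc import x, y
    a, b = 2*cos(x), -2*cos(y)
    gcd, n1, n2, a1, a2, iscos = trig_split(a, b)  # (2, 1, -1, x, y, True)
    f = cos if iscos else sin
    return simplify(gcd*(n1*f(a1) + n2*f(a2)) - (a + b))  # expected: 0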
def as_f_sign_1(e):
"""If ``e`` is a sum that can be written as ``g*(a + s)`` where
``s`` is ``+/-1``, return ``g``, ``a``, and ``s`` where ``a`` does
not have a leading negative coefficient.
Examples
========
>>> from sympy.simplify.fu import as_f_sign_1
>>> from sympy.abc import x
>>> as_f_sign_1(x + 1)
(1, x, 1)
>>> as_f_sign_1(x - 1)
(1, x, -1)
>>> as_f_sign_1(-x + 1)
(-1, x, -1)
>>> as_f_sign_1(-x - 1)
(-1, x, 1)
>>> as_f_sign_1(2*x + 2)
(2, x, 1)
"""
if not e.is_Add or len(e.args) != 2:
return
# exact match
a, b = e.args
if a in (S.NegativeOne, S.One):
g = S.One
if b.is_Mul and b.args[0].is_Number and b.args[0] < 0:
a, b = -a, -b
g = -g
return g, b, a
# gcd match
a, b = [Factors(i) for i in e.args]
ua, ub = a.normal(b)
gcd = a.gcd(b).as_expr()
if S.NegativeOne in ua.factors:
ua = ua.quo(S.NegativeOne)
n1 = -1
n2 = 1
elif S.NegativeOne in ub.factors:
ub = ub.quo(S.NegativeOne)
n1 = 1
n2 = -1
else:
n1 = n2 = 1
a, b = [i.as_expr() for i in (ua, ub)]
if a is S.One:
a, b = b, a
n1, n2 = n2, n1
if n1 == -1:
gcd = -gcd
n2 = -n2
if b is S.One:
return gcd, a, n2
def _osborne(e, d):
"""Replace all hyperbolic functions with trig functions using
the Osborne rule.
Notes
=====
``d`` is a dummy variable to prevent automatic evaluation
of trigonometric/hyperbolic functions.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
"""
def f(rv):
if not isinstance(rv, HyperbolicFunction):
return rv
a = rv.args[0]
a = a*d if not a.is_Add else Add._from_args([i*d for i in a.args])
if isinstance(rv, sinh):
return I*sin(a)
elif isinstance(rv, cosh):
return cos(a)
elif isinstance(rv, tanh):
return I*tan(a)
elif isinstance(rv, coth):
return cot(a)/I
elif isinstance(rv, sech):
return sec(a)
elif isinstance(rv, csch):
return csc(a)/I
else:
raise NotImplementedError('unhandled %s' % rv.func)
return bottom_up(e, f)
def _osbornei(e, d):
"""Replace all trig functions with hyperbolic functions using
the Osborne rule.
Notes
=====
``d`` is a dummy variable to prevent automatic evaluation
of trigonometric/hyperbolic functions.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
"""
def f(rv):
if not isinstance(rv, TrigonometricFunction):
return rv
const, x = rv.args[0].as_independent(d, as_Add=True)
a = x.xreplace({d: S.One}) + const*I
if isinstance(rv, sin):
return sinh(a)/I
elif isinstance(rv, cos):
return cosh(a)
elif isinstance(rv, tan):
return tanh(a)/I
elif isinstance(rv, cot):
return coth(a)*I
elif isinstance(rv, sec):
return sech(a)
elif isinstance(rv, csc):
return csch(a)*I
else:
raise NotImplementedError('unhandled %s' % rv.func)
return bottom_up(e, f)
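# Editor's sketch (not part of the original module): the dummy factor ``d``
# keeps the rewritten arguments from auto-evaluating, so ``_osbornei`` with the
# same dummy undoes ``_osborne``; the round trip shown is the expected
# behaviour, not a verified doctest.
def _osborne_roundtrip_sketch():
    d = Dummy()
    e = sinh(1) + cosh(2)
    return _osbornei(_osborne(e, d), d)  # expected: sinh(1) + cosh(2)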
def hyper_as_trig(rv):
"""Return an expression containing hyperbolic functions in terms
of trigonometric functions. Any trigonometric functions initially
present are replaced with Dummy symbols and the function to undo
the masking and the conversion back to hyperbolics is also returned. It
should always be true that::
t, f = hyper_as_trig(expr)
expr == f(t)
Examples
========
>>> from sympy.simplify.fu import hyper_as_trig, fu
>>> from sympy.abc import x
>>> from sympy import cosh, sinh
>>> eq = sinh(x)**2 + cosh(x)**2
>>> t, f = hyper_as_trig(eq)
>>> f(fu(t))
cosh(2*x)
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
"""
from sympy.simplify.simplify import signsimp
from sympy.simplify.radsimp import collect
# mask off trig functions
trigs = rv.atoms(TrigonometricFunction)
reps = [(t, Dummy()) for t in trigs]
masked = rv.xreplace(dict(reps))
# get inversion substitutions in place
reps = [(v, k) for k, v in reps]
d = Dummy()
return _osborne(masked, d), lambda x: collect(signsimp(
_osbornei(x, d).xreplace(dict(reps))), S.ImaginaryUnit)
def sincos_to_sum(expr):
"""Convert products and powers of sin and cos to sums.
Explanation
===========
    Applies the power reduction TRpower first, then expands products, and
converts products to sums with TR8.
Examples
========
>>> from sympy.simplify.fu import sincos_to_sum
>>> from sympy.abc import x
>>> from sympy import cos, sin
>>> sincos_to_sum(16*sin(x)**3*cos(2*x)**2)
7*sin(x) - 5*sin(3*x) + 3*sin(5*x) - sin(7*x)
"""
if not expr.has(cos, sin):
return expr
else:
return TR8(expand_mul(TRpower(expr)))
|
13c003d268fc50bb5e94349492624a258ee4626e5f55f08be101ff0d7eb9a84c | """ This module cooks up a docstring when imported. Its only purpose is to
be displayed in the sphinx documentation. """
from sympy.core.relational import Eq
from sympy.functions.special.hyper import hyper
from sympy.printing.latex import latex
from sympy.simplify.hyperexpand import FormulaCollection
c = FormulaCollection()
doc = ""
for f in c.formulae:
obj = Eq(hyper(f.func.ap, f.func.bq, f.z),
f.closed_form.rewrite('nonrepsmall'))
doc += ".. math::\n %s\n" % latex(obj)
__doc__ = doc
|
9dc56d44f1801f4873fc3031b1c3b990d3b8cc4e42c71d41d4df9ea4fcbe7b1e | from itertools import combinations_with_replacement
from sympy.core import symbols, Add, Dummy
from sympy.core.numbers import Rational
from sympy.polys import cancel, ComputationFailed, parallel_poly_from_expr, reduced, Poly
from sympy.polys.monomials import Monomial, monomial_div
from sympy.polys.polyerrors import DomainError, PolificationFailed
from sympy.utilities.misc import debug
def ratsimp(expr):
"""
Put an expression over a common denominator, cancel and reduce.
Examples
========
>>> from sympy import ratsimp
>>> from sympy.abc import x, y
>>> ratsimp(1/x + 1/y)
(x + y)/(x*y)
"""
f, g = cancel(expr).as_numer_denom()
try:
Q, r = reduced(f, [g], field=True, expand=False)
except ComputationFailed:
return f/g
return Add(*Q) + cancel(r/g)
def ratsimpmodprime(expr, G, *gens, quick=True, polynomial=False, **args):
"""
Simplifies a rational expression ``expr`` modulo the prime ideal
generated by ``G``. ``G`` should be a Groebner basis of the
ideal.
Examples
========
>>> from sympy.simplify.ratsimp import ratsimpmodprime
>>> from sympy.abc import x, y
>>> eq = (x + y**5 + y)/(x - y)
>>> ratsimpmodprime(eq, [x*y**5 - x - y], x, y, order='lex')
(-x**2 - x*y - x - y)/(-x**2 + x*y)
If ``polynomial`` is ``False``, the algorithm computes a rational
simplification which minimizes the sum of the total degrees of
the numerator and the denominator.
If ``polynomial`` is ``True``, this function just brings numerator and
denominator into a canonical form. This is much faster, but has
potentially worse results.
References
==========
.. [1] M. Monagan, R. Pearce, Rational Simplification Modulo a Polynomial
Ideal, http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.163.6984
(specifically, the second algorithm)
"""
from sympy.solvers.solvers import solve
debug('ratsimpmodprime', expr)
# usual preparation of polynomials:
num, denom = cancel(expr).as_numer_denom()
try:
polys, opt = parallel_poly_from_expr([num, denom] + G, *gens, **args)
except PolificationFailed:
return expr
domain = opt.domain
if domain.has_assoc_Field:
opt.domain = domain.get_field()
else:
raise DomainError(
"Cannot compute rational simplification over %s" % domain)
# compute only once
leading_monomials = [g.LM(opt.order) for g in polys[2:]]
tested = set()
def staircase(n):
"""
        Compute all monomials of total degree at most ``n`` that are
not divisible by any element of ``leading_monomials``.
"""
if n == 0:
return [1]
S = []
for mi in combinations_with_replacement(range(len(opt.gens)), n):
m = [0]*len(opt.gens)
for i in mi:
m[i] += 1
if all(monomial_div(m, lmg) is None for lmg in
leading_monomials):
S.append(m)
return [Monomial(s).as_expr(*opt.gens) for s in S] + staircase(n - 1)
def _ratsimpmodprime(a, b, allsol, N=0, D=0):
r"""
Computes a rational simplification of ``a/b`` which minimizes
the sum of the total degrees of the numerator and the denominator.
Explanation
===========
The algorithm proceeds by looking at ``a * d - b * c`` modulo
the ideal generated by ``G`` for some ``c`` and ``d`` with degree
less than ``a`` and ``b`` respectively.
The coefficients of ``c`` and ``d`` are indeterminates and thus
the coefficients of the normalform of ``a * d - b * c`` are
linear polynomials in these indeterminates.
If these linear polynomials, considered as system of
equations, have a nontrivial solution, then `\frac{a}{b}
\equiv \frac{c}{d}` modulo the ideal generated by ``G``. So,
by construction, the degree of ``c`` and ``d`` is less than
the degree of ``a`` and ``b``, so a simpler representation
has been found.
After a simpler representation has been found, the algorithm
tries to reduce the degree of the numerator and denominator
and returns the result afterwards.
As an extension, if quick=False, we look at all possible degrees such
that the total degree is less than *or equal to* the best current
solution. We retain a list of all solutions of minimal degree, and try
to find the best one at the end.
"""
c, d = a, b
steps = 0
maxdeg = a.total_degree() + b.total_degree()
if quick:
bound = maxdeg - 1
else:
bound = maxdeg
while N + D <= bound:
if (N, D) in tested:
break
tested.add((N, D))
M1 = staircase(N)
M2 = staircase(D)
debug('%s / %s: %s, %s' % (N, D, M1, M2))
Cs = symbols("c:%d" % len(M1), cls=Dummy)
Ds = symbols("d:%d" % len(M2), cls=Dummy)
ng = Cs + Ds
c_hat = Poly(
sum([Cs[i] * M1[i] for i in range(len(M1))]), opt.gens + ng)
d_hat = Poly(
sum([Ds[i] * M2[i] for i in range(len(M2))]), opt.gens + ng)
r = reduced(a * d_hat - b * c_hat, G, opt.gens + ng,
order=opt.order, polys=True)[1]
S = Poly(r, gens=opt.gens).coeffs()
sol = solve(S, Cs + Ds, particular=True, quick=True)
if sol and not all(s == 0 for s in sol.values()):
c = c_hat.subs(sol)
d = d_hat.subs(sol)
# The "free" variables occurring before as parameters
# might still be in the substituted c, d, so set them
# to the value chosen before:
c = c.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
d = d.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
c = Poly(c, opt.gens)
d = Poly(d, opt.gens)
if d == 0:
raise ValueError('Ideal not prime?')
allsol.append((c_hat, d_hat, S, Cs + Ds))
if N + D != maxdeg:
allsol = [allsol[-1]]
break
steps += 1
N += 1
D += 1
if steps > 0:
c, d, allsol = _ratsimpmodprime(c, d, allsol, N, D - steps)
c, d, allsol = _ratsimpmodprime(c, d, allsol, N - steps, D)
return c, d, allsol
# preprocessing. this improves performance a bit when deg(num)
# and deg(denom) are large:
num = reduced(num, G, opt.gens, order=opt.order)[1]
denom = reduced(denom, G, opt.gens, order=opt.order)[1]
if polynomial:
return (num/denom).cancel()
c, d, allsol = _ratsimpmodprime(
Poly(num, opt.gens, domain=opt.domain), Poly(denom, opt.gens, domain=opt.domain), [])
if not quick and allsol:
debug('Looking for best minimal solution. Got: %s' % len(allsol))
newsol = []
for c_hat, d_hat, S, ng in allsol:
sol = solve(S, ng, particular=True, quick=False)
newsol.append((c_hat.subs(sol), d_hat.subs(sol)))
c, d = min(newsol, key=lambda x: len(x[0].terms()) + len(x[1].terms()))
if not domain.is_Field:
cn, c = c.clear_denoms(convert=True)
dn, d = d.clear_denoms(convert=True)
r = Rational(cn, dn)
else:
r = Rational(1)
return (c*r.q)/(d*r.p)
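# Editor's sketch (not part of the original module): the core test described in
# ``_ratsimpmodprime`` -- a/b and c/d agree modulo the ideal generated by G
# exactly when the normal form of a*d - b*c with respect to G vanishes.  The
# data re-uses the docstring example of ``ratsimpmodprime``; the zero remainder
# is the expected outcome, not a verified doctest.
def _ratsimpmodprime_membership_sketch():
    from sympy.abc import x, y
    G = [x*y**5 - x - y]
    a, b = x + y**5 + y, x - y                  # original numerator/denominator
    c, d = -x**2 - x*y - x - y, -x**2 + x*y     # simplified numerator/denominator
    return reduced(a*d - b*c, G, x, y, order='lex')[1]  # expected: 0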
|
d4bb997f2b6c35e55343e522f5aaa07029a79da91eddaf8ec1bc795705175b84 | r"""
This module contains the functionality to arrange the nodes of a
diagram on an abstract grid, and then to produce a graphical
representation of the grid.
The currently supported back-ends are Xy-pic [Xypic].
Layout Algorithm
================
This section provides an overview of the algorithms implemented in
:class:`DiagramGrid` to lay out diagrams.
The first step of the algorithm is the removal of composite and identity
morphisms which do not have properties in the supplied diagram. The
premises and conclusions of the diagram are then merged.
The generic layout algorithm begins with the construction of the
"skeleton" of the diagram. The skeleton is an undirected graph which
has the objects of the diagram as vertices and has an (undirected)
edge between each pair of objects between which there exist morphisms.
The direction of the morphisms does not matter at this stage. The
skeleton also includes an edge between each pair of vertices `A` and
`C` such that there exists an object `B` which is connected via
a morphism to `A`, and via a morphism to `C`.
The skeleton constructed in this way has the property that every
object is a vertex of a triangle formed by three edges of the
skeleton. This property lies at the base of the generic layout
algorithm.
After the skeleton has been constructed, the algorithm lists all
triangles which can be formed. Note that some triangles will not have
all edges corresponding to morphisms which will actually be drawn.
Triangles which have only one edge or less which will actually be
drawn are immediately discarded.
The list of triangles is sorted according to the number of edges which
correspond to morphisms, then the triangle with the least number of such
edges is selected. One of such edges is picked and the corresponding
objects are placed horizontally, on a grid. This edge is recorded to
be in the fringe. The algorithm then finds a "welding" of a triangle
to the fringe. A welding is an edge in the fringe where a triangle
could be attached. If the algorithm succeeds in finding such a
welding, it adds to the grid that vertex of the triangle which was not
yet included in any edge in the fringe and records the two new edges in
the fringe. This process continues iteratively until all objects of
the diagram have been placed or until no more weldings can be found.
An edge is only removed from the fringe when a welding to this edge
has been found, and there is no room around this edge to place
another vertex.
When no more weldings can be found, but there are still triangles
left, the algorithm searches for a possibility of attaching one of the
remaining triangles to the existing structure by a vertex. If such a
possibility is found, the corresponding edge of the found triangle is
placed in the found space and the iterative process of welding
triangles restarts.
When logical groups are supplied, each of these groups is laid out
independently. Then a diagram is constructed in which groups are
objects and any two logical groups between which there exist morphisms
are connected via a morphism. This diagram is laid out. Finally,
the grid which includes all objects of the initial diagram is
constructed by replacing the cells which contain logical groups with
the corresponding laid out grids, and by correspondingly expanding the
rows and columns.
The sequential layout algorithm begins by constructing the
underlying undirected graph defined by the morphisms obtained after
simplifying premises and conclusions and merging them (see above).
The vertex with the minimal degree is then picked up and depth-first
search is started from it. All objects which are located at distance
`n` from the root in the depth-first search tree, are positioned in
the `n`-th column of the resulting grid. The sequential layout will
therefore attempt to lay the objects out along a line.
References
==========
.. [Xypic] http://xy-pic.sourceforge.net/
"""
from sympy.categories import (CompositeMorphism, IdentityMorphism,
NamedMorphism, Diagram)
from sympy.core import Dict, Symbol, default_sort_key
from sympy.printing.latex import latex
from sympy.sets import FiniteSet
from sympy.utilities.iterables import iterable
from sympy.utilities.decorator import doctest_depends_on
from itertools import chain
__doctest_requires__ = {('preview_diagram',): 'pyglet'}
class _GrowableGrid:
"""
Holds a growable grid of objects.
Explanation
===========
It is possible to append or prepend a row or a column to the grid
using the corresponding methods. Prepending rows or columns has
the effect of changing the coordinates of the already existing
elements.
This class currently represents a naive implementation of the
functionality with little attempt at optimisation.
"""
def __init__(self, width, height):
self._width = width
self._height = height
self._array = [[None for j in range(width)] for i in range(height)]
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def __getitem__(self, i_j):
"""
        Returns the element located in the i-th line and j-th
column.
"""
i, j = i_j
return self._array[i][j]
def __setitem__(self, i_j, newvalue):
"""
        Sets the element located in the i-th line and j-th
column.
"""
i, j = i_j
self._array[i][j] = newvalue
def append_row(self):
"""
Appends an empty row to the grid.
"""
self._height += 1
self._array.append([None for j in range(self._width)])
def append_column(self):
"""
Appends an empty column to the grid.
"""
self._width += 1
for i in range(self._height):
self._array[i].append(None)
def prepend_row(self):
"""
Prepends the grid with an empty row.
"""
self._height += 1
self._array.insert(0, [None for j in range(self._width)])
def prepend_column(self):
"""
Prepends the grid with an empty column.
"""
self._width += 1
for i in range(self._height):
self._array[i].insert(0, None)
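# Editor's sketch (not part of the original module): prepending a row or a
# column shifts the coordinates of the entries already in the grid, which is
# why ``DiagramGrid._put_object`` below reports an offset.  The values in the
# trailing comment are the expected ones, not a verified doctest.
def _growable_grid_sketch():
    g = _GrowableGrid(1, 1)
    g[0, 0] = "X"
    g.prepend_row()     # the old (0, 0) cell becomes (1, 0)
    g.prepend_column()  # ... and then (1, 1)
    return g.width, g.height, g[1, 1]  # expected: (2, 2, "X")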
class DiagramGrid:
r"""
Constructs and holds the fitting of the diagram into a grid.
Explanation
===========
The mission of this class is to analyse the structure of the
supplied diagram and to place its objects on a grid such that,
when the objects and the morphisms are actually drawn, the diagram
would be "readable", in the sense that there will not be many
    intersections of morphisms. This class does not perform any
actual drawing. It does strive nevertheless to offer sufficient
metadata to draw a diagram.
Consider the following simple diagram.
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import Diagram, DiagramGrid
>>> from sympy import pprint
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g])
The simplest way to have a diagram laid out is the following:
>>> grid = DiagramGrid(diagram)
>>> (grid.width, grid.height)
(2, 2)
>>> pprint(grid)
A B
<BLANKLINE>
C
Sometimes one sees the diagram as consisting of logical groups.
One can advise ``DiagramGrid`` as to such groups by employing the
``groups`` keyword argument.
Consider the following diagram:
>>> D = Object("D")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> h = NamedMorphism(D, A, "h")
>>> k = NamedMorphism(D, B, "k")
>>> diagram = Diagram([f, g, h, k])
Lay it out with generic layout:
>>> grid = DiagramGrid(diagram)
>>> pprint(grid)
A B D
<BLANKLINE>
C
Now, we can group the objects `A` and `D` to have them near one
another:
>>> grid = DiagramGrid(diagram, groups=[[A, D], B, C])
>>> pprint(grid)
B C
<BLANKLINE>
A D
Note how the positioning of the other objects changes.
Further indications can be supplied to the constructor of
:class:`DiagramGrid` using keyword arguments. The currently
supported hints are explained in the following paragraphs.
:class:`DiagramGrid` does not automatically guess which layout
would suit the supplied diagram better. Consider, for example,
the following linear diagram:
>>> E = Object("E")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> h = NamedMorphism(C, D, "h")
>>> i = NamedMorphism(D, E, "i")
>>> diagram = Diagram([f, g, h, i])
When laid out with the generic layout, it does not get to look
linear:
>>> grid = DiagramGrid(diagram)
>>> pprint(grid)
A B
<BLANKLINE>
C D
<BLANKLINE>
E
To get it laid out in a line, use ``layout="sequential"``:
>>> grid = DiagramGrid(diagram, layout="sequential")
>>> pprint(grid)
A B C D E
One may sometimes need to transpose the resulting layout. While
this can always be done by hand, :class:`DiagramGrid` provides a
hint for that purpose:
>>> grid = DiagramGrid(diagram, layout="sequential", transpose=True)
>>> pprint(grid)
A
<BLANKLINE>
B
<BLANKLINE>
C
<BLANKLINE>
D
<BLANKLINE>
E
Separate hints can also be provided for each group. For an
example, refer to ``tests/test_drawing.py``, and see the different
ways in which the five lemma [FiveLemma] can be laid out.
See Also
========
Diagram
References
==========
.. [FiveLemma] https://en.wikipedia.org/wiki/Five_lemma
"""
@staticmethod
def _simplify_morphisms(morphisms):
"""
Given a dictionary mapping morphisms to their properties,
returns a new dictionary in which there are no morphisms which
do not have properties, and which are compositions of other
morphisms included in the dictionary. Identities are dropped
as well.
"""
newmorphisms = {}
for morphism, props in morphisms.items():
if isinstance(morphism, CompositeMorphism) and not props:
continue
elif isinstance(morphism, IdentityMorphism):
continue
else:
newmorphisms[morphism] = props
return newmorphisms
@staticmethod
def _merge_premises_conclusions(premises, conclusions):
"""
Given two dictionaries of morphisms and their properties,
produces a single dictionary which includes elements from both
dictionaries. If a morphism has some properties in premises
and also in conclusions, the properties in conclusions take
priority.
"""
return dict(chain(premises.items(), conclusions.items()))
@staticmethod
def _juxtapose_edges(edge1, edge2):
"""
If ``edge1`` and ``edge2`` have precisely one common endpoint,
returns an edge which would form a triangle with ``edge1`` and
``edge2``.
If ``edge1`` and ``edge2`` do not have a common endpoint,
returns ``None``.
        If ``edge1`` and ``edge2`` are the same edge, returns ``None``.
"""
intersection = edge1 & edge2
if len(intersection) != 1:
# The edges either have no common points or are equal.
return None
# The edges have a common endpoint. Extract the different
# endpoints and set up the new edge.
return (edge1 - intersection) | (edge2 - intersection)
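    # Editor's illustration (not part of the original class), assuming Object
    # instances A, B and C as in the class docstring above:
    #     >>> e = DiagramGrid._juxtapose_edges(frozenset([A, B]), frozenset([B, C]))
    #     >>> e == frozenset([A, C])
    #     True
    # Edges sharing no endpoint, or two copies of the same edge, give ``None``.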
@staticmethod
def _add_edge_append(dictionary, edge, elem):
"""
If ``edge`` is not in ``dictionary``, adds ``edge`` to the
dictionary and sets its value to ``[elem]``. Otherwise
appends ``elem`` to the value of existing entry.
Note that edges are undirected, thus `(A, B) = (B, A)`.
"""
if edge in dictionary:
dictionary[edge].append(elem)
else:
dictionary[edge] = [elem]
@staticmethod
def _build_skeleton(morphisms):
"""
Creates a dictionary which maps edges to corresponding
morphisms. Thus for a morphism `f:A\rightarrow B`, the edge
`(A, B)` will be associated with `f`. This function also adds
to the list those edges which are formed by juxtaposition of
two edges already in the list. These new edges are not
associated with any morphism and are only added to assure that
the diagram can be decomposed into triangles.
"""
edges = {}
# Create edges for morphisms.
for morphism in morphisms:
DiagramGrid._add_edge_append(
edges, frozenset([morphism.domain, morphism.codomain]), morphism)
# Create new edges by juxtaposing existing edges.
edges1 = dict(edges)
for w in edges1:
for v in edges1:
wv = DiagramGrid._juxtapose_edges(w, v)
if wv and wv not in edges:
edges[wv] = []
return edges
@staticmethod
def _list_triangles(edges):
"""
Builds the set of triangles formed by the supplied edges. The
triangles are arbitrary and need not be commutative. A
triangle is a set that contains all three of its sides.
"""
triangles = set()
for w in edges:
for v in edges:
wv = DiagramGrid._juxtapose_edges(w, v)
if wv and wv in edges:
triangles.add(frozenset([w, v, wv]))
return triangles
@staticmethod
def _drop_redundant_triangles(triangles, skeleton):
"""
        Returns a list which contains only those triangles that have
morphisms associated with at least two edges.
"""
return [tri for tri in triangles
if len([e for e in tri if skeleton[e]]) >= 2]
@staticmethod
def _morphism_length(morphism):
"""
Returns the length of a morphism. The length of a morphism is
the number of components it consists of. A non-composite
morphism is of length 1.
"""
if isinstance(morphism, CompositeMorphism):
return len(morphism.components)
else:
return 1
@staticmethod
def _compute_triangle_min_sizes(triangles, edges):
r"""
Returns a dictionary mapping triangles to their minimal sizes.
The minimal size of a triangle is the sum of maximal lengths
of morphisms associated to the sides of the triangle. The
length of a morphism is the number of components it consists
of. A non-composite morphism is of length 1.
Sorting triangles by this metric attempts to address two
aspects of layout. For triangles with only simple morphisms
in the edge, this assures that triangles with all three edges
visible will get typeset after triangles with less visible
        edges, which sometimes minimizes the need for diagonal
        arrows. For triangles with composite morphisms in the edges,
        this assures that objects connected with shorter morphisms
        will be laid out first, resulting in the visual proximity of
those objects which are connected by shorter morphisms.
"""
triangle_sizes = {}
for triangle in triangles:
size = 0
for e in triangle:
morphisms = edges[e]
if morphisms:
size += max(DiagramGrid._morphism_length(m)
for m in morphisms)
triangle_sizes[triangle] = size
return triangle_sizes
@staticmethod
def _triangle_objects(triangle):
"""
Given a triangle, returns the objects included in it.
"""
# A triangle is a frozenset of three two-element frozensets
# (the edges). This chains the three edges together and
# creates a frozenset from the iterator, thus producing a
# frozenset of objects of the triangle.
return frozenset(chain(*tuple(triangle)))
@staticmethod
def _other_vertex(triangle, edge):
"""
Given a triangle and an edge of it, returns the vertex which
opposes the edge.
"""
# This gets the set of objects of the triangle and then
# subtracts the set of objects employed in ``edge`` to get the
# vertex opposite to ``edge``.
return list(DiagramGrid._triangle_objects(triangle) - set(edge))[0]
@staticmethod
def _empty_point(pt, grid):
"""
Checks if the cell at coordinates ``pt`` is either empty or
out of the bounds of the grid.
"""
if (pt[0] < 0) or (pt[1] < 0) or \
(pt[0] >= grid.height) or (pt[1] >= grid.width):
return True
return grid[pt] is None
@staticmethod
def _put_object(coords, obj, grid, fringe):
"""
        Places an object at the coordinates ``coords`` in ``grid``,
growing the grid and updating ``fringe``, if necessary.
Returns (0, 0) if no row or column has been prepended, (1, 0)
if a row was prepended, (0, 1) if a column was prepended and
(1, 1) if both a column and a row were prepended.
"""
(i, j) = coords
offset = (0, 0)
if i == -1:
grid.prepend_row()
i = 0
offset = (1, 0)
for k in range(len(fringe)):
((i1, j1), (i2, j2)) = fringe[k]
fringe[k] = ((i1 + 1, j1), (i2 + 1, j2))
elif i == grid.height:
grid.append_row()
if j == -1:
j = 0
offset = (offset[0], 1)
grid.prepend_column()
for k in range(len(fringe)):
((i1, j1), (i2, j2)) = fringe[k]
fringe[k] = ((i1, j1 + 1), (i2, j2 + 1))
elif j == grid.width:
grid.append_column()
grid[i, j] = obj
return offset
@staticmethod
def _choose_target_cell(pt1, pt2, edge, obj, skeleton, grid):
"""
Given two points, ``pt1`` and ``pt2``, and the welding edge
``edge``, chooses one of the two points to place the opposing
        vertex ``obj`` of the triangle. If neither of these points
fits, returns ``None``.
"""
pt1_empty = DiagramGrid._empty_point(pt1, grid)
pt2_empty = DiagramGrid._empty_point(pt2, grid)
if pt1_empty and pt2_empty:
# Both cells are empty. Of these two, choose that cell
# which will assure that a visible edge of the triangle
# will be drawn perpendicularly to the current welding
# edge.
A = grid[edge[0]]
if skeleton.get(frozenset([A, obj])):
return pt1
else:
return pt2
if pt1_empty:
return pt1
elif pt2_empty:
return pt2
else:
return None
@staticmethod
def _find_triangle_to_weld(triangles, fringe, grid):
"""
Finds, if possible, a triangle and an edge in the ``fringe`` to
which the triangle could be attached. Returns the tuple
containing the triangle and the index of the corresponding
edge in the ``fringe``.
This function relies on the fact that objects are unique in
the diagram.
"""
for triangle in triangles:
for (a, b) in fringe:
if frozenset([grid[a], grid[b]]) in triangle:
return (triangle, (a, b))
return None
@staticmethod
def _weld_triangle(tri, welding_edge, fringe, grid, skeleton):
"""
If possible, welds the triangle ``tri`` to ``fringe`` and
returns ``False``. If this method encounters a degenerate
situation in the fringe and corrects it such that a restart of
the search is required, it returns ``True`` (which means that
a restart in finding triangle weldings is required).
A degenerate situation is a situation when an edge listed in
the fringe does not belong to the visual boundary of the
diagram.
"""
a, b = welding_edge
target_cell = None
obj = DiagramGrid._other_vertex(tri, (grid[a], grid[b]))
# We now have a triangle and an edge where it can be welded to
# the fringe. Decide where to place the other vertex of the
# triangle and check for degenerate situations en route.
if (abs(a[0] - b[0]) == 1) and (abs(a[1] - b[1]) == 1):
# A diagonal edge.
target_cell = (a[0], b[1])
if grid[target_cell]:
# That cell is already occupied.
target_cell = (b[0], a[1])
if grid[target_cell]:
# Degenerate situation, this edge is not
# on the actual fringe. Correct the
# fringe and go on.
fringe.remove((a, b))
return True
elif a[0] == b[0]:
# A horizontal edge. We first attempt to build the
# triangle in the downward direction.
down_left = a[0] + 1, a[1]
down_right = a[0] + 1, b[1]
target_cell = DiagramGrid._choose_target_cell(
down_left, down_right, (a, b), obj, skeleton, grid)
if not target_cell:
# No room below this edge. Check above.
up_left = a[0] - 1, a[1]
up_right = a[0] - 1, b[1]
target_cell = DiagramGrid._choose_target_cell(
up_left, up_right, (a, b), obj, skeleton, grid)
if not target_cell:
# This edge is not in the fringe, remove it
# and restart.
fringe.remove((a, b))
return True
elif a[1] == b[1]:
# A vertical edge. We will attempt to place the other
# vertex of the triangle to the right of this edge.
right_up = a[0], a[1] + 1
right_down = b[0], a[1] + 1
target_cell = DiagramGrid._choose_target_cell(
right_up, right_down, (a, b), obj, skeleton, grid)
if not target_cell:
                # No room to the right. See what's to the left.
left_up = a[0], a[1] - 1
left_down = b[0], a[1] - 1
target_cell = DiagramGrid._choose_target_cell(
left_up, left_down, (a, b), obj, skeleton, grid)
if not target_cell:
# This edge is not in the fringe, remove it
# and restart.
fringe.remove((a, b))
return True
# We now know where to place the other vertex of the
# triangle.
offset = DiagramGrid._put_object(target_cell, obj, grid, fringe)
# Take care of the displacement of coordinates if a row or
# a column was prepended.
target_cell = (target_cell[0] + offset[0],
target_cell[1] + offset[1])
a = (a[0] + offset[0], a[1] + offset[1])
b = (b[0] + offset[0], b[1] + offset[1])
fringe.extend([(a, target_cell), (b, target_cell)])
# No restart is required.
return False
@staticmethod
def _triangle_key(tri, triangle_sizes):
"""
Returns a key for the supplied triangle. It should be the
same independently of the hash randomisation.
"""
objects = sorted(
DiagramGrid._triangle_objects(tri), key=default_sort_key)
return (triangle_sizes[tri], default_sort_key(objects))
@staticmethod
def _pick_root_edge(tri, skeleton):
"""
For a given triangle always picks the same root edge. The
root edge is the edge that will be placed first on the grid.
"""
candidates = [sorted(e, key=default_sort_key)
for e in tri if skeleton[e]]
sorted_candidates = sorted(candidates, key=default_sort_key)
# Don't forget to assure the proper ordering of the vertices
# in this edge.
return tuple(sorted(sorted_candidates[0], key=default_sort_key))
@staticmethod
def _drop_irrelevant_triangles(triangles, placed_objects):
"""
Returns only those triangles whose set of objects is not
completely included in ``placed_objects``.
"""
return [tri for tri in triangles if not placed_objects.issuperset(
DiagramGrid._triangle_objects(tri))]
@staticmethod
def _grow_pseudopod(triangles, fringe, grid, skeleton, placed_objects):
"""
Starting from an object in the existing structure on the ``grid``,
adds an edge to which a triangle from ``triangles`` could be
welded. If this method has found a way to do so, it returns
the object it has just added.
This method should be applied when ``_weld_triangle`` cannot
find weldings any more.
"""
for i in range(grid.height):
for j in range(grid.width):
obj = grid[i, j]
if not obj:
continue
# Here we need to choose a triangle which has only
# ``obj`` in common with the existing structure. The
# situations when this is not possible should be
# handled elsewhere.
def good_triangle(tri):
objs = DiagramGrid._triangle_objects(tri)
return obj in objs and \
placed_objects & (objs - {obj}) == set()
tris = [tri for tri in triangles if good_triangle(tri)]
if not tris:
# This object is not interesting.
continue
# Pick the "simplest" of the triangles which could be
# attached. Remember that the list of triangles is
# sorted according to their "simplicity" (see
# _compute_triangle_min_sizes for the metric).
#
# Note that ``tris`` are sequentially built from
# ``triangles``, so we don't have to worry about hash
# randomisation.
tri = tris[0]
# We have found a triangle which could be attached to
# the existing structure by a vertex.
candidates = sorted([e for e in tri if skeleton[e]],
key=lambda e: FiniteSet(*e).sort_key())
edges = [e for e in candidates if obj in e]
                # Note that a meaningful edge (i.e., an edge that is
# associated with a morphism) containing ``obj``
# always exists. That's because all triangles are
# guaranteed to have at least two meaningful edges.
# See _drop_redundant_triangles.
# Get the object at the other end of the edge.
edge = edges[0]
other_obj = tuple(edge - frozenset([obj]))[0]
# Now check for free directions. When checking for
# free directions, prefer the horizontal and vertical
# directions.
neighbours = [(i - 1, j), (i, j + 1), (i + 1, j), (i, j - 1),
(i - 1, j - 1), (i - 1, j + 1), (i + 1, j - 1), (i + 1, j + 1)]
for pt in neighbours:
if DiagramGrid._empty_point(pt, grid):
# We have a found a place to grow the
# pseudopod into.
offset = DiagramGrid._put_object(
pt, other_obj, grid, fringe)
i += offset[0]
j += offset[1]
pt = (pt[0] + offset[0], pt[1] + offset[1])
fringe.append(((i, j), pt))
return other_obj
        # This diagram is actually cooler than I can handle. Fail cowardly.
return None
@staticmethod
def _handle_groups(diagram, groups, merged_morphisms, hints):
"""
Given the slightly preprocessed morphisms of the diagram,
produces a grid laid out according to ``groups``.
If a group has hints, it is laid out with those hints only,
without any influence from ``hints``. Otherwise, it is laid
out with ``hints``.
"""
def lay_out_group(group, local_hints):
"""
If ``group`` is a set of objects, uses a ``DiagramGrid``
to lay it out and returns the grid. Otherwise returns the
object (i.e., ``group``). If ``local_hints`` is not
empty, it is supplied to ``DiagramGrid`` as the dictionary
of hints. Otherwise, the ``hints`` argument of
``_handle_groups`` is used.
"""
if isinstance(group, FiniteSet):
# Set up the corresponding object-to-group
# mappings.
for obj in group:
obj_groups[obj] = group
# Lay out the current group.
if local_hints:
groups_grids[group] = DiagramGrid(
diagram.subdiagram_from_objects(group), **local_hints)
else:
groups_grids[group] = DiagramGrid(
diagram.subdiagram_from_objects(group), **hints)
else:
obj_groups[group] = group
def group_to_finiteset(group):
"""
Converts ``group`` to a :class:``FiniteSet`` if it is an
iterable.
"""
if iterable(group):
return FiniteSet(*group)
else:
return group
obj_groups = {}
groups_grids = {}
# We would like to support various containers to represent
# groups. To achieve that, before laying each group out, it
# should be converted to a FiniteSet, because that is what the
# following code expects.
if isinstance(groups, (dict, Dict)):
finiteset_groups = {}
for group, local_hints in groups.items():
finiteset_group = group_to_finiteset(group)
finiteset_groups[finiteset_group] = local_hints
                lay_out_group(finiteset_group, local_hints)
groups = finiteset_groups
else:
finiteset_groups = []
for group in groups:
finiteset_group = group_to_finiteset(group)
finiteset_groups.append(finiteset_group)
lay_out_group(finiteset_group, None)
groups = finiteset_groups
new_morphisms = []
for morphism in merged_morphisms:
dom = obj_groups[morphism.domain]
cod = obj_groups[morphism.codomain]
# Note that we are not really interested in morphisms
# which do not employ two different groups, because
# these do not influence the layout.
if dom != cod:
# These are essentially unnamed morphisms; they are
# not going to mess in the final layout. By giving
# them the same names, we avoid unnecessary
# duplicates.
new_morphisms.append(NamedMorphism(dom, cod, "dummy"))
# Lay out the new diagram. Since these are dummy morphisms,
# properties and conclusions are irrelevant.
top_grid = DiagramGrid(Diagram(new_morphisms))
# We now have to substitute the groups with the corresponding
# grids, laid out at the beginning of this function. Compute
# the size of each row and column in the grid, so that all
# nested grids fit.
def group_size(group):
"""
For the supplied group (or object, eventually), returns
the size of the cell that will hold this group (object).
"""
if group in groups_grids:
grid = groups_grids[group]
return (grid.height, grid.width)
else:
return (1, 1)
row_heights = [max(group_size(top_grid[i, j])[0]
for j in range(top_grid.width))
for i in range(top_grid.height)]
column_widths = [max(group_size(top_grid[i, j])[1]
for i in range(top_grid.height))
for j in range(top_grid.width)]
grid = _GrowableGrid(sum(column_widths), sum(row_heights))
real_row = 0
real_column = 0
for logical_row in range(top_grid.height):
for logical_column in range(top_grid.width):
obj = top_grid[logical_row, logical_column]
if obj in groups_grids:
# This is a group. Copy the corresponding grid in
# place.
local_grid = groups_grids[obj]
for i in range(local_grid.height):
for j in range(local_grid.width):
grid[real_row + i,
real_column + j] = local_grid[i, j]
else:
# This is an object. Just put it there.
grid[real_row, real_column] = obj
real_column += column_widths[logical_column]
real_column = 0
real_row += row_heights[logical_row]
return grid
@staticmethod
def _generic_layout(diagram, merged_morphisms):
"""
Produces the generic layout for the supplied diagram.
"""
all_objects = set(diagram.objects)
if len(all_objects) == 1:
            # There is only one object in the diagram; just put it on a 1x1
# grid.
grid = _GrowableGrid(1, 1)
grid[0, 0] = tuple(all_objects)[0]
return grid
skeleton = DiagramGrid._build_skeleton(merged_morphisms)
grid = _GrowableGrid(2, 1)
if len(skeleton) == 1:
# This diagram contains only one morphism. Draw it
# horizontally.
objects = sorted(all_objects, key=default_sort_key)
grid[0, 0] = objects[0]
grid[0, 1] = objects[1]
return grid
triangles = DiagramGrid._list_triangles(skeleton)
triangles = DiagramGrid._drop_redundant_triangles(triangles, skeleton)
triangle_sizes = DiagramGrid._compute_triangle_min_sizes(
triangles, skeleton)
triangles = sorted(triangles, key=lambda tri:
DiagramGrid._triangle_key(tri, triangle_sizes))
# Place the first edge on the grid.
root_edge = DiagramGrid._pick_root_edge(triangles[0], skeleton)
grid[0, 0], grid[0, 1] = root_edge
fringe = [((0, 0), (0, 1))]
# Record which objects we now have on the grid.
placed_objects = set(root_edge)
while placed_objects != all_objects:
welding = DiagramGrid._find_triangle_to_weld(
triangles, fringe, grid)
if welding:
(triangle, welding_edge) = welding
restart_required = DiagramGrid._weld_triangle(
triangle, welding_edge, fringe, grid, skeleton)
if restart_required:
continue
placed_objects.update(
DiagramGrid._triangle_objects(triangle))
else:
# No more weldings found. Try to attach triangles by
# vertices.
new_obj = DiagramGrid._grow_pseudopod(
triangles, fringe, grid, skeleton, placed_objects)
if not new_obj:
# No more triangles can be attached, not even by
# the edge. We will set up a new diagram out of
                    # what has been left, lay it out independently,
# and then attach it to this one.
remaining_objects = all_objects - placed_objects
remaining_diagram = diagram.subdiagram_from_objects(
FiniteSet(*remaining_objects))
remaining_grid = DiagramGrid(remaining_diagram)
# Now, let's glue ``remaining_grid`` to ``grid``.
final_width = grid.width + remaining_grid.width
final_height = max(grid.height, remaining_grid.height)
final_grid = _GrowableGrid(final_width, final_height)
                    for i in range(grid.height):
                        for j in range(grid.width):
final_grid[i, j] = grid[i, j]
start_j = grid.width
for i in range(remaining_grid.height):
for j in range(remaining_grid.width):
final_grid[i, start_j + j] = remaining_grid[i, j]
return final_grid
placed_objects.add(new_obj)
triangles = DiagramGrid._drop_irrelevant_triangles(
triangles, placed_objects)
return grid
@staticmethod
def _get_undirected_graph(objects, merged_morphisms):
"""
Given the objects and the relevant morphisms of a diagram,
returns the adjacency lists of the underlying undirected
graph.
"""
adjlists = {}
for obj in objects:
adjlists[obj] = []
for morphism in merged_morphisms:
adjlists[morphism.domain].append(morphism.codomain)
adjlists[morphism.codomain].append(morphism.domain)
# Assure that the objects in the adjacency list are always in
# the same order.
for obj in adjlists.keys():
adjlists[obj].sort(key=default_sort_key)
return adjlists
@staticmethod
def _sequential_layout(diagram, merged_morphisms):
r"""
Lays out the diagram in "sequential" layout. This method
will attempt to produce a result as close to a line as
possible. For linear diagrams, the result will actually be a
line.
"""
objects = diagram.objects
sorted_objects = sorted(objects, key=default_sort_key)
# Set up the adjacency lists of the underlying undirected
# graph of ``merged_morphisms``.
adjlists = DiagramGrid._get_undirected_graph(objects, merged_morphisms)
# Find an object with the minimal degree. This is going to be
# the root.
root = sorted_objects[0]
mindegree = len(adjlists[root])
for obj in sorted_objects:
current_degree = len(adjlists[obj])
if current_degree < mindegree:
root = obj
mindegree = current_degree
grid = _GrowableGrid(1, 1)
grid[0, 0] = root
placed_objects = {root}
def place_objects(pt, placed_objects):
"""
Does depth-first search in the underlying graph of the
diagram and places the objects en route.
"""
# We will start placing new objects from here.
new_pt = (pt[0], pt[1] + 1)
for adjacent_obj in adjlists[grid[pt]]:
if adjacent_obj in placed_objects:
# This object has already been placed.
continue
DiagramGrid._put_object(new_pt, adjacent_obj, grid, [])
placed_objects.add(adjacent_obj)
placed_objects.update(place_objects(new_pt, placed_objects))
new_pt = (new_pt[0] + 1, new_pt[1])
return placed_objects
place_objects((0, 0), placed_objects)
return grid
@staticmethod
def _drop_inessential_morphisms(merged_morphisms):
r"""
Removes those morphisms which should appear in the diagram,
but which have no relevance to object layout.
Currently this removes "loop" morphisms: the non-identity
morphisms with the same domains and codomains.
"""
morphisms = [m for m in merged_morphisms if m.domain != m.codomain]
return morphisms
@staticmethod
def _get_connected_components(objects, merged_morphisms):
"""
Given a container of morphisms, returns a list of connected
components formed by these morphisms. A connected component
is represented by a diagram consisting of the corresponding
morphisms.
"""
component_index = {}
for o in objects:
component_index[o] = None
# Get the underlying undirected graph of the diagram.
adjlist = DiagramGrid._get_undirected_graph(objects, merged_morphisms)
def traverse_component(object, current_index):
"""
Does a depth-first search traversal of the component
containing ``object``.
"""
component_index[object] = current_index
for o in adjlist[object]:
if component_index[o] is None:
traverse_component(o, current_index)
# Traverse all components.
current_index = 0
for o in adjlist:
if component_index[o] is None:
traverse_component(o, current_index)
current_index += 1
# List the objects of the components.
component_objects = [[] for i in range(current_index)]
for o, idx in component_index.items():
component_objects[idx].append(o)
# Finally, list the morphisms belonging to each component.
#
# Note: If some objects are isolated, they will not get any
# morphisms at this stage, and since the layout algorithm
        # relies on morphisms, we are essentially going to lose this object.
# Therefore, check if there are isolated objects and, for each
# of them, provide the trivial identity morphism. It will get
# discarded later, but the object will be there.
component_morphisms = []
for component in component_objects:
current_morphisms = {}
for m in merged_morphisms:
if (m.domain in component) and (m.codomain in component):
current_morphisms[m] = merged_morphisms[m]
if len(component) == 1:
# Let's add an identity morphism, for the sake of
# surely having morphisms in this component.
current_morphisms[IdentityMorphism(component[0])] = FiniteSet()
component_morphisms.append(Diagram(current_morphisms))
return component_morphisms
def __init__(self, diagram, groups=None, **hints):
premises = DiagramGrid._simplify_morphisms(diagram.premises)
conclusions = DiagramGrid._simplify_morphisms(diagram.conclusions)
all_merged_morphisms = DiagramGrid._merge_premises_conclusions(
premises, conclusions)
merged_morphisms = DiagramGrid._drop_inessential_morphisms(
all_merged_morphisms)
# Store the merged morphisms for later use.
self._morphisms = all_merged_morphisms
components = DiagramGrid._get_connected_components(
diagram.objects, all_merged_morphisms)
if groups and (groups != diagram.objects):
# Lay out the diagram according to the groups.
self._grid = DiagramGrid._handle_groups(
diagram, groups, merged_morphisms, hints)
elif len(components) > 1:
# Note that we check for connectedness _before_ checking
# the layout hints because the layout strategies don't
# know how to deal with disconnected diagrams.
# The diagram is disconnected. Lay out the components
# independently.
grids = []
# Sort the components to eventually get the grids arranged
# in a fixed, hash-independent order.
components = sorted(components, key=default_sort_key)
for component in components:
grid = DiagramGrid(component, **hints)
grids.append(grid)
# Throw the grids together, in a line.
total_width = sum(g.width for g in grids)
total_height = max(g.height for g in grids)
grid = _GrowableGrid(total_width, total_height)
start_j = 0
for g in grids:
for i in range(g.height):
for j in range(g.width):
grid[i, start_j + j] = g[i, j]
start_j += g.width
self._grid = grid
elif "layout" in hints:
if hints["layout"] == "sequential":
self._grid = DiagramGrid._sequential_layout(
diagram, merged_morphisms)
else:
self._grid = DiagramGrid._generic_layout(diagram, merged_morphisms)
if hints.get("transpose"):
# Transpose the resulting grid.
grid = _GrowableGrid(self._grid.height, self._grid.width)
for i in range(self._grid.height):
for j in range(self._grid.width):
grid[j, i] = self._grid[i, j]
self._grid = grid
@property
def width(self):
"""
Returns the number of columns in this diagram layout.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import Diagram, DiagramGrid
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g])
>>> grid = DiagramGrid(diagram)
>>> grid.width
2
"""
return self._grid.width
@property
def height(self):
"""
Returns the number of rows in this diagram layout.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import Diagram, DiagramGrid
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g])
>>> grid = DiagramGrid(diagram)
>>> grid.height
2
"""
return self._grid.height
def __getitem__(self, i_j):
"""
Returns the object placed in the row ``i`` and column ``j``.
The indices are 0-based.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import Diagram, DiagramGrid
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g])
>>> grid = DiagramGrid(diagram)
>>> (grid[0, 0], grid[0, 1])
(Object("A"), Object("B"))
>>> (grid[1, 0], grid[1, 1])
(None, Object("C"))
"""
i, j = i_j
return self._grid[i, j]
@property
def morphisms(self):
"""
Returns those morphisms (and their properties) which are
sufficiently meaningful to be drawn.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import Diagram, DiagramGrid
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g])
>>> grid = DiagramGrid(diagram)
>>> grid.morphisms
{NamedMorphism(Object("A"), Object("B"), "f"): EmptySet,
NamedMorphism(Object("B"), Object("C"), "g"): EmptySet}
"""
return self._morphisms
def __str__(self):
"""
Produces a string representation of this class.
This method returns a string representation of the underlying
list of lists of objects.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import Diagram, DiagramGrid
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g])
>>> grid = DiagramGrid(diagram)
>>> print(grid)
[[Object("A"), Object("B")],
[None, Object("C")]]
"""
return repr(self._grid._array)
class ArrowStringDescription:
r"""
Stores the information necessary for producing an Xy-pic
description of an arrow.
The principal goal of this class is to abstract away the string
representation of an arrow and to also provide the functionality
to produce the actual Xy-pic string.
``unit`` sets the unit which will be used to specify the amount of
curving and other distances. ``horizontal_direction`` should be a
string of ``"r"`` or ``"l"`` specifying the horizontal offset of the
target cell of the arrow relatively to the current one.
``vertical_direction`` should specify the vertical offset using a
series of either ``"d"`` or ``"u"``. ``label_position`` should be
either ``"^"``, ``"_"``, or ``"|"`` to specify that the label should
be positioned above the arrow, below the arrow or just over the arrow,
in a break. Note that the notions "above" and "below" are relative
to arrow direction. ``label`` stores the morphism label.
This works as follows (disregard the yet unexplained arguments):
>>> from sympy.categories.diagram_drawing import ArrowStringDescription
>>> astr = ArrowStringDescription(
... unit="mm", curving=None, curving_amount=None,
... looping_start=None, looping_end=None, horizontal_direction="d",
... vertical_direction="r", label_position="_", label="f")
>>> print(str(astr))
\ar[dr]_{f}
``curving`` should be one of ``"^"``, ``"_"`` to specify in which
direction the arrow is going to curve. ``curving_amount`` is a number
describing how many ``unit``'s the morphism is going to curve:
>>> astr = ArrowStringDescription(
... unit="mm", curving="^", curving_amount=12,
... looping_start=None, looping_end=None, horizontal_direction="d",
... vertical_direction="r", label_position="_", label="f")
>>> print(str(astr))
\ar@/^12mm/[dr]_{f}
``looping_start`` and ``looping_end`` are currently only used for
loop morphisms, those which have the same domain and codomain.
These two attributes should store a valid Xy-pic direction and
specify, correspondingly, the direction the arrow gets out into
and the direction the arrow gets back from:
>>> astr = ArrowStringDescription(
... unit="mm", curving=None, curving_amount=None,
... looping_start="u", looping_end="l", horizontal_direction="",
... vertical_direction="", label_position="_", label="f")
>>> print(str(astr))
\ar@(u,l)[]_{f}
``label_displacement`` controls how far the arrow label is from
the ends of the arrow. For example, to position the arrow label
near the arrow head, use ">":
>>> astr = ArrowStringDescription(
... unit="mm", curving="^", curving_amount=12,
... looping_start=None, looping_end=None, horizontal_direction="d",
... vertical_direction="r", label_position="_", label="f")
>>> astr.label_displacement = ">"
>>> print(str(astr))
\ar@/^12mm/[dr]_>{f}
Finally, ``arrow_style`` is used to specify the arrow style. To
get a dashed arrow, for example, use "{-->}" as arrow style:
>>> astr = ArrowStringDescription(
... unit="mm", curving="^", curving_amount=12,
... looping_start=None, looping_end=None, horizontal_direction="d",
... vertical_direction="r", label_position="_", label="f")
>>> astr.arrow_style = "{-->}"
>>> print(str(astr))
\ar@/^12mm/@{-->}[dr]_{f}
Notes
=====
Instances of :class:`ArrowStringDescription` will be constructed
by :class:`XypicDiagramDrawer` and provided for further use in
formatters. The user is not expected to construct instances of
:class:`ArrowStringDescription` themselves.
To be able to properly utilise this class, the reader is encouraged
    to check out the Xy-pic user guide, available at [Xypic].
See Also
========
XypicDiagramDrawer
References
==========
.. [Xypic] http://xy-pic.sourceforge.net/
"""
def __init__(self, unit, curving, curving_amount, looping_start,
looping_end, horizontal_direction, vertical_direction,
label_position, label):
self.unit = unit
self.curving = curving
self.curving_amount = curving_amount
self.looping_start = looping_start
self.looping_end = looping_end
self.horizontal_direction = horizontal_direction
self.vertical_direction = vertical_direction
self.label_position = label_position
self.label = label
self.label_displacement = ""
self.arrow_style = ""
# This flag shows that the position of the label of this
# morphism was set while typesetting a curved morphism and
# should not be modified later.
self.forced_label_position = False
def __str__(self):
if self.curving:
curving_str = "@/%s%d%s/" % (self.curving, self.curving_amount,
self.unit)
else:
curving_str = ""
if self.looping_start and self.looping_end:
looping_str = "@(%s,%s)" % (self.looping_start, self.looping_end)
else:
looping_str = ""
if self.arrow_style:
style_str = "@" + self.arrow_style
else:
style_str = ""
return "\\ar%s%s%s[%s%s]%s%s{%s}" % \
(curving_str, looping_str, style_str, self.horizontal_direction,
self.vertical_direction, self.label_position,
self.label_displacement, self.label)
class XypicDiagramDrawer:
r"""
Given a :class:`~.Diagram` and the corresponding
:class:`DiagramGrid`, produces the Xy-pic representation of the
diagram.
The most important method in this class is ``draw``. Consider the
following triangle diagram:
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy.categories import DiagramGrid, XypicDiagramDrawer
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g], {g * f: "unique"})
To draw this diagram, its objects need to be laid out with a
:class:`DiagramGrid`::
>>> grid = DiagramGrid(diagram)
Finally, the drawing:
>>> drawer = XypicDiagramDrawer()
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\
C &
}
For further details see the docstring of this method.
To control the appearance of the arrows, formatters are used. The
dictionary ``arrow_formatters`` maps morphisms to formatter
    functions. A formatter accepts an
:class:`ArrowStringDescription` and is allowed to modify any of
the arrow properties exposed thereby. For example, to have all
morphisms with the property ``unique`` appear as dashed arrows,
and to have their names prepended with `\exists !`, the following
should be done:
>>> def formatter(astr):
... astr.label = r"\exists !" + astr.label
... astr.arrow_style = "{-->}"
>>> drawer.arrow_formatters["unique"] = formatter
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar@{-->}[d]_{\exists !g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\
C &
}
To modify the appearance of all arrows in the diagram, set
``default_arrow_formatter``. For example, to place all morphism
labels a little bit farther from the arrow head so that they look
more centred, do as follows:
>>> def default_formatter(astr):
... astr.label_displacement = "(0.45)"
>>> drawer.default_arrow_formatter = default_formatter
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar@{-->}[d]_(0.45){\exists !g\circ f} \ar[r]^(0.45){f} & B \ar[ld]^(0.45){g} \\
C &
}
In some diagrams some morphisms are drawn as curved arrows.
Consider the following diagram:
>>> D = Object("D")
>>> E = Object("E")
>>> h = NamedMorphism(D, A, "h")
>>> k = NamedMorphism(D, B, "k")
>>> diagram = Diagram([f, g, h, k])
>>> grid = DiagramGrid(diagram)
>>> drawer = XypicDiagramDrawer()
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar[r]_{f} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_3mm/[ll]_{h} \\
& C &
}
To control how far the morphisms are curved by default, one can
use the ``unit`` and ``default_curving_amount`` attributes:
>>> drawer.unit = "cm"
>>> drawer.default_curving_amount = 1
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar[r]_{f} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_1cm/[ll]_{h} \\
& C &
}
In some diagrams, there are multiple curved morphisms between the
same two objects. To control by how much the curving changes
between two such successive morphisms, use
``default_curving_step``:
>>> drawer.default_curving_step = 1
>>> h1 = NamedMorphism(A, D, "h1")
>>> diagram = Diagram([f, g, h, k, h1])
>>> grid = DiagramGrid(diagram)
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar[r]_{f} \ar@/^1cm/[rr]^{h_{1}} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_2cm/[ll]_{h} \\
& C &
}
The default value of ``default_curving_step`` is 4 units.
See Also
========
draw, ArrowStringDescription
"""
def __init__(self):
self.unit = "mm"
self.default_curving_amount = 3
self.default_curving_step = 4
# This dictionary maps properties to the corresponding arrow
# formatters.
self.arrow_formatters = {}
# This is the default arrow formatter which will be applied to
# each arrow independently of its properties.
self.default_arrow_formatter = None
@staticmethod
def _process_loop_morphism(i, j, grid, morphisms_str_info, object_coords):
"""
Produces the information required for constructing the string
representation of a loop morphism. This function is invoked
from ``_process_morphism``.
See Also
========
_process_morphism
"""
curving = ""
label_pos = "^"
looping_start = ""
looping_end = ""
# This is a loop morphism. Count how many morphisms stick
# in each of the four quadrants. Note that straight
# vertical and horizontal morphisms count in two quadrants
# at the same time (i.e., a morphism going up counts both
# in the first and the second quadrants).
# The usual numbering (counterclockwise) of quadrants
# applies.
quadrant = [0, 0, 0, 0]
obj = grid[i, j]
for m, m_str_info in morphisms_str_info.items():
if (m.domain == obj) and (m.codomain == obj):
# That's another loop morphism. Check how it
# loops and mark the corresponding quadrants as
# busy.
(l_s, l_e) = (m_str_info.looping_start, m_str_info.looping_end)
if (l_s, l_e) == ("r", "u"):
quadrant[0] += 1
elif (l_s, l_e) == ("u", "l"):
quadrant[1] += 1
elif (l_s, l_e) == ("l", "d"):
quadrant[2] += 1
elif (l_s, l_e) == ("d", "r"):
quadrant[3] += 1
continue
if m.domain == obj:
(end_i, end_j) = object_coords[m.codomain]
goes_out = True
elif m.codomain == obj:
(end_i, end_j) = object_coords[m.domain]
goes_out = False
else:
continue
d_i = end_i - i
d_j = end_j - j
m_curving = m_str_info.curving
if (d_i != 0) and (d_j != 0):
# This is really a diagonal morphism. Detect the
# quadrant.
if (d_i > 0) and (d_j > 0):
quadrant[0] += 1
elif (d_i > 0) and (d_j < 0):
quadrant[1] += 1
elif (d_i < 0) and (d_j < 0):
quadrant[2] += 1
elif (d_i < 0) and (d_j > 0):
quadrant[3] += 1
elif d_i == 0:
# Knowing where the other end of the morphism is
# and which way it goes, we now have to decide
# which quadrant is now the upper one and which is
# the lower one.
if d_j > 0:
if goes_out:
upper_quadrant = 0
lower_quadrant = 3
else:
upper_quadrant = 3
lower_quadrant = 0
else:
if goes_out:
upper_quadrant = 2
lower_quadrant = 1
else:
upper_quadrant = 1
lower_quadrant = 2
if m_curving:
if m_curving == "^":
quadrant[upper_quadrant] += 1
elif m_curving == "_":
quadrant[lower_quadrant] += 1
else:
# This morphism counts in both upper and lower
# quadrants.
quadrant[upper_quadrant] += 1
quadrant[lower_quadrant] += 1
elif d_j == 0:
# Knowing where the other end of the morphism is
# and which way it goes, we now have to decide
# which quadrant is now the left one and which is
# the right one.
if d_i < 0:
if goes_out:
left_quadrant = 1
right_quadrant = 0
else:
left_quadrant = 0
right_quadrant = 1
else:
if goes_out:
left_quadrant = 3
right_quadrant = 2
else:
left_quadrant = 2
right_quadrant = 3
if m_curving:
if m_curving == "^":
quadrant[left_quadrant] += 1
elif m_curving == "_":
quadrant[right_quadrant] += 1
else:
                    # This morphism counts in both the left and the right
# quadrants.
quadrant[left_quadrant] += 1
quadrant[right_quadrant] += 1
# Pick the freest quadrant to curve our morphism into.
freest_quadrant = 0
for i in range(4):
if quadrant[i] < quadrant[freest_quadrant]:
freest_quadrant = i
# Now set up proper looping.
(looping_start, looping_end) = [("r", "u"), ("u", "l"), ("l", "d"),
("d", "r")][freest_quadrant]
return (curving, label_pos, looping_start, looping_end)
@staticmethod
def _process_horizontal_morphism(i, j, target_j, grid, morphisms_str_info,
object_coords):
"""
Produces the information required for constructing the string
representation of a horizontal morphism. This function is
invoked from ``_process_morphism``.
See Also
========
_process_morphism
"""
# The arrow is horizontal. Check if it goes from left to
# right (``backwards == False``) or from right to left
# (``backwards == True``).
backwards = False
start = j
end = target_j
if end < start:
(start, end) = (end, start)
backwards = True
# Let's see which objects are there between ``start`` and
# ``end``, and then count how many morphisms stick out
# upwards, and how many stick out downwards.
#
# For example, consider the situation:
#
# B1 C1
# | |
# A--B--C--D
# |
# B2
#
# Between the objects `A` and `D` there are two objects:
# `B` and `C`. Further, there are two morphisms which
# stick out upward (the ones between `B1` and `B` and
# between `C` and `C1`) and one morphism which sticks out
        # downward (the one between `B` and `B2`).
#
# We need this information to decide how to curve the
# arrow between `A` and `D`. First of all, since there
        # are two objects between `A` and `D`, we must curve the
# arrow. Then, we will have it curve downward, because
# there is more space (less morphisms stick out downward
# than upward).
up = []
down = []
straight_horizontal = []
for k in range(start + 1, end):
obj = grid[i, k]
if not obj:
continue
for m in morphisms_str_info:
if m.domain == obj:
(end_i, end_j) = object_coords[m.codomain]
elif m.codomain == obj:
(end_i, end_j) = object_coords[m.domain]
else:
continue
if end_i > i:
down.append(m)
elif end_i < i:
up.append(m)
elif not morphisms_str_info[m].curving:
# This is a straight horizontal morphism,
# because it has no curving.
straight_horizontal.append(m)
if len(up) < len(down):
# More morphisms stick out downward than upward, let's
# curve the morphism up.
if backwards:
curving = "_"
label_pos = "_"
else:
curving = "^"
label_pos = "^"
# Assure that the straight horizontal morphisms have
# their labels on the lower side of the arrow.
for m in straight_horizontal:
(i1, j1) = object_coords[m.domain]
(i2, j2) = object_coords[m.codomain]
m_str_info = morphisms_str_info[m]
if j1 < j2:
m_str_info.label_position = "_"
else:
m_str_info.label_position = "^"
# Don't allow any further modifications of the
# position of this label.
m_str_info.forced_label_position = True
else:
            # More morphisms stick out upward than downward, let's
            # curve the morphism down.
if backwards:
curving = "^"
label_pos = "^"
else:
curving = "_"
label_pos = "_"
# Assure that the straight horizontal morphisms have
# their labels on the upper side of the arrow.
for m in straight_horizontal:
(i1, j1) = object_coords[m.domain]
(i2, j2) = object_coords[m.codomain]
m_str_info = morphisms_str_info[m]
if j1 < j2:
m_str_info.label_position = "^"
else:
m_str_info.label_position = "_"
# Don't allow any further modifications of the
# position of this label.
m_str_info.forced_label_position = True
return (curving, label_pos)
@staticmethod
def _process_vertical_morphism(i, j, target_i, grid, morphisms_str_info,
object_coords):
"""
Produces the information required for constructing the string
representation of a vertical morphism. This function is
invoked from ``_process_morphism``.
See Also
========
_process_morphism
"""
# This arrow is vertical. Check if it goes from top to
# bottom (``backwards == False``) or from bottom to top
# (``backwards == True``).
backwards = False
start = i
end = target_i
if end < start:
(start, end) = (end, start)
backwards = True
# Let's see which objects are there between ``start`` and
# ``end``, and then count how many morphisms stick out to
# the left, and how many stick out to the right.
#
        # See the corresponding comment in
        # ``_process_horizontal_morphism`` for more details.
left = []
right = []
straight_vertical = []
for k in range(start + 1, end):
obj = grid[k, j]
if not obj:
continue
for m in morphisms_str_info:
if m.domain == obj:
(end_i, end_j) = object_coords[m.codomain]
elif m.codomain == obj:
(end_i, end_j) = object_coords[m.domain]
else:
continue
if end_j > j:
right.append(m)
elif end_j < j:
left.append(m)
elif not morphisms_str_info[m].curving:
# This is a straight vertical morphism,
# because it has no curving.
straight_vertical.append(m)
if len(left) < len(right):
# More morphisms stick out to the left than to the
# right, let's curve the morphism to the right.
if backwards:
curving = "^"
label_pos = "^"
else:
curving = "_"
label_pos = "_"
# Assure that the straight vertical morphisms have
# their labels on the left side of the arrow.
for m in straight_vertical:
(i1, j1) = object_coords[m.domain]
(i2, j2) = object_coords[m.codomain]
m_str_info = morphisms_str_info[m]
if i1 < i2:
m_str_info.label_position = "^"
else:
m_str_info.label_position = "_"
# Don't allow any further modifications of the
# position of this label.
m_str_info.forced_label_position = True
else:
# More morphisms stick out to the right than to the
# left, let's curve the morphism to the left.
if backwards:
curving = "_"
label_pos = "_"
else:
curving = "^"
label_pos = "^"
# Assure that the straight vertical morphisms have
# their labels on the right side of the arrow.
for m in straight_vertical:
(i1, j1) = object_coords[m.domain]
(i2, j2) = object_coords[m.codomain]
m_str_info = morphisms_str_info[m]
if i1 < i2:
m_str_info.label_position = "_"
else:
m_str_info.label_position = "^"
# Don't allow any further modifications of the
# position of this label.
m_str_info.forced_label_position = True
return (curving, label_pos)
def _process_morphism(self, diagram, grid, morphism, object_coords,
morphisms, morphisms_str_info):
"""
Given the required information, produces the string
representation of ``morphism``.
"""
def repeat_string_cond(times, str_gt, str_lt):
"""
If ``times > 0``, repeats ``str_gt`` ``times`` times.
Otherwise, repeats ``str_lt`` ``-times`` times.
"""
if times > 0:
return str_gt * times
else:
return str_lt * (-times)
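        # For example, repeat_string_cond(2, "d", "u") gives "dd" and
        # repeat_string_cond(-3, "d", "u") gives "uuu"; these strings end up
        # inside the Xy-pic target brackets of the arrow.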
def count_morphisms_undirected(A, B):
"""
Counts how many processed morphisms there are between the
two supplied objects.
"""
return len([m for m in morphisms_str_info
if {m.domain, m.codomain} == {A, B}])
def count_morphisms_filtered(dom, cod, curving):
"""
Counts the processed morphisms which go out of ``dom``
into ``cod`` with curving ``curving``.
"""
return len([m for m, m_str_info in morphisms_str_info.items()
if (m.domain, m.codomain) == (dom, cod) and
(m_str_info.curving == curving)])
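        # These helpers are used below to decide whether the current arrow
        # has to be curved (because another arrow already runs between the
        # same pair of objects) and by how much.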
(i, j) = object_coords[morphism.domain]
(target_i, target_j) = object_coords[morphism.codomain]
# We now need to determine the direction of
# the arrow.
delta_i = target_i - i
delta_j = target_j - j
vertical_direction = repeat_string_cond(delta_i,
"d", "u")
horizontal_direction = repeat_string_cond(delta_j,
"r", "l")
curving = ""
label_pos = "^"
looping_start = ""
looping_end = ""
if (delta_i == 0) and (delta_j == 0):
# This is a loop morphism.
(curving, label_pos, looping_start,
looping_end) = XypicDiagramDrawer._process_loop_morphism(
i, j, grid, morphisms_str_info, object_coords)
elif (delta_i == 0) and (abs(j - target_j) > 1):
# This is a horizontal morphism.
(curving, label_pos) = XypicDiagramDrawer._process_horizontal_morphism(
i, j, target_j, grid, morphisms_str_info, object_coords)
elif (delta_j == 0) and (abs(i - target_i) > 1):
# This is a vertical morphism.
(curving, label_pos) = XypicDiagramDrawer._process_vertical_morphism(
i, j, target_i, grid, morphisms_str_info, object_coords)
count = count_morphisms_undirected(morphism.domain, morphism.codomain)
curving_amount = ""
if curving:
# This morphisms should be curved anyway.
curving_amount = self.default_curving_amount + count * \
self.default_curving_step
elif count:
# There are no objects between the domain and codomain of
            # the current morphism, but there already are
# some morphisms with the same domain and codomain, so we
# have to curve this one.
curving = "^"
filtered_morphisms = count_morphisms_filtered(
morphism.domain, morphism.codomain, curving)
curving_amount = self.default_curving_amount + \
filtered_morphisms * \
self.default_curving_step
# Let's now get the name of the morphism.
morphism_name = ""
if isinstance(morphism, IdentityMorphism):
morphism_name = "id_{%s}" + latex(grid[i, j])
elif isinstance(morphism, CompositeMorphism):
component_names = [latex(Symbol(component.name)) for
component in morphism.components]
component_names.reverse()
morphism_name = "\\circ ".join(component_names)
elif isinstance(morphism, NamedMorphism):
morphism_name = latex(Symbol(morphism.name))
return ArrowStringDescription(
self.unit, curving, curving_amount, looping_start,
looping_end, horizontal_direction, vertical_direction,
label_pos, morphism_name)
@staticmethod
def _check_free_space_horizontal(dom_i, dom_j, cod_j, grid):
"""
For a horizontal morphism, checks whether there is free space
(i.e., space not occupied by any objects) above the morphism
or below it.
"""
if dom_j < cod_j:
(start, end) = (dom_j, cod_j)
backwards = False
else:
(start, end) = (cod_j, dom_j)
backwards = True
# Check for free space above.
if dom_i == 0:
free_up = True
else:
            free_up = not any(grid[dom_i - 1, j] for j in
                              range(start, end + 1))
# Check for free space below.
if dom_i == grid.height - 1:
free_down = True
else:
free_down = not any(grid[dom_i + 1, j] for j in
range(start, end + 1))
return (free_up, free_down, backwards)
@staticmethod
def _check_free_space_vertical(dom_i, cod_i, dom_j, grid):
"""
For a vertical morphism, checks whether there is free space
(i.e., space not occupied by any objects) to the left of the
morphism or to the right of it.
"""
if dom_i < cod_i:
(start, end) = (dom_i, cod_i)
backwards = False
else:
(start, end) = (cod_i, dom_i)
backwards = True
# Check if there's space to the left.
if dom_j == 0:
free_left = True
else:
free_left = not any(grid[i, dom_j - 1] for i in
range(start, end + 1))
if dom_j == grid.width - 1:
free_right = True
else:
free_right = not any(grid[i, dom_j + 1] for i in
range(start, end + 1))
return (free_left, free_right, backwards)
@staticmethod
def _check_free_space_diagonal(dom_i, cod_i, dom_j, cod_j, grid):
"""
For a diagonal morphism, checks whether there is free space
(i.e., space not occupied by any objects) above the morphism
or below it.
"""
def abs_xrange(start, end):
if start < end:
return range(start, end + 1)
else:
return range(end, start + 1)
if dom_i < cod_i and dom_j < cod_j:
# This morphism goes from top-left to
# bottom-right.
(start_i, start_j) = (dom_i, dom_j)
(end_i, end_j) = (cod_i, cod_j)
backwards = False
elif dom_i > cod_i and dom_j > cod_j:
# This morphism goes from bottom-right to
# top-left.
(start_i, start_j) = (cod_i, cod_j)
(end_i, end_j) = (dom_i, dom_j)
backwards = True
if dom_i < cod_i and dom_j > cod_j:
# This morphism goes from top-right to
# bottom-left.
(start_i, start_j) = (dom_i, dom_j)
(end_i, end_j) = (cod_i, cod_j)
backwards = True
elif dom_i > cod_i and dom_j < cod_j:
# This morphism goes from bottom-left to
# top-right.
(start_i, start_j) = (cod_i, cod_j)
(end_i, end_j) = (dom_i, dom_j)
backwards = False
# This is an attempt at a fast and furious strategy to
# decide where there is free space on the two sides of
# a diagonal morphism. For a diagonal morphism
# starting at ``(start_i, start_j)`` and ending at
# ``(end_i, end_j)`` the rectangle defined by these
# two points is considered. The slope of the diagonal
# ``alpha`` is then computed. Then, for every cell
# ``(i, j)`` within the rectangle, the slope
# ``alpha1`` of the line through ``(start_i,
# start_j)`` and ``(i, j)`` is considered. If
# ``alpha1`` is between 0 and ``alpha``, the point
# ``(i, j)`` is above the diagonal, if ``alpha1`` is
# between ``alpha`` and infinity, the point is below
# the diagonal. Also note that, with some beforehand
# precautions, this trick works for both the main and
# the secondary diagonals of the rectangle.
# I have considered the possibility to only follow the
# shorter diagonals immediately above and below the
# main (or secondary) diagonal. This, however,
# wouldn't have resulted in much performance gain or
# better detection of outer edges, because of
# relatively small sizes of diagram grids, while the
# code would have become harder to understand.
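        # A worked example (hypothetical): for a morphism from (0, 0) to
        # (2, 4), ``alpha == 0.5``; an object at (0, 3) gives ``alpha1 == 0``,
        # which is smaller in absolute value, so it blocks the space above
        # the diagonal, while an object at (2, 1) gives ``alpha1 == 2``,
        # which is larger, so it blocks the space below.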
alpha = float(end_i - start_i)/(end_j - start_j)
free_up = True
free_down = True
for i in abs_xrange(start_i, end_i):
if not free_up and not free_down:
break
for j in abs_xrange(start_j, end_j):
if not free_up and not free_down:
break
if (i, j) == (start_i, start_j):
continue
if j == start_j:
alpha1 = "inf"
else:
alpha1 = float(i - start_i)/(j - start_j)
if grid[i, j]:
if (alpha1 == "inf") or (abs(alpha1) > abs(alpha)):
free_down = False
elif abs(alpha1) < abs(alpha):
free_up = False
return (free_up, free_down, backwards)
def _push_labels_out(self, morphisms_str_info, grid, object_coords):
"""
For all straight morphisms which form the visual boundary of
the laid out diagram, puts their labels on their outer sides.
"""
def set_label_position(free1, free2, pos1, pos2, backwards, m_str_info):
"""
Given the information about room available to one side and
to the other side of a morphism (``free1`` and ``free2``),
sets the position of the morphism label in such a way that
it is on the freer side. This latter operations involves
choice between ``pos1`` and ``pos2``, taking ``backwards``
in consideration.
Thus this function will do nothing if either both ``free1
== True`` and ``free2 == True`` or both ``free1 == False``
and ``free2 == False``. In either case, choosing one side
over the other presents no advantage.
"""
if backwards:
(pos1, pos2) = (pos2, pos1)
if free1 and not free2:
m_str_info.label_position = pos1
elif free2 and not free1:
m_str_info.label_position = pos2
for m, m_str_info in morphisms_str_info.items():
if m_str_info.curving or m_str_info.forced_label_position:
# This is either a curved morphism, and curved
# morphisms have other magic, or the position of this
# label has already been fixed.
continue
if m.domain == m.codomain:
# This is a loop morphism, their labels, again have a
# different magic.
continue
(dom_i, dom_j) = object_coords[m.domain]
(cod_i, cod_j) = object_coords[m.codomain]
if dom_i == cod_i:
# Horizontal morphism.
(free_up, free_down,
backwards) = XypicDiagramDrawer._check_free_space_horizontal(
dom_i, dom_j, cod_j, grid)
set_label_position(free_up, free_down, "^", "_",
backwards, m_str_info)
elif dom_j == cod_j:
# Vertical morphism.
(free_left, free_right,
backwards) = XypicDiagramDrawer._check_free_space_vertical(
dom_i, cod_i, dom_j, grid)
set_label_position(free_left, free_right, "_", "^",
backwards, m_str_info)
else:
# A diagonal morphism.
(free_up, free_down,
backwards) = XypicDiagramDrawer._check_free_space_diagonal(
dom_i, cod_i, dom_j, cod_j, grid)
set_label_position(free_up, free_down, "^", "_",
backwards, m_str_info)
@staticmethod
def _morphism_sort_key(morphism, object_coords):
"""
Provides a morphism sorting key such that horizontal or
vertical morphisms between neighbouring objects come
first, then horizontal or vertical morphisms between more
far away objects, and finally, all other morphisms.
"""
(i, j) = object_coords[morphism.domain]
(target_i, target_j) = object_coords[morphism.codomain]
if morphism.domain == morphism.codomain:
            # Loop morphisms should be processed after diagonal morphisms
# so that the proper direction in which to curve the
# loop can be determined.
return (3, 0, default_sort_key(morphism))
if target_i == i:
return (1, abs(target_j - j), default_sort_key(morphism))
if target_j == j:
return (1, abs(target_i - i), default_sort_key(morphism))
# Diagonal morphism.
return (2, 0, default_sort_key(morphism))
@staticmethod
def _build_xypic_string(diagram, grid, morphisms,
morphisms_str_info, diagram_format):
"""
Given a collection of :class:`ArrowStringDescription`
describing the morphisms of a diagram and the object layout
information of a diagram, produces the final Xy-pic picture.
"""
# Build the mapping between objects and morphisms which have
# them as domains.
object_morphisms = {}
for obj in diagram.objects:
object_morphisms[obj] = []
for morphism in morphisms:
object_morphisms[morphism.domain].append(morphism)
result = "\\xymatrix%s{\n" % diagram_format
for i in range(grid.height):
for j in range(grid.width):
obj = grid[i, j]
if obj:
result += latex(obj) + " "
morphisms_to_draw = object_morphisms[obj]
for morphism in morphisms_to_draw:
result += str(morphisms_str_info[morphism]) + " "
# Don't put the & after the last column.
if j < grid.width - 1:
result += "& "
# Don't put the line break after the last row.
if i < grid.height - 1:
result += "\\\\"
result += "\n"
result += "}\n"
return result
def draw(self, diagram, grid, masked=None, diagram_format=""):
r"""
Returns the Xy-pic representation of ``diagram`` laid out in
``grid``.
Consider the following simple triangle diagram.
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy.categories import DiagramGrid, XypicDiagramDrawer
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g], {g * f: "unique"})
To draw this diagram, its objects need to be laid out with a
:class:`DiagramGrid`::
>>> grid = DiagramGrid(diagram)
Finally, the drawing:
>>> drawer = XypicDiagramDrawer()
>>> print(drawer.draw(diagram, grid))
\xymatrix{
A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\
C &
}
The argument ``masked`` can be used to skip morphisms in the
presentation of the diagram:
>>> print(drawer.draw(diagram, grid, masked=[g * f]))
\xymatrix{
A \ar[r]^{f} & B \ar[ld]^{g} \\
C &
}
Finally, the ``diagram_format`` argument can be used to
specify the format string of the diagram. For example, to
        increase the spacing by 1 cm, proceed as follows:
>>> print(drawer.draw(diagram, grid, diagram_format="@+1cm"))
\xymatrix@+1cm{
A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\
C &
}
"""
# This method works in several steps. It starts by removing
# the masked morphisms, if necessary, and then maps objects to
# their positions in the grid (coordinate tuples). Remember
# that objects are unique in ``Diagram`` and in the layout
# produced by ``DiagramGrid``, so every object is mapped to a
# single coordinate pair.
#
# The next step is the central step and is concerned with
# analysing the morphisms of the diagram and deciding how to
# draw them. For example, how to curve the arrows is decided
# at this step. The bulk of the analysis is implemented in
# ``_process_morphism``, to the result of which the
# appropriate formatters are applied.
#
# The result of the previous step is a list of
# ``ArrowStringDescription``. After the analysis and
# application of formatters, some extra logic tries to assure
# better positioning of morphism labels (for example, an
# attempt is made to avoid the situations when arrows cross
# labels). This functionality constitutes the next step and
# is implemented in ``_push_labels_out``. Note that label
# positions which have been set via a formatter are not
# affected in this step.
#
# Finally, at the closing step, the array of
# ``ArrowStringDescription`` and the layout information
# incorporated in ``DiagramGrid`` are combined to produce the
# resulting Xy-pic picture. This part of code lies in
# ``_build_xypic_string``.
if not masked:
morphisms_props = grid.morphisms
else:
morphisms_props = {}
for m, props in grid.morphisms.items():
if m in masked:
continue
morphisms_props[m] = props
# Build the mapping between objects and their position in the
# grid.
object_coords = {}
for i in range(grid.height):
for j in range(grid.width):
if grid[i, j]:
object_coords[grid[i, j]] = (i, j)
morphisms = sorted(morphisms_props,
key=lambda m: XypicDiagramDrawer._morphism_sort_key(
m, object_coords))
# Build the tuples defining the string representations of
# morphisms.
morphisms_str_info = {}
for morphism in morphisms:
string_description = self._process_morphism(
diagram, grid, morphism, object_coords, morphisms,
morphisms_str_info)
if self.default_arrow_formatter:
self.default_arrow_formatter(string_description)
for prop in morphisms_props[morphism]:
# prop is a Symbol. TODO: Find out why.
if prop.name in self.arrow_formatters:
formatter = self.arrow_formatters[prop.name]
formatter(string_description)
morphisms_str_info[morphism] = string_description
# Reposition the labels a bit.
self._push_labels_out(morphisms_str_info, grid, object_coords)
return XypicDiagramDrawer._build_xypic_string(
diagram, grid, morphisms, morphisms_str_info, diagram_format)
def xypic_draw_diagram(diagram, masked=None, diagram_format="",
groups=None, **hints):
r"""
Provides a shortcut combining :class:`DiagramGrid` and
:class:`XypicDiagramDrawer`. Returns an Xy-pic presentation of
``diagram``. The argument ``masked`` is a list of morphisms which
    will not be drawn. The argument ``diagram_format`` is the
format string inserted after "\xymatrix". ``groups`` should be a
set of logical groups. The ``hints`` will be passed directly to
the constructor of :class:`DiagramGrid`.
For more information about the arguments, see the docstrings of
:class:`DiagramGrid` and ``XypicDiagramDrawer.draw``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy.categories import xypic_draw_diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> diagram = Diagram([f, g], {g * f: "unique"})
>>> print(xypic_draw_diagram(diagram))
\xymatrix{
A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\
C &
}
See Also
========
XypicDiagramDrawer, DiagramGrid
"""
grid = DiagramGrid(diagram, groups, **hints)
drawer = XypicDiagramDrawer()
return drawer.draw(diagram, grid, masked, diagram_format)
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',))
def preview_diagram(diagram, masked=None, diagram_format="", groups=None,
output='png', viewer=None, euler=True, **hints):
"""
Combines the functionality of ``xypic_draw_diagram`` and
``sympy.printing.preview``. The arguments ``masked``,
``diagram_format``, ``groups``, and ``hints`` are passed to
    ``xypic_draw_diagram``, while ``output``, ``viewer``, and ``euler``
are passed to ``preview``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy.categories import preview_diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {g * f: "unique"})
>>> preview_diagram(d)
See Also
========
XypicDiagramDrawer
"""
from sympy.printing import preview
latex_output = xypic_draw_diagram(diagram, masked, diagram_format,
groups, **hints)
preview(latex_output, output, viewer, euler, ("xypic",))
from sympy.core import S, Basic, Dict, Symbol, Tuple, sympify
from sympy.core.symbol import Str
from sympy.sets import Set, FiniteSet, EmptySet
from sympy.utilities.iterables import iterable
class Class(Set):
r"""
The base class for any kind of class in the set-theoretic sense.
Explanation
===========
In axiomatic set theories, everything is a class. A class which
can be a member of another class is a set. A class which is not a
member of another class is a proper class. The class `\{1, 2\}`
is a set; the class of all sets is a proper class.
    This class is essentially a synonym for :class:`sympy.sets.sets.Set`.
The goal of this class is to assure easier migration to the
eventual proper implementation of set theory.
"""
is_proper = False
class Object(Symbol):
"""
The base class for any kind of object in an abstract category.
Explanation
===========
While technically any instance of :class:`~.Basic` will do, this
class is the recommended way to create abstract objects in
abstract categories.
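    Examples
    ========
    A minimal usage example:
    >>> from sympy.categories import Object
    >>> A = Object("A")
    >>> A
    Object("A")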
"""
class Morphism(Basic):
"""
The base class for any morphism in an abstract category.
Explanation
===========
In abstract categories, a morphism is an arrow between two
category objects. The object where the arrow starts is called the
domain, while the object where the arrow ends is called the
codomain.
Two morphisms between the same pair of objects are considered to
be the same morphisms. To distinguish between morphisms between
the same objects use :class:`NamedMorphism`.
It is prohibited to instantiate this class. Use one of the
derived classes instead.
See Also
========
IdentityMorphism, NamedMorphism, CompositeMorphism
"""
def __new__(cls, domain, codomain):
        raise NotImplementedError(
            "Cannot instantiate Morphism. Use derived classes instead.")
@property
def domain(self):
"""
Returns the domain of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.domain
Object("A")
"""
return self.args[0]
@property
def codomain(self):
"""
Returns the codomain of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.codomain
Object("B")
"""
return self.args[1]
def compose(self, other):
r"""
Composes self with the supplied morphism.
The order of elements in the composition is the usual order,
i.e., to construct `g\circ f` use ``g.compose(f)``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> g * f
CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g")))
>>> (g * f).domain
Object("A")
>>> (g * f).codomain
Object("C")
"""
return CompositeMorphism(other, self)
def __mul__(self, other):
r"""
Composes self with the supplied morphism.
The semantics of this operation is given by the following
equation: ``g * f == g.compose(f)`` for composable morphisms
``g`` and ``f``.
See Also
========
compose
"""
return self.compose(other)
class IdentityMorphism(Morphism):
"""
Represents an identity morphism.
Explanation
===========
An identity morphism is a morphism with equal domain and codomain,
which acts as an identity with respect to composition.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, IdentityMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> id_A = IdentityMorphism(A)
>>> id_B = IdentityMorphism(B)
>>> f * id_A == f
True
>>> id_B * f == f
True
See Also
========
Morphism
"""
def __new__(cls, domain):
return Basic.__new__(cls, domain)
@property
def codomain(self):
        """
        Returns the codomain, which for an identity morphism coincides
        with its domain.
        """
        return self.domain
class NamedMorphism(Morphism):
"""
Represents a morphism which has a name.
Explanation
===========
Names are used to distinguish between morphisms which have the
same domain and codomain: two named morphisms are equal if they
have the same domains, codomains, and names.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f
NamedMorphism(Object("A"), Object("B"), "f")
>>> f.name
'f'
See Also
========
Morphism
"""
def __new__(cls, domain, codomain, name):
if not name:
raise ValueError("Empty morphism names not allowed.")
if not isinstance(name, Str):
name = Str(name)
return Basic.__new__(cls, domain, codomain, name)
@property
def name(self):
"""
Returns the name of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.name
'f'
"""
return self.args[2].name
class CompositeMorphism(Morphism):
r"""
Represents a morphism which is a composition of other morphisms.
Explanation
===========
Two composite morphisms are equal if the morphisms they were
obtained from (components) are the same and were listed in the
same order.
The arguments to the constructor for this class should be listed
in diagram order: to obtain the composition `g\circ f` from the
instances of :class:`Morphism` ``g`` and ``f`` use
``CompositeMorphism(f, g)``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, CompositeMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> g * f
CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g")))
>>> CompositeMorphism(f, g) == g * f
True
"""
@staticmethod
def _add_morphism(t, morphism):
"""
Intelligently adds ``morphism`` to tuple ``t``.
Explanation
===========
If ``morphism`` is a composite morphism, its components are
added to the tuple. If ``morphism`` is an identity, nothing
is added to the tuple.
No composability checks are performed.
"""
if isinstance(morphism, CompositeMorphism):
# ``morphism`` is a composite morphism; we have to
# denest its components.
return t + morphism.components
elif isinstance(morphism, IdentityMorphism):
# ``morphism`` is an identity. Nothing happens.
return t
else:
return t + Tuple(morphism)
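# Illustrative sketch (not in the original source): with f: A-->B and
# g: B-->C, _add_morphism(Tuple(), g * f) denests the composite and yields
# Tuple(f, g), while _add_morphism(Tuple(f), IdentityMorphism(B)) drops the
# identity and returns Tuple(f) unchanged.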
def __new__(cls, *components):
if components and not isinstance(components[0], Morphism):
# Maybe the user has explicitly supplied a list of
# morphisms.
return CompositeMorphism.__new__(cls, *components[0])
normalised_components = Tuple()
for current, following in zip(components, components[1:]):
if not isinstance(current, Morphism) or \
not isinstance(following, Morphism):
raise TypeError("All components must be morphisms.")
if current.codomain != following.domain:
raise ValueError("Uncomposable morphisms.")
normalised_components = CompositeMorphism._add_morphism(
normalised_components, current)
# We haven't added the last morphism to the list of normalised
# components. Add it now.
normalised_components = CompositeMorphism._add_morphism(
normalised_components, components[-1])
if not normalised_components:
# If ``normalised_components`` is empty, only identities
# were supplied. Since they were all composable, they are
# all the same identity.
return components[0]
elif len(normalised_components) == 1:
# No sense to construct a whole CompositeMorphism.
return normalised_components[0]
return Basic.__new__(cls, normalised_components)
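# Illustrative sketch (not in the original source): because identities are
# dropped and singleton compositions are unwrapped by the constructor above,
# CompositeMorphism(IdentityMorphism(A), f) returns f itself for f: A-->B,
# and CompositeMorphism(IdentityMorphism(A), IdentityMorphism(A)) returns
# IdentityMorphism(A).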
@property
def components(self):
"""
Returns the components of this composite morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).components
(NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g"))
"""
return self.args[0]
@property
def domain(self):
"""
Returns the domain of this composite morphism.
The domain of the composite morphism is the domain of its
first component.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).domain
Object("A")
"""
return self.components[0].domain
@property
def codomain(self):
"""
Returns the codomain of this composite morphism.
The codomain of the composite morphism is the codomain of its
last component.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).codomain
Object("C")
"""
return self.components[-1].codomain
def flatten(self, new_name):
"""
Forgets the composite structure of this morphism.
Explanation
===========
If ``new_name`` is not empty, returns a :class:`NamedMorphism`
with the supplied name, otherwise returns a :class:`Morphism`.
In both cases the domain of the new morphism is the domain of
this composite morphism and the codomain of the new morphism
is the codomain of this composite morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).flatten("h")
NamedMorphism(Object("A"), Object("C"), "h")
"""
return NamedMorphism(self.domain, self.codomain, new_name)
class Category(Basic):
r"""
An (abstract) category.
Explanation
===========
A category [JoyOfCats] is a quadruple `\mbox{K} = (O, \hom, id,
\circ)` consisting of
* a (set-theoretical) class `O`, whose members are called
`K`-objects,
* for each pair `(A, B)` of `K`-objects, a set `\hom(A, B)` whose
members are called `K`-morphisms from `A` to `B`,
* for each `K`-object `A`, a morphism `id:A\rightarrow A`,
called the `K`-identity of `A`,
* a composition law `\circ` associating with every `K`-morphisms
`f:A\rightarrow B` and `g:B\rightarrow C` a `K`-morphism `g\circ
f:A\rightarrow C`, called the composite of `f` and `g`.
Composition is associative, `K`-identities are identities with
respect to composition, and the sets `\hom(A, B)` are pairwise
disjoint.
This class knows nothing about its objects and morphisms.
Concrete cases of (abstract) categories should be implemented as
classes derived from this one.
Certain instances of :class:`Diagram` can be asserted to be
commutative in a :class:`Category` by supplying the argument
``commutative_diagrams`` in the constructor.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram, Category
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> K = Category("K", commutative_diagrams=[d])
>>> K.commutative_diagrams == FiniteSet(d)
True
See Also
========
Diagram
"""
def __new__(cls, name, objects=EmptySet, commutative_diagrams=EmptySet):
if not name:
raise ValueError("A Category cannot have an empty name.")
if not isinstance(name, Str):
name = Str(name)
if not isinstance(objects, Class):
objects = Class(objects)
new_category = Basic.__new__(cls, name, objects,
FiniteSet(*commutative_diagrams))
return new_category
@property
def name(self):
"""
Returns the name of this category.
Examples
========
>>> from sympy.categories import Category
>>> K = Category("K")
>>> K.name
'K'
"""
return self.args[0].name
@property
def objects(self):
"""
Returns the class of objects of this category.
Examples
========
>>> from sympy.categories import Object, Category
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> K = Category("K", FiniteSet(A, B))
>>> K.objects
Class({Object("A"), Object("B")})
"""
return self.args[1]
@property
def commutative_diagrams(self):
"""
Returns the :class:`~.FiniteSet` of diagrams which are known to
be commutative in this category.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram, Category
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> K = Category("K", commutative_diagrams=[d])
>>> K.commutative_diagrams == FiniteSet(d)
True
"""
return self.args[2]
def hom(self, A, B):
raise NotImplementedError(
"hom-sets are not implemented in Category.")
def all_morphisms(self):
raise NotImplementedError(
"Obtaining the class of morphisms is not implemented in Category.")
class Diagram(Basic):
r"""
Represents a diagram in a certain category.
Explanation
===========
Informally, a diagram is a collection of objects of a category and
certain morphisms between them. A diagram is still a monoid with
respect to morphism composition; i.e., identity morphisms, as well
as all composites of morphisms included in the diagram belong to
the diagram. For a more formal approach to this notion see
[Pare1970].
The components of composite morphisms are also added to the
diagram. No properties are assigned to such morphisms by default.
A commutative diagram is often accompanied by a statement of the
following kind: "if such morphisms with such properties exist,
then such morphisms with such properties exist and the diagram is
commutative". To represent this, an instance of :class:`Diagram`
includes a collection of morphisms which are the premises and
another collection of conclusions. ``premises`` and
``conclusions`` associate morphisms belonging to the corresponding
categories with the :class:`~.FiniteSet`'s of their properties.
The set of properties of a composite morphism is the intersection
of the sets of properties of its components. The domain and
codomain of a conclusion morphism should be among the domains and
codomains of the morphisms listed as the premises of a diagram.
No checks are carried out as to whether the supplied objects and
morphisms belong to one and the same category.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import pprint, default_sort_key
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> premises_keys = sorted(d.premises.keys(), key=default_sort_key)
>>> pprint(premises_keys, use_unicode=False)
[g*f:A-->C, id:A-->A, id:B-->B, id:C-->C, f:A-->B, g:B-->C]
>>> pprint(d.premises, use_unicode=False)
{g*f:A-->C: EmptySet, id:A-->A: EmptySet, id:B-->B: EmptySet, id:C-->C: EmptyS
et, f:A-->B: EmptySet, g:B-->C: EmptySet}
>>> d = Diagram([f, g], {g * f: "unique"})
>>> pprint(d.conclusions,use_unicode=False)
{g*f:A-->C: {unique}}
References
==========
[Pare1970] B. Pareigis: Categories and functors. Academic Press, 1970.
"""
@staticmethod
def _set_dict_union(dictionary, key, value):
"""
If ``key`` is in ``dictionary``, set the new value of ``key``
to be the union between the old value and ``value``.
Otherwise, set the value of ``key`` to ``value``.
Returns ``True`` if the key already was in the dictionary and
``False`` otherwise.
"""
if key in dictionary:
dictionary[key] = dictionary[key] | value
return True
else:
dictionary[key] = value
return False
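# Illustrative sketch (not in the original source): starting from d = {},
# Diagram._set_dict_union(d, f, FiniteSet("unique")) returns False and sets
# d[f] to {unique}; a second call with FiniteSet("epi") returns True and
# leaves d[f] equal to the union {unique, epi}.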
@staticmethod
def _add_morphism_closure(morphisms, morphism, props, add_identities=True,
recurse_composites=True):
"""
Adds a morphism and its attributes to the supplied dictionary
``morphisms``. If ``add_identities`` is True, also adds the
identity morphisms for the domain and the codomain of
``morphism``.
"""
if not Diagram._set_dict_union(morphisms, morphism, props):
# We have just added a new morphism.
if isinstance(morphism, IdentityMorphism):
if props:
# Properties for identity morphisms do not really make
# sense: everything about an identity morphism is already
# known, so attaching properties to one would only be
# confusing.
raise ValueError(
"Instances of IdentityMorphism cannot have properties.")
return
if add_identities:
empty = EmptySet
id_dom = IdentityMorphism(morphism.domain)
id_cod = IdentityMorphism(morphism.codomain)
Diagram._set_dict_union(morphisms, id_dom, empty)
Diagram._set_dict_union(morphisms, id_cod, empty)
for existing_morphism, existing_props in list(morphisms.items()):
new_props = existing_props & props
if morphism.domain == existing_morphism.codomain:
left = morphism * existing_morphism
Diagram._set_dict_union(morphisms, left, new_props)
if morphism.codomain == existing_morphism.domain:
right = existing_morphism * morphism
Diagram._set_dict_union(morphisms, right, new_props)
if isinstance(morphism, CompositeMorphism) and recurse_composites:
# This is a composite morphism, add its components as
# well.
empty = EmptySet
for component in morphism.components:
Diagram._add_morphism_closure(morphisms, component, empty,
add_identities)
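# Illustrative sketch (not in the original source): starting from an empty
# dict d, adding f: A-->B with add_identities=True puts f, id:A-->A and
# id:B-->B into d; adding g: B-->C afterwards also inserts the composite
# g * f, whose property set is the intersection of the properties of f and g.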
def __new__(cls, *args):
"""
Construct a new instance of Diagram.
Explanation
===========
If no arguments are supplied, an empty diagram is created.
If at least one argument is supplied, ``args[0]`` is
interpreted as the premises of the diagram. If ``args[0]`` is
a list, it is interpreted as a list of :class:`Morphism`'s, in
which each :class:`Morphism` has an empty set of properties.
If ``args[0]`` is a Python dictionary or a :class:`Dict`, it
is interpreted as a dictionary associating to some
:class:`Morphism`'s some properties.
If at least two arguments are supplied, ``args[1]`` is
interpreted as the conclusions of the diagram. The type of
``args[1]`` is interpreted in exactly the same way as the type
of ``args[0]``. If only one argument is supplied, the diagram
has no conclusions.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> IdentityMorphism(A) in d.premises.keys()
True
>>> g * f in d.premises.keys()
True
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d.conclusions[g * f]
{unique}
"""
premises = {}
conclusions = {}
# Here we will keep track of the objects which appear in the
# premises.
objects = EmptySet
if len(args) >= 1:
# We've got some premises in the arguments.
premises_arg = args[0]
if isinstance(premises_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet
for morphism in premises_arg:
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(premises, morphism, empty)
elif isinstance(premises_arg, (dict, Dict)):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in premises_arg.items():
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(
premises, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props))
if len(args) >= 2:
# We also have some conclusions.
conclusions_arg = args[1]
if isinstance(conclusions_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet
for morphism in conclusions_arg:
# Check that no new objects appear in conclusions.
if ((sympify(objects.contains(morphism.domain)) is S.true) and
(sympify(objects.contains(morphism.codomain)) is S.true)):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions, morphism, empty, add_identities=False,
recurse_composites=False)
elif isinstance(conclusions_arg, dict) or \
isinstance(conclusions_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in conclusions_arg.items():
# Check that no new objects appear in conclusions.
if (morphism.domain in objects) and \
(morphism.codomain in objects):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props),
add_identities=False, recurse_composites=False)
return Basic.__new__(cls, Dict(premises), Dict(conclusions), objects)
@property
def premises(self):
"""
Returns the premises of this diagram.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> from sympy import pretty
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> id_A = IdentityMorphism(A)
>>> id_B = IdentityMorphism(B)
>>> d = Diagram([f])
>>> print(pretty(d.premises, use_unicode=False))
{id:A-->A: EmptySet, id:B-->B: EmptySet, f:A-->B: EmptySet}
"""
return self.args[0]
@property
def conclusions(self):
"""
Returns the conclusions of this diagram.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> IdentityMorphism(A) in d.premises.keys()
True
>>> g * f in d.premises.keys()
True
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d.conclusions[g * f] == FiniteSet("unique")
True
"""
return self.args[1]
@property
def objects(self):
"""
Returns the :class:`~.FiniteSet` of objects that appear in this
diagram.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> d.objects
{Object("A"), Object("B"), Object("C")}
"""
return self.args[2]
def hom(self, A, B):
"""
Returns a 2-tuple of sets of morphisms between objects ``A`` and
``B``: one set of morphisms listed as premises, and the other set
of morphisms listed as conclusions.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import pretty
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {g * f: "unique"})
>>> print(pretty(d.hom(A, C), use_unicode=False))
({g*f:A-->C}, {g*f:A-->C})
See Also
========
Object, Morphism
"""
premises = EmptySet
conclusions = EmptySet
for morphism in self.premises.keys():
if (morphism.domain == A) and (morphism.codomain == B):
premises |= FiniteSet(morphism)
for morphism in self.conclusions.keys():
if (morphism.domain == A) and (morphism.codomain == B):
conclusions |= FiniteSet(morphism)
return (premises, conclusions)
def is_subdiagram(self, diagram):
"""
Checks whether ``diagram`` is a subdiagram of ``self``.
Diagram `D'` is a subdiagram of `D` if all premises
(conclusions) of `D'` are contained in the premises
(conclusions) of `D`. The morphisms contained
both in `D'` and `D` should have the same properties for `D'`
to be a subdiagram of `D`.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d1 = Diagram([f])
>>> d.is_subdiagram(d1)
True
>>> d1.is_subdiagram(d)
False
"""
premises = all((m in self.premises) and
(diagram.premises[m] == self.premises[m])
for m in diagram.premises)
if not premises:
return False
conclusions = all((m in self.conclusions) and
(diagram.conclusions[m] == self.conclusions[m])
for m in diagram.conclusions)
# Premises is surely ``True`` here.
return conclusions
def subdiagram_from_objects(self, objects):
"""
If ``objects`` is a subset of the objects of ``self``, returns
a diagram which has as premises all those premises of ``self``
which have domains and codomains in ``objects``, and likewise
for conclusions. Properties are preserved.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {f: "unique", g*f: "veryunique"})
>>> d1 = d.subdiagram_from_objects(FiniteSet(A, B))
>>> d1 == Diagram([f], {f: "unique"})
True
"""
if not objects.is_subset(self.objects):
raise ValueError(
"Supplied objects should all belong to the diagram.")
new_premises = {}
for morphism, props in self.premises.items():
if ((sympify(objects.contains(morphism.domain)) is S.true) and
(sympify(objects.contains(morphism.codomain)) is S.true)):
new_premises[morphism] = props
new_conclusions = {}
for morphism, props in self.conclusions.items():
if ((sympify(objects.contains(morphism.domain)) is S.true) and
(sympify(objects.contains(morphism.codomain)) is S.true)):
new_conclusions[morphism] = props
return Diagram(new_premises, new_conclusions)
|
829b3d9671a71ecf9ffebadd11b3bd192b2f1bc5a204eb555ef2ddd2bd46f146 | from sympy.core.numbers import Rational
from sympy.core.singleton import S
from sympy.functions.elementary.complexes import (conjugate, im, re, sign)
from sympy.functions.elementary.exponential import (exp, log as ln)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (acos, cos, sin)
from sympy.simplify.trigsimp import trigsimp
from sympy.integrals.integrals import integrate
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.core.sympify import sympify
from sympy.core.expr import Expr
from mpmath.libmp.libmpf import prec_to_dps
class Quaternion(Expr):
"""Provides basic quaternion operations.
Quaternion objects can be instantiated as Quaternion(a, b, c, d)
as in (a + b*i + c*j + d*k).
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q
1 + 2*i + 3*j + 4*k
Quaternions over complex fields can be defined as:
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import symbols, I
>>> x = symbols('x')
>>> q1 = Quaternion(x, x**3, x, x**2, real_field = False)
>>> q2 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> q1
x + x**3*i + x*j + x**2*k
>>> q2
(3 + 4*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k
References
==========
.. [1] http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/
.. [2] https://en.wikipedia.org/wiki/Quaternion
"""
_op_priority = 11.0
is_commutative = False
def __new__(cls, a=0, b=0, c=0, d=0, real_field=True):
a = sympify(a)
b = sympify(b)
c = sympify(c)
d = sympify(d)
if any(i.is_commutative is False for i in [a, b, c, d]):
raise ValueError("arguments have to be commutative")
else:
obj = Expr.__new__(cls, a, b, c, d)
obj._a = a
obj._b = b
obj._c = c
obj._d = d
obj._real_field = real_field
return obj
@property
def a(self):
return self._a
@property
def b(self):
return self._b
@property
def c(self):
return self._c
@property
def d(self):
return self._d
@property
def real_field(self):
return self._real_field
@classmethod
def from_axis_angle(cls, vector, angle):
"""Returns a rotation quaternion given the axis and the angle of rotation.
Parameters
==========
vector : tuple of three numbers
The vector representation of the given axis.
angle : number
The angle of rotation about the axis (in radians).
Returns
=======
Quaternion
The normalized rotation quaternion calculated from the given axis and the angle of rotation.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import pi, sqrt
>>> q = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), 2*pi/3)
>>> q
1/2 + 1/2*i + 1/2*j + 1/2*k
"""
(x, y, z) = vector
norm = sqrt(x**2 + y**2 + z**2)
(x, y, z) = (x / norm, y / norm, z / norm)
s = sin(angle * S.Half)
a = cos(angle * S.Half)
b = x * s
c = y * s
d = z * s
# note that this quaternion is already normalized by construction:
# c^2 + (s*x)^2 + (s*y)^2 + (s*z)^2 = c^2 + s^2*(x^2 + y^2 + z^2) = c^2 + s^2 * 1 = c^2 + s^2 = 1
# so, what we return is a normalized quaternion
return cls(a, b, c, d)
@classmethod
def from_rotation_matrix(cls, M):
"""Returns the equivalent quaternion of a matrix. The quaternion will be normalized
only if the matrix is special orthogonal (orthogonal and det(M) = 1).
Parameters
==========
M : Matrix
Input matrix to be converted to equivalent quaternion. M must be special
orthogonal (orthogonal and det(M) = 1) for the quaternion to be normalized.
Returns
=======
Quaternion
The quaternion equivalent to given matrix.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import Matrix, symbols, cos, sin, trigsimp
>>> x = symbols('x')
>>> M = Matrix([[cos(x), -sin(x), 0], [sin(x), cos(x), 0], [0, 0, 1]])
>>> q = trigsimp(Quaternion.from_rotation_matrix(M))
>>> q
sqrt(2)*sqrt(cos(x) + 1)/2 + 0*i + 0*j + sqrt(2 - 2*cos(x))*sign(sin(x))/2*k
"""
absQ = M.det()**Rational(1, 3)
a = sqrt(absQ + M[0, 0] + M[1, 1] + M[2, 2]) / 2
b = sqrt(absQ + M[0, 0] - M[1, 1] - M[2, 2]) / 2
c = sqrt(absQ - M[0, 0] + M[1, 1] - M[2, 2]) / 2
d = sqrt(absQ - M[0, 0] - M[1, 1] + M[2, 2]) / 2
b = b * sign(M[2, 1] - M[1, 2])
c = c * sign(M[0, 2] - M[2, 0])
d = d * sign(M[1, 0] - M[0, 1])
return Quaternion(a, b, c, d)
def __add__(self, other):
return self.add(other)
def __radd__(self, other):
return self.add(other)
def __sub__(self, other):
return self.add(other*-1)
def __mul__(self, other):
return self._generic_mul(self, other)
def __rmul__(self, other):
return self._generic_mul(other, self)
def __pow__(self, p):
return self.pow(p)
def __neg__(self):
return Quaternion(-self._a, -self._b, -self._c, -self._d)
def __truediv__(self, other):
return self * sympify(other)**-1
def __rtruediv__(self, other):
return sympify(other) * self**-1
def _eval_Integral(self, *args):
return self.integrate(*args)
def diff(self, *symbols, **kwargs):
kwargs.setdefault('evaluate', True)
return self.func(*[a.diff(*symbols, **kwargs) for a in self.args])
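# Illustrative sketch (not in the original source): differentiation is applied
# componentwise, so with x = symbols('x'),
# Quaternion(x, x**2, 0, 0).diff(x) gives Quaternion(1, 2*x, 0, 0),
# i.e. 1 + 2*x*i + 0*j + 0*k.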
def add(self, other):
"""Adds quaternions.
Parameters
==========
other : Quaternion
The quaternion to add to the current (self) quaternion.
Returns
=======
Quaternion
The resultant quaternion after adding self to other
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import symbols
>>> q1 = Quaternion(1, 2, 3, 4)
>>> q2 = Quaternion(5, 6, 7, 8)
>>> q1.add(q2)
6 + 8*i + 10*j + 12*k
>>> q1 + 5
6 + 2*i + 3*j + 4*k
>>> x = symbols('x', real = True)
>>> q1.add(x)
(x + 1) + 2*i + 3*j + 4*k
Quaternions over complex fields:
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import I
>>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> q3.add(2 + 3*I)
(5 + 7*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k
"""
q1 = self
q2 = sympify(other)
# If q2 is a number or a SymPy expression instead of a quaternion
if not isinstance(q2, Quaternion):
if q1.real_field and q2.is_complex:
return Quaternion(re(q2) + q1.a, im(q2) + q1.b, q1.c, q1.d)
elif q2.is_commutative:
return Quaternion(q1.a + q2, q1.b, q1.c, q1.d)
else:
raise ValueError("Only commutative expressions can be added with a Quaternion.")
return Quaternion(q1.a + q2.a, q1.b + q2.b, q1.c + q2.c, q1.d
+ q2.d)
def mul(self, other):
"""Multiplies quaternions.
Parameters
==========
other : Quaternion or symbol
The quaternion to multiply with the current (self) quaternion.
Returns
=======
Quaternion
The resultant quaternion after multiplying self with other
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import symbols
>>> q1 = Quaternion(1, 2, 3, 4)
>>> q2 = Quaternion(5, 6, 7, 8)
>>> q1.mul(q2)
(-60) + 12*i + 30*j + 24*k
>>> q1.mul(2)
2 + 4*i + 6*j + 8*k
>>> x = symbols('x', real = True)
>>> q1.mul(x)
x + 2*x*i + 3*x*j + 4*x*k
Quaternions over complex fields:
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import I
>>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> q3.mul(2 + 3*I)
(2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k
"""
return self._generic_mul(self, other)
@staticmethod
def _generic_mul(q1, q2):
"""Generic multiplication.
Parameters
==========
q1 : Quaternion or symbol
q2 : Quaternion or symbol
It's important to note that if neither q1 nor q2 is a Quaternion,
this function simply returns q1 * q2.
Returns
=======
Quaternion
The resultant quaternion after multiplying q1 and q2
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import Symbol
>>> q1 = Quaternion(1, 2, 3, 4)
>>> q2 = Quaternion(5, 6, 7, 8)
>>> Quaternion._generic_mul(q1, q2)
(-60) + 12*i + 30*j + 24*k
>>> Quaternion._generic_mul(q1, 2)
2 + 4*i + 6*j + 8*k
>>> x = Symbol('x', real = True)
>>> Quaternion._generic_mul(q1, x)
x + 2*x*i + 3*x*j + 4*x*k
Quaternions over complex fields:
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import I
>>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False)
>>> Quaternion._generic_mul(q3, 2 + 3*I)
(2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k
"""
q1 = sympify(q1)
q2 = sympify(q2)
# Neither argument is a Quaternion:
if not isinstance(q1, Quaternion) and not isinstance(q2, Quaternion):
return q1 * q2
# If q1 is a number or a SymPy expression instead of a quaternion
if not isinstance(q1, Quaternion):
if q2.real_field and q1.is_complex:
return Quaternion(re(q1), im(q1), 0, 0) * q2
elif q1.is_commutative:
return Quaternion(q1 * q2.a, q1 * q2.b, q1 * q2.c, q1 * q2.d)
else:
raise ValueError("Only commutative expressions can be multiplied with a Quaternion.")
# If q2 is a number or a SymPy expression instead of a quaternion
if not isinstance(q2, Quaternion):
if q1.real_field and q2.is_complex:
return q1 * Quaternion(re(q2), im(q2), 0, 0)
elif q2.is_commutative:
return Quaternion(q2 * q1.a, q2 * q1.b, q2 * q1.c, q2 * q1.d)
else:
raise ValueError("Only commutative expressions can be multiplied with a Quaternion.")
return Quaternion(-q1.b*q2.b - q1.c*q2.c - q1.d*q2.d + q1.a*q2.a,
q1.b*q2.a + q1.c*q2.d - q1.d*q2.c + q1.a*q2.b,
-q1.b*q2.d + q1.c*q2.a + q1.d*q2.b + q1.a*q2.c,
q1.b*q2.c - q1.c*q2.b + q1.d*q2.a + q1.a * q2.d)
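# Illustrative sanity check (not in the original source): the formula above
# implements the Hamilton product, so the basis quaternions satisfy i*j = k,
# e.g. Quaternion(0, 1, 0, 0) * Quaternion(0, 0, 1, 0) equals
# Quaternion(0, 0, 0, 1).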
def _eval_conjugate(self):
"""Returns the conjugate of the quaternion."""
q = self
return Quaternion(q.a, -q.b, -q.c, -q.d)
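# Illustrative sketch (not in the original source): for q = Quaternion(1, 2, 3, 4)
# the conjugate is Quaternion(1, -2, -3, -4); multiplying q by its conjugate
# yields a purely real quaternion, Quaternion(30, 0, 0, 0).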
def norm(self):
"""Returns the norm of the quaternion."""
q = self
# trigsimp is used to simplify sin(x)^2 + cos(x)^2 (these terms
# arise when from_axis_angle is used).
return sqrt(trigsimp(q.a**2 + q.b**2 + q.c**2 + q.d**2))
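# Illustrative sketch (not in the original source):
# Quaternion(1, 2, 3, 4).norm() evaluates to sqrt(1 + 4 + 9 + 16) = sqrt(30);
# the trigsimp call also reduces sin(x)**2 + cos(x)**2 terms that arise from
# quaternions built with from_axis_angle.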
def normalize(self):
"""Returns the normalized form of the quaternion."""
q = self
return q * (1/q.norm())
def inverse(self):
"""Returns the inverse of the quaternion."""
q = self
if not q.norm():
raise ValueError("Cannot compute inverse for a quaternion with zero norm")
return conjugate(q) * (1/q.norm()**2)
def pow(self, p):
"""Finds the pth power of the quaternion.
Parameters
==========
p : int
Power to be applied to the quaternion.
Returns
=======
Quaternion
Returns the p-th power of the current quaternion.
Returns the inverse if p = -1.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q.pow(4)
668 + (-224)*i + (-336)*j + (-448)*k
"""
p = sympify(p)
q = self
if p == -1:
return q.inverse()
res = 1
if not p.is_Integer:
return NotImplemented
if p < 0:
q, p = q.inverse(), -p
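# Repeated squaring: at each step the lowest bit of p decides whether the
# current square q is multiplied into the result, so the loop needs only
# O(log p) quaternion multiplications.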
while p > 0:
if p % 2 == 1:
res = q * res
p = p//2
q = q * q
return res
def exp(self):
"""Returns the exponential of q (e^q).
Returns
=======
Quaternion
Exponential of q (e^q).
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q.exp()
E*cos(sqrt(29))
+ 2*sqrt(29)*E*sin(sqrt(29))/29*i
+ 3*sqrt(29)*E*sin(sqrt(29))/29*j
+ 4*sqrt(29)*E*sin(sqrt(29))/29*k
"""
# exp(q) = e^a(cos||v|| + v/||v||*sin||v||)
q = self
vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2)
a = exp(q.a) * cos(vector_norm)
b = exp(q.a) * sin(vector_norm) * q.b / vector_norm
c = exp(q.a) * sin(vector_norm) * q.c / vector_norm
d = exp(q.a) * sin(vector_norm) * q.d / vector_norm
return Quaternion(a, b, c, d)
def _ln(self):
"""Returns the natural logarithm of the quaternion (_ln(q)).
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q._ln()
log(sqrt(30))
+ 2*sqrt(29)*acos(sqrt(30)/30)/29*i
+ 3*sqrt(29)*acos(sqrt(30)/30)/29*j
+ 4*sqrt(29)*acos(sqrt(30)/30)/29*k
"""
# _ln(q) = _ln||q|| + v/||v||*arccos(a/||q||)
q = self
vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2)
q_norm = q.norm()
a = ln(q_norm)
b = q.b * acos(q.a / q_norm) / vector_norm
c = q.c * acos(q.a / q_norm) / vector_norm
d = q.d * acos(q.a / q_norm) / vector_norm
return Quaternion(a, b, c, d)
def _eval_evalf(self, prec):
"""Returns the floating point approximations (decimal numbers) of the quaternion.
Returns
=======
Quaternion
Floating point approximations of the quaternion (self).
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import sqrt
>>> q = Quaternion(1/sqrt(1), 1/sqrt(2), 1/sqrt(3), 1/sqrt(4))
>>> q.evalf()
1.00000000000000
+ 0.707106781186547*i
+ 0.577350269189626*j
+ 0.500000000000000*k
"""
nprec = prec_to_dps(prec)
return Quaternion(*[arg.evalf(n=nprec) for arg in self.args])
def pow_cos_sin(self, p):
"""Computes the pth power in the cos-sin form.
Parameters
==========
p : int
Power to be applied to the quaternion.
Returns
=======
Quaternion
The p-th power in the cos-sin form.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 2, 3, 4)
>>> q.pow_cos_sin(4)
900*cos(4*acos(sqrt(30)/30))
+ 1800*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*i
+ 2700*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*j
+ 3600*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*k
"""
# q = ||q||*(cos(a) + u*sin(a))
# q^p = ||q||^p * (cos(p*a) + u*sin(p*a))
q = self
(v, angle) = q.to_axis_angle()
q2 = Quaternion.from_axis_angle(v, p * angle)
return q2 * (q.norm()**p)
def integrate(self, *args):
"""Computes integration of quaternion.
Returns
=======
Quaternion
Integration of the quaternion (self) with respect to the given variable.
Examples
========
Indefinite Integral of quaternion :
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy.abc import x
>>> q = Quaternion(1, 2, 3, 4)
>>> q.integrate(x)
x + 2*x*i + 3*x*j + 4*x*k
Definite integral of quaternion :
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy.abc import x
>>> q = Quaternion(1, 2, 3, 4)
>>> q.integrate((x, 1, 5))
4 + 8*i + 12*j + 16*k
"""
# TODO: is this expression correct?
return Quaternion(integrate(self.a, *args), integrate(self.b, *args),
integrate(self.c, *args), integrate(self.d, *args))
@staticmethod
def rotate_point(pin, r):
"""Returns the coordinates of the point pin(a 3 tuple) after rotation.
Parameters
==========
pin : tuple
A 3-element tuple of coordinates of a point which needs to be
rotated.
r : Quaternion or tuple
Axis and angle of rotation.
It's important to note that when r is a tuple, it must be of the form
(axis, angle)
Returns
=======
tuple
The coordinates of the point after rotation.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import symbols, trigsimp, cos, sin
>>> x = symbols('x')
>>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))
>>> trigsimp(Quaternion.rotate_point((1, 1, 1), q))
(sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)
>>> (axis, angle) = q.to_axis_angle()
>>> trigsimp(Quaternion.rotate_point((1, 1, 1), (axis, angle)))
(sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)
"""
if isinstance(r, tuple):
# if r is of the form (vector, angle)
q = Quaternion.from_axis_angle(r[0], r[1])
else:
# if r is a quaternion
q = r.normalize()
pout = q * Quaternion(0, pin[0], pin[1], pin[2]) * conjugate(q)
return (pout.b, pout.c, pout.d)
def to_axis_angle(self):
"""Returns the axis and angle of rotation of a quaternion
Returns
=======
tuple
Tuple of (axis, angle)
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 1, 1, 1)
>>> (axis, angle) = q.to_axis_angle()
>>> axis
(sqrt(3)/3, sqrt(3)/3, sqrt(3)/3)
>>> angle
2*pi/3
"""
q = self
if q.a.is_negative:
q = q * -1
q = q.normalize()
angle = trigsimp(2 * acos(q.a))
# Since the quaternion is normalized, q.a is at most 1 in absolute value.
s = sqrt(1 - q.a*q.a)
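# With q.a = cos(angle/2), s equals sin(angle/2), so dividing the imaginary
# components by s recovers the unit rotation axis.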
x = trigsimp(q.b / s)
y = trigsimp(q.c / s)
z = trigsimp(q.d / s)
v = (x, y, z)
t = (v, angle)
return t
def to_rotation_matrix(self, v=None):
"""Returns the equivalent rotation transformation matrix of the quaternion
which represents rotation about the origin if v is not passed.
Parameters
==========
v : tuple or None
Default value: None
Returns
=======
Matrix
Returns the equivalent rotation transformation matrix of the quaternion
which represents rotation about the origin if v is not passed.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import symbols, trigsimp, cos, sin
>>> x = symbols('x')
>>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))
>>> trigsimp(q.to_rotation_matrix())
Matrix([
[cos(x), -sin(x), 0],
[sin(x), cos(x), 0],
[ 0, 0, 1]])
Generates a 4x4 transformation matrix (used for rotation about a point
other than the origin) if the point v is passed as an argument.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> from sympy import symbols, trigsimp, cos, sin
>>> x = symbols('x')
>>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))
>>> trigsimp(q.to_rotation_matrix((1, 1, 1)))
Matrix([
[cos(x), -sin(x), 0, sin(x) - cos(x) + 1],
[sin(x), cos(x), 0, -sin(x) - cos(x) + 1],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]])
"""
q = self
s = q.norm()**-2
m00 = 1 - 2*s*(q.c**2 + q.d**2)
m01 = 2*s*(q.b*q.c - q.d*q.a)
m02 = 2*s*(q.b*q.d + q.c*q.a)
m10 = 2*s*(q.b*q.c + q.d*q.a)
m11 = 1 - 2*s*(q.b**2 + q.d**2)
m12 = 2*s*(q.c*q.d - q.b*q.a)
m20 = 2*s*(q.b*q.d - q.c*q.a)
m21 = 2*s*(q.c*q.d + q.b*q.a)
m22 = 1 - 2*s*(q.b**2 + q.c**2)
if not v:
return Matrix([[m00, m01, m02], [m10, m11, m12], [m20, m21, m22]])
else:
(x, y, z) = v
m03 = x - x*m00 - y*m01 - z*m02
m13 = y - x*m10 - y*m11 - z*m12
m23 = z - x*m20 - y*m21 - z*m22
m30 = m31 = m32 = 0
m33 = 1
return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13],
[m20, m21, m22, m23], [m30, m31, m32, m33]])
|
83b2d046c557b92adbae189699deda0a2a52a9237cbd750206977fb9b504cd21 | """Predefined R^n manifolds together with common coord. systems.
Coordinate systems are predefined as well as the transformation laws between
them.
Coordinate functions can be accessed as attributes of the manifold (e.g. `R2.x`),
as attributes of the coordinate systems (e.g. `R2_r.x` and `R2_p.theta`), or by
using the usual `coord_sys.coord_function(index, name)` interface.
"""
from typing import Any
import warnings
from sympy.core.symbol import (Dummy, symbols)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (acos, atan2, cos, sin)
from .diffgeom import Manifold, Patch, CoordSystem
__all__ = [
'R2', 'R2_origin', 'relations_2d', 'R2_r', 'R2_p',
'R3', 'R3_origin', 'relations_3d', 'R3_r', 'R3_c', 'R3_s'
]
###############################################################################
# R2
###############################################################################
R2 = Manifold('R^2', 2) # type: Any
R2_origin = Patch('origin', R2) # type: Any
x, y = symbols('x y', real=True)
r, theta = symbols('rho theta', nonnegative=True)
relations_2d = {
('rectangular', 'polar'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))],
('polar', 'rectangular'): [(r, theta), (r*cos(theta), r*sin(theta))],
}
R2_r = CoordSystem('rectangular', R2_origin, (x, y), relations_2d) # type: Any
R2_p = CoordSystem('polar', R2_origin, (r, theta), relations_2d) # type: Any
# support deprecated feature
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x, y, r, theta = symbols('x y r theta', cls=Dummy)
R2_r.connect_to(R2_p, [x, y],
[sqrt(x**2 + y**2), atan2(y, x)],
inverse=False, fill_in_gaps=False)
R2_p.connect_to(R2_r, [r, theta],
[r*cos(theta), r*sin(theta)],
inverse=False, fill_in_gaps=False)
# Defining the basis coordinate functions and adding shortcuts for them to the
# manifold and the patch.
R2.x, R2.y = R2_origin.x, R2_origin.y = R2_r.x, R2_r.y = R2_r.coord_functions()
R2.r, R2.theta = R2_origin.r, R2_origin.theta = R2_p.r, R2_p.theta = R2_p.coord_functions()
# Defining the basis vector fields and adding shortcuts for them to the
# manifold and the patch.
R2.e_x, R2.e_y = R2_origin.e_x, R2_origin.e_y = R2_r.e_x, R2_r.e_y = R2_r.base_vectors()
R2.e_r, R2.e_theta = R2_origin.e_r, R2_origin.e_theta = R2_p.e_r, R2_p.e_theta = R2_p.base_vectors()
# Defining the basis oneform fields and adding shortcuts for them to the
# manifold and the patch.
R2.dx, R2.dy = R2_origin.dx, R2_origin.dy = R2_r.dx, R2_r.dy = R2_r.base_oneforms()
R2.dr, R2.dtheta = R2_origin.dr, R2_origin.dtheta = R2_p.dr, R2_p.dtheta = R2_p.base_oneforms()
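# Illustrative usage sketch (not in the original source): with the relations
# registered above, coordinates can be converted between the two systems,
# e.g. R2_p.transform(R2_r, [1, pi/2]) evaluates to Matrix([[0], [1]]) and
# R2_r.transform(R2_p, [1, 0]) evaluates to Matrix([[1], [0]]).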
###############################################################################
# R3
###############################################################################
R3 = Manifold('R^3', 3) # type: Any
R3_origin = Patch('origin', R3) # type: Any
x, y, z = symbols('x y z', real=True)
rho, psi, r, theta, phi = symbols('rho psi r theta phi', nonnegative=True)
relations_3d = {
('rectangular', 'cylindrical'): [(x, y, z),
(sqrt(x**2 + y**2), atan2(y, x), z)],
('cylindrical', 'rectangular'): [(rho, psi, z),
(rho*cos(psi), rho*sin(psi), z)],
('rectangular', 'spherical'): [(x, y, z),
(sqrt(x**2 + y**2 + z**2),
acos(z/sqrt(x**2 + y**2 + z**2)),
atan2(y, x))],
('spherical', 'rectangular'): [(r, theta, phi),
(r*sin(theta)*cos(phi),
r*sin(theta)*sin(phi),
r*cos(theta))],
('cylindrical', 'spherical'): [(rho, psi, z),
(sqrt(rho**2 + z**2),
acos(z/sqrt(rho**2 + z**2)),
psi)],
('spherical', 'cylindrical'): [(r, theta, phi),
(r*sin(theta), phi, r*cos(theta))],
}
R3_r = CoordSystem('rectangular', R3_origin, (x, y, z), relations_3d) # type: Any
R3_c = CoordSystem('cylindrical', R3_origin, (rho, psi, z), relations_3d) # type: Any
R3_s = CoordSystem('spherical', R3_origin, (r, theta, phi), relations_3d) # type: Any
# support deprecated feature
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x, y, z, rho, psi, r, theta, phi = symbols('x y z rho psi r theta phi', cls=Dummy)
R3_r.connect_to(R3_c, [x, y, z],
[sqrt(x**2 + y**2), atan2(y, x), z],
inverse=False, fill_in_gaps=False)
R3_c.connect_to(R3_r, [rho, psi, z],
[rho*cos(psi), rho*sin(psi), z],
inverse=False, fill_in_gaps=False)
## rectangular <-> spherical
R3_r.connect_to(R3_s, [x, y, z],
[sqrt(x**2 + y**2 + z**2), acos(z/
sqrt(x**2 + y**2 + z**2)), atan2(y, x)],
inverse=False, fill_in_gaps=False)
R3_s.connect_to(R3_r, [r, theta, phi],
[r*sin(theta)*cos(phi), r*sin(
theta)*sin(phi), r*cos(theta)],
inverse=False, fill_in_gaps=False)
## cylindrical <-> spherical
R3_c.connect_to(R3_s, [rho, psi, z],
[sqrt(rho**2 + z**2), acos(z/sqrt(rho**2 + z**2)), psi],
inverse=False, fill_in_gaps=False)
R3_s.connect_to(R3_c, [r, theta, phi],
[r*sin(theta), phi, r*cos(theta)],
inverse=False, fill_in_gaps=False)
# Defining the basis coordinate functions.
R3_r.x, R3_r.y, R3_r.z = R3_r.coord_functions()
R3_c.rho, R3_c.psi, R3_c.z = R3_c.coord_functions()
R3_s.r, R3_s.theta, R3_s.phi = R3_s.coord_functions()
# Defining the basis vector fields.
R3_r.e_x, R3_r.e_y, R3_r.e_z = R3_r.base_vectors()
R3_c.e_rho, R3_c.e_psi, R3_c.e_z = R3_c.base_vectors()
R3_s.e_r, R3_s.e_theta, R3_s.e_phi = R3_s.base_vectors()
# Defining the basis oneform fields.
R3_r.dx, R3_r.dy, R3_r.dz = R3_r.base_oneforms()
R3_c.drho, R3_c.dpsi, R3_c.dz = R3_c.base_oneforms()
R3_s.dr, R3_s.dtheta, R3_s.dphi = R3_s.base_oneforms()
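# Illustrative usage sketch (not in the original source): for example,
# R3_r.transform(R3_c, [1, 1, 0]) evaluates to Matrix([[sqrt(2)], [pi/4], [0]]),
# and R3_c.transform(R3_r, [1, 0, 5]) evaluates to Matrix([[1], [0], [5]]).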
|
e2e51e07905547e36c56890c55b72c2a2fe50b39b5d39d28154b4b57d9854e9d | from typing import Any, Set as tSet
from functools import reduce
from itertools import permutations
from sympy.combinatorics import Permutation
from sympy.core import (
Basic, Expr, Function, diff,
Pow, Mul, Add, Lambda, S, Tuple, Dict
)
from sympy.core.cache import cacheit
from sympy.core.symbol import Symbol, Dummy
from sympy.core.symbol import Str
from sympy.core.sympify import _sympify
from sympy.functions import factorial
from sympy.matrices import ImmutableDenseMatrix as Matrix
from sympy.solvers import solve
from sympy.utilities.exceptions import SymPyDeprecationWarning
# TODO you are a bit excessive in the use of Dummies
# TODO dummy point, literal field
# TODO too often one needs to call doit or simplify on the output, check the
# tests and find out why
from sympy.tensor.array import ImmutableDenseNDimArray
class Manifold(Basic):
"""
A mathematical manifold.
Explanation
===========
A manifold is a topological space that locally resembles
Euclidean space near each point [1].
This class does not provide any means to study the topological
characteristics of the manifold that it represents, though.
Parameters
==========
name : str
The name of the manifold.
dim : int
The dimension of the manifold.
Examples
========
>>> from sympy.diffgeom import Manifold
>>> m = Manifold('M', 2)
>>> m
M
>>> m.dim
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Manifold
"""
def __new__(cls, name, dim, **kwargs):
if not isinstance(name, Str):
name = Str(name)
dim = _sympify(dim)
obj = super().__new__(cls, name, dim)
obj.patches = _deprecated_list(
"Manifold.patches",
"external container for registry",
19321,
"1.7",
[]
)
return obj
@property
def name(self):
return self.args[0]
@property
def dim(self):
return self.args[1]
class Patch(Basic):
"""
A patch on a manifold.
Explanation
===========
A coordinate patch, or patch for short, is a simply-connected open set around
a point in the manifold [1]. On a manifold one can have many patches that
do not always include the whole manifold. On these patches coordinate
charts can be defined that permit the parameterization of any point on the
patch in terms of a tuple of real numbers (the coordinates).
This class does not provide any means to study the topological
characteristics of the patch that it represents.
Parameters
==========
name : str
The name of the patch.
manifold : Manifold
The manifold on which the patch is defined.
Examples
========
>>> from sympy.diffgeom import Manifold, Patch
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> p
P
>>> p.dim
2
References
==========
.. [1] G. Sussman, J. Wisdom, W. Farr, Functional Differential Geometry
(2013)
"""
def __new__(cls, name, manifold, **kwargs):
if not isinstance(name, Str):
name = Str(name)
obj = super().__new__(cls, name, manifold)
obj.manifold.patches.append(obj) # deprecated
obj.coord_systems = _deprecated_list(
"Patch.coord_systems",
"external container for registry",
19321,
"1.7",
[]
)
return obj
@property
def name(self):
return self.args[0]
@property
def manifold(self):
return self.args[1]
@property
def dim(self):
return self.manifold.dim
class CoordSystem(Basic):
"""
A coordinate system defined on the patch.
Explanation
===========
Coordinate system is a system that uses one or more coordinates to uniquely
determine the position of the points or other geometric elements on a
manifold [1].
By passing ``Symbols`` to the *symbols* parameter, the user can define the names and
assumptions of the coordinate symbols of the coordinate system. If not passed,
these symbols are generated automatically and are assumed to be real valued.
By passing the *relations* parameter, the user can define the transform relations
between coordinate systems. Inverse transformations and indirect transformations can
be found automatically. If this parameter is not passed, coordinate
transformation cannot be done.
Parameters
==========
name : str
The name of the coordinate system.
patch : Patch
The patch where the coordinate system is defined.
symbols : list of Symbols, optional
Defines the names and assumptions of coordinate symbols.
relations : dict, optional
Key is a tuple of two strings, which are the names of the systems that
the coordinates transform from and to.
Value is a tuple of the symbols before transformation and a tuple of
the expressions after transformation.
Examples
========
We define a two-dimensional Cartesian coordinate system and a polar coordinate
system.
>>> from sympy import symbols, pi, sqrt, atan2, cos, sin
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> x, y = symbols('x y', real=True)
>>> r, theta = symbols('r theta', nonnegative=True)
>>> relation_dict = {
... ('Car2D', 'Pol'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))],
... ('Pol', 'Car2D'): [(r, theta), (r*cos(theta), r*sin(theta))]
... }
>>> Car2D = CoordSystem('Car2D', p, (x, y), relation_dict)
>>> Pol = CoordSystem('Pol', p, (r, theta), relation_dict)
The ``symbols`` property returns ``CoordinateSymbol`` instances. These symbols
are not the same as the symbols used to construct the coordinate system.
>>> Car2D
Car2D
>>> Car2D.dim
2
>>> Car2D.symbols
(x, y)
>>> _[0].func
<class 'sympy.diffgeom.diffgeom.CoordinateSymbol'>
The ``transformation()`` method returns the transformation function from
one coordinate system to another. The ``transform()`` method returns the
transformed coordinates.
>>> Car2D.transformation(Pol)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
>>> Car2D.transform(Pol)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> Car2D.transform(Pol, [1, 2])
Matrix([
[sqrt(5)],
[atan(2)]])
The ``jacobian()`` method returns the Jacobian matrix of the coordinate
transformation between two systems. The ``jacobian_determinant()`` method
returns the Jacobian determinant of the coordinate transformation between two
systems.
>>> Pol.jacobian(Car2D)
Matrix([
[cos(theta), -r*sin(theta)],
[sin(theta), r*cos(theta)]])
>>> Pol.jacobian(Car2D, [1, pi/2])
Matrix([
[0, -1],
[1, 0]])
>>> Car2D.jacobian_determinant(Pol)
1/sqrt(x**2 + y**2)
>>> Car2D.jacobian_determinant(Pol, [1,0])
1
References
==========
.. [1] https://en.wikipedia.org/wiki/Coordinate_system
"""
def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
if not isinstance(name, Str):
name = Str(name)
# canonicalize the symbols
if symbols is None:
names = kwargs.get('names', None)
if names is None:
symbols = Tuple(
*[Symbol('%s_%s' % (name.name, i), real=True)
for i in range(patch.dim)]
)
else:
SymPyDeprecationWarning(
feature="Class signature 'names' of CoordSystem",
useinstead="class signature 'symbols'",
issue=19321,
deprecated_since_version="1.7"
).warn()
symbols = Tuple(
*[Symbol(n, real=True) for n in names]
)
else:
syms = []
for s in symbols:
if isinstance(s, Symbol):
syms.append(Symbol(s.name, **s._assumptions.generator))
elif isinstance(s, str):
SymPyDeprecationWarning(
feature="Passing str as coordinate symbol's name",
useinstead="Symbol which contains the name and assumption for coordinate symbol",
issue=19321,
deprecated_since_version="1.7"
).warn()
syms.append(Symbol(s, real=True))
symbols = Tuple(*syms)
# canonicalize the relations
rel_temp = {}
for k,v in relations.items():
s1, s2 = k
if not isinstance(s1, Str):
s1 = Str(s1)
if not isinstance(s2, Str):
s2 = Str(s2)
key = Tuple(s1, s2)
# Old version used Lambda as a value.
if isinstance(v, Lambda):
v = (tuple(v.signature), tuple(v.expr))
else:
v = (tuple(v[0]), tuple(v[1]))
rel_temp[key] = v
relations = Dict(rel_temp)
# construct the object
obj = super().__new__(cls, name, patch, symbols, relations)
# Add deprecated attributes
obj.transforms = _deprecated_dict(
"Mutable CoordSystem.transforms",
"'relations' parameter in class signature",
19321,
"1.7",
{}
)
obj._names = [str(n) for n in symbols]
obj.patch.coord_systems.append(obj) # deprecated
obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated
obj._dummy = Dummy()
return obj
@property
def name(self):
return self.args[0]
@property
def patch(self):
return self.args[1]
@property
def manifold(self):
return self.patch.manifold
@property
def symbols(self):
return tuple(CoordinateSymbol(self, i, **s._assumptions.generator)
for i,s in enumerate(self.args[2]))
@property
def relations(self):
return self.args[3]
@property
def dim(self):
return self.patch.dim
##########################################################################
# Finding transformation relation
##########################################################################
def transformation(self, sys):
"""
Return coordinate transformation function from *self* to *sys*.
Parameters
==========
sys : CoordSystem
Returns
=======
sympy.Lambda
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.transformation(R2_p)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
"""
signature = self.args[2]
key = Tuple(self.name, sys.name)
if self == sys:
expr = Matrix(self.symbols)
elif key in self.relations:
expr = Matrix(self.relations[key][1])
elif key[::-1] in self.relations:
expr = Matrix(self._inverse_transformation(sys, self))
else:
expr = Matrix(self._indirect_transformation(self, sys))
return Lambda(signature, expr)
@staticmethod
def _solve_inverse(sym1, sym2, exprs, sys1_name, sys2_name):
ret = solve(
[t[0] - t[1] for t in zip(sym2, exprs)],
list(sym1), dict=True)
if len(ret) == 0:
temp = "Cannot solve inverse relation from {} to {}."
raise NotImplementedError(temp.format(sys1_name, sys2_name))
elif len(ret) > 1:
temp = "Obtained multiple inverse relation from {} to {}."
raise ValueError(temp.format(sys1_name, sys2_name))
return ret[0]
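# Illustrative sketch (not in the original source): with sym1 = (x,),
# sym2 = (u,) and exprs = (2*x,), the call solves [u - 2*x] for x and
# returns {x: u/2}; zero or multiple solutions raise the errors above.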
@classmethod
def _inverse_transformation(cls, sys1, sys2):
# Find the transformation relation from sys2 to sys1
forward = sys1.transform(sys2)
inv_results = cls._solve_inverse(sys1.symbols, sys2.symbols, forward,
sys1.name, sys2.name)
signature = tuple(sys1.symbols)
return [inv_results[s] for s in signature]
@classmethod
@cacheit
def _indirect_transformation(cls, sys1, sys2):
# Find the transformation relation between two indirectly connected
# coordinate systems
rel = sys1.relations
path = cls._dijkstra(sys1, sys2)
transforms = []
for s1, s2 in zip(path, path[1:]):
if (s1, s2) in rel:
transforms.append(rel[(s1, s2)])
else:
sym2, inv_exprs = rel[(s2, s1)]
sym1 = tuple(Dummy() for i in sym2)
ret = cls._solve_inverse(sym2, sym1, inv_exprs, s2, s1)
ret = tuple(ret[s] for s in sym2)
transforms.append((sym1, ret))
syms = sys1.args[2]
exprs = syms
for newsyms, newexprs in transforms:
exprs = tuple(e.subs(zip(newsyms, exprs)) for e in newexprs)
return exprs
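# Illustrative sketch (not in the original source): if A -> B is given by
# (x,) |-> (2*x,) and B -> C by (y,) |-> (y + 1,), chaining the substitutions
# along the path [A, B, C] produces the indirect A -> C relation
# (x,) |-> (2*x + 1,).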
@staticmethod
def _dijkstra(sys1, sys2):
# Use Dijkstra's algorithm to find the shortest path between two
# indirectly connected coordinate systems.
# The return value is the list of the names of the systems on the path.
relations = sys1.relations
graph = {}
for s1, s2 in relations.keys():
if s1 not in graph:
graph[s1] = {s2}
else:
graph[s1].add(s2)
if s2 not in graph:
graph[s2] = {s1}
else:
graph[s2].add(s1)
path_dict = {sys:[0, [], 0] for sys in graph} # minimum distance, path, visited flag
def visit(sys):
path_dict[sys][2] = 1
for newsys in graph[sys]:
distance = path_dict[sys][0] + 1
if path_dict[newsys][0] >= distance or not path_dict[newsys][1]:
path_dict[newsys][0] = distance
path_dict[newsys][1] = [i for i in path_dict[sys][1]]
path_dict[newsys][1].append(sys)
visit(sys1.name)
while True:
min_distance = max(path_dict.values(), key=lambda x:x[0])[0]
newsys = None
for sys, lst in path_dict.items():
if 0 < lst[0] <= min_distance and not lst[2]:
min_distance = lst[0]
newsys = sys
if newsys is None:
break
visit(newsys)
result = path_dict[sys2.name][1]
result.append(sys2.name)
if result == [sys2.name]:
raise KeyError("Two coordinate systems are not connected.")
return result
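# Illustrative sketch (not in the original source): if the relations dict has
# keys ('A', 'B') and ('B', 'C') only, the graph is A - B - C and
# _dijkstra(A, C) returns the list of names ['A', 'B', 'C']; a target with no
# connecting path raises the KeyError above.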
def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False):
SymPyDeprecationWarning(
feature="CoordSystem.connect_to",
useinstead="new instance generated with new 'transforms' parameter",
issue=19321,
deprecated_since_version="1.7"
).warn()
from_coords, to_exprs = dummyfy(from_coords, to_exprs)
self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs)
if inverse:
to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs)
if fill_in_gaps:
self._fill_gaps_in_transformations()
@staticmethod
def _inv_transf(from_coords, to_exprs):
# Will be removed when connect_to is removed
inv_from = [i.as_dummy() for i in from_coords]
inv_to = solve(
[t[0] - t[1] for t in zip(inv_from, to_exprs)],
list(from_coords), dict=True)[0]
inv_to = [inv_to[fc] for fc in from_coords]
return Matrix(inv_from), Matrix(inv_to)
@staticmethod
def _fill_gaps_in_transformations():
# Will be removed when connect_to is removed
raise NotImplementedError
##########################################################################
# Coordinate transformations
##########################################################################
def transform(self, sys, coordinates=None):
"""
Return the result of coordinate transformation from *self* to *sys*.
If coordinates are not given, coordinate symbols of *self* are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.ImmutableDenseMatrix containing CoordinateSymbol
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.transform(R2_p)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> R2_r.transform(R2_p, [0, 1])
Matrix([
[ 1],
[pi/2]])
"""
if coordinates is None:
coordinates = self.symbols
if self != sys:
transf = self.transformation(sys)
coordinates = transf(*coordinates)
else:
coordinates = Matrix(coordinates)
return coordinates
def coord_tuple_transform_to(self, to_sys, coords):
"""Transform ``coords`` to coord system ``to_sys``."""
SymPyDeprecationWarning(
feature="CoordSystem.coord_tuple_transform_to",
useinstead="CoordSystem.transform",
issue=19321,
deprecated_since_version="1.7"
).warn()
coords = Matrix(coords)
if self != to_sys:
transf = self.transforms[to_sys]
coords = transf[1].subs(list(zip(transf[0], coords)))
return coords
def jacobian(self, sys, coordinates=None):
"""
Return the jacobian matrix of a transformation on given coordinates.
If coordinates are not given, coordinate symbols of *self* are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.ImmutableDenseMatrix
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_p.jacobian(R2_r)
Matrix([
[cos(theta), -rho*sin(theta)],
[sin(theta), rho*cos(theta)]])
>>> R2_p.jacobian(R2_r, [1, 0])
Matrix([
[1, 0],
[0, 1]])
"""
result = self.transform(sys).jacobian(self.symbols)
if coordinates is not None:
result = result.subs(list(zip(self.symbols, coordinates)))
return result
jacobian_matrix = jacobian
def jacobian_determinant(self, sys, coordinates=None):
"""
Return the jacobian determinant of a transformation on given
coordinates. If coordinates are not given, coordinate symbols of *self*
are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.Expr
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.jacobian_determinant(R2_p)
1/sqrt(x**2 + y**2)
>>> R2_r.jacobian_determinant(R2_p, [1, 0])
1
"""
return self.jacobian(sys, coordinates).det()
##########################################################################
# Points
##########################################################################
def point(self, coords):
"""Create a ``Point`` with coordinates given in this coord system."""
return Point(self, coords)
def point_to_coords(self, point):
"""Calculate the coordinates of a point in this coord system."""
return point.coords(self)
##########################################################################
# Base fields.
##########################################################################
def base_scalar(self, coord_index):
"""Return ``BaseScalarField`` that takes a point and returns one of the coordinates."""
return BaseScalarField(self, coord_index)
coord_function = base_scalar
def base_scalars(self):
"""Returns a list of all coordinate functions.
For more details see the ``base_scalar`` method of this class."""
return [self.base_scalar(i) for i in range(self.dim)]
coord_functions = base_scalars
def base_vector(self, coord_index):
"""Return a basis vector field.
The basis vector field for this coordinate system. It is also an
operator on scalar fields."""
return BaseVectorField(self, coord_index)
def base_vectors(self):
"""Returns a list of all base vectors.
For more details see the ``base_vector`` method of this class."""
return [self.base_vector(i) for i in range(self.dim)]
def base_oneform(self, coord_index):
"""Return a basis 1-form field.
The basis one-form field for this coordinate system. It is also an
operator on vector fields."""
return Differential(self.coord_function(coord_index))
def base_oneforms(self):
"""Returns a list of all base oneforms.
For more details see the ``base_oneform`` method of this class."""
return [self.base_oneform(i) for i in range(self.dim)]
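# An illustrative sketch of the base field constructors above, using the
# predefined R2_r system from sympy.diffgeom.rn (the same names appear in
# the doctests elsewhere in this file):
#   >>> from sympy.diffgeom.rn import R2_r
#   >>> fx, fy = R2_r.base_scalars()      # coordinate functions
#   >>> e_x, e_y = R2_r.base_vectors()    # basis vector fields
#   >>> dx, dy = R2_r.base_oneforms()     # basis one-form fields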
class CoordinateSymbol(Symbol):
"""A symbol which denotes an abstract value of i-th coordinate of
the coordinate system with given context.
Explanation
===========
Each coordinate in a coordinate system is represented by a unique symbol,
such as x, y, z in the Cartesian coordinate system.
You should not construct this class directly. Instead, use the ``symbols``
property of CoordSystem.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import symbols, Lambda, Matrix, sqrt, atan2, cos, sin
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> x, y = symbols('x y', real=True)
>>> r, theta = symbols('r theta', nonnegative=True)
>>> relation_dict = {
... ('Car2D', 'Pol'): Lambda((x, y), Matrix([sqrt(x**2 + y**2), atan2(y, x)])),
... ('Pol', 'Car2D'): Lambda((r, theta), Matrix([r*cos(theta), r*sin(theta)]))
... }
>>> Car2D = CoordSystem('Car2D', p, [x, y], relation_dict)
>>> Pol = CoordSystem('Pol', p, [r, theta], relation_dict)
>>> x, y = Car2D.symbols
``CoordinateSymbol`` contains its coordinate system and index.
>>> x.name
'x'
>>> x.coord_sys == Car2D
True
>>> x.index
0
>>> x.is_real
True
You can transform ``CoordinateSymbol`` into other coordinate system using
``rewrite()`` method.
>>> x.rewrite(Pol)
r*cos(theta)
>>> sqrt(x**2 + y**2).rewrite(Pol).simplify()
r
"""
def __new__(cls, coord_sys, index, **assumptions):
name = coord_sys.args[2][index].name
obj = super().__new__(cls, name, **assumptions)
obj.coord_sys = coord_sys
obj.index = index
return obj
def __getnewargs__(self):
return (self.coord_sys, self.index)
def _hashable_content(self):
return (
self.coord_sys, self.index
) + tuple(sorted(self.assumptions0.items()))
def _eval_rewrite(self, rule, args, **hints):
if isinstance(rule, CoordSystem):
return rule.transform(self.coord_sys)[self.index]
return super()._eval_rewrite(rule, args, **hints)
class Point(Basic):
"""Point defined in a coordinate system.
Explanation
===========
Mathematically, a point is defined in the manifold and does not have any coordinates
by itself. A coordinate system is what imbues the point with coordinates, via a
coordinate chart. However, due to the difficulty of realizing such logic, you must
supply a coordinate system and coordinates to define a Point here.
The usage of this object after its definition is independent of the
coordinate system that was used in order to define it; however, due to
limitations in the simplification routines, you can arrive at complicated
expressions if you use inappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
coords : list
The coordinates of the point.
Examples
========
>>> from sympy import pi
>>> from sympy.diffgeom import Point
>>> from sympy.diffgeom.rn import R2, R2_r, R2_p
>>> rho, theta = R2_p.symbols
>>> p = Point(R2_p, [rho, 3*pi/4])
>>> p.manifold == R2
True
>>> p.coords()
Matrix([
[ rho],
[3*pi/4]])
>>> p.coords(R2_r)
Matrix([
[-sqrt(2)*rho/2],
[ sqrt(2)*rho/2]])
"""
def __new__(cls, coord_sys, coords, **kwargs):
coords = Matrix(coords)
obj = super().__new__(cls, coord_sys, coords)
obj._coord_sys = coord_sys
obj._coords = coords
return obj
@property
def patch(self):
return self._coord_sys.patch
@property
def manifold(self):
return self._coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def coords(self, sys=None):
"""
Coordinates of the point in a given coordinate system. If a coordinate system
is not passed, the coordinates in the coordinate system in which the point
was defined are returned.
"""
if sys is None:
return self._coords
else:
return self._coord_sys.transform(sys, self._coords)
@property
def free_symbols(self):
return self._coords.free_symbols
class BaseScalarField(Expr):
"""Base scalar field over a manifold for a given coordinate system.
Explanation
===========
A scalar field takes a point as an argument and returns a scalar.
A base scalar field of a coordinate system takes a point and returns one of
the coordinates of that point in the coordinate system in question.
To define a scalar field you need to choose the coordinate system and the
index of the coordinate.
The use of the scalar field after its definition is independent of the
coordinate system in which it was defined; however, due to limitations in
the simplification routines, you may arrive at more complicated
expressions if you use inappropriate coordinate systems.
You can build complicated scalar fields by just building up SymPy
expressions containing ``BaseScalarField`` instances.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function, pi
>>> from sympy.diffgeom import BaseScalarField
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> rho, _ = R2_p.symbols
>>> point = R2_p.point([rho, 0])
>>> fx, fy = R2_r.base_scalars()
>>> ftheta = BaseScalarField(R2_r, 1)
>>> fx(point)
rho
>>> fy(point)
0
>>> (fx**2+fy**2).rcall(point)
rho**2
>>> g = Function('g')
>>> fg = g(ftheta-pi)
>>> fg.rcall(point)
g(-pi)
"""
is_commutative = True
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, *args):
"""Evaluating the field at a point or doing nothing.
If the argument is a ``Point`` instance, the field is evaluated at that
point. If the argument is any other object, the field itself is returned.
This is done in order to have a working recursive calling mechanism for all
fields (see the ``__call__`` method of ``Expr``).
"""
point = args[0]
if len(args) != 1 or not isinstance(point, Point):
return self
coords = point.coords(self._coord_sys)
# XXX Calling doit is necessary with all the Subs expressions
# XXX Calling simplify is necessary with all the trig expressions
return simplify(coords[self._index]).doit()
# XXX Workaround for limitations on the content of args
free_symbols = set() # type: tSet[Any]
def doit(self):
return self
class BaseVectorField(Expr):
r"""Base vector field over a manifold for a given coordinate system.
Explanation
===========
A vector field is an operator taking a scalar field and returning a
directional derivative (which is also a scalar field).
A base vector field is the same type of operator, however the derivation is
specifically done with respect to a chosen coordinate.
To define a base vector field you need to choose the coordinate system and
the index of the coordinate.
The use of the vector field after its definition is independent of the
coordinate system in which it was defined; however, due to limitations in the
simplification routines, you may arrive at more complicated expressions if you
use inappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import BaseVectorField
>>> from sympy import pprint
>>> x, y = R2_r.symbols
>>> rho, theta = R2_p.symbols
>>> fx, fy = R2_r.base_scalars()
>>> point_p = R2_p.point([rho, theta])
>>> point_r = R2_r.point([x, y])
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> v = BaseVectorField(R2_r, 1)
>>> pprint(v(s_field))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
>>> pprint(v(s_field).rcall(point_r).doit())
d
--(g(x, y))
dy
>>> pprint(v(s_field).rcall(point_p))
/ d \|
|---(g(rho*cos(theta), xi))||
\dxi /|xi=rho*sin(theta)
"""
is_commutative = False
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, scalar_field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field an error is raised.
"""
if covariant_order(scalar_field) or contravariant_order(scalar_field):
raise ValueError('Only scalar fields can be supplied as arguments to vector fields.')
if scalar_field is None:
return self
base_scalars = list(scalar_field.atoms(BaseScalarField))
# First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r)
d_var = self._coord_sys._dummy
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(d_var) for i,
b in enumerate(base_scalars)]
d_result = scalar_field.subs(list(zip(base_scalars, d_funcs)))
d_result = d_result.diff(d_var)
# Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y))
coords = self._coord_sys.symbols
d_funcs_deriv = [f.diff(d_var) for f in d_funcs]
d_funcs_deriv_sub = []
for b in base_scalars:
jac = self._coord_sys.jacobian(b._coord_sys, coords)
d_funcs_deriv_sub.append(jac[b._index, self._index])
d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub)))
# Remove the dummies
result = d_result.subs(list(zip(d_funcs, base_scalars)))
result = result.subs(list(zip(coords, self._coord_sys.coord_functions())))
return result.doit()
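# Illustrative sketch of the procedure above (names as in the R2 doctests):
# applying e_x = BaseVectorField(R2_r, 0) to the scalar field fx*fy first
# replaces the base scalars with functions of a dummy, differentiates with
# respect to that dummy, substitutes the Jacobian entries, and finally
# restores the base scalars, yielding fy (printed as y).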
def _find_coords(expr):
# Finds CoordinateSystems existing in expr
fields = expr.atoms(BaseScalarField, BaseVectorField)
result = set()
for f in fields:
result.add(f._coord_sys)
return result
class Commutator(Expr):
r"""Commutator of two vector fields.
Explanation
===========
The commutator of two vector fields `v_1` and `v_2` is defined as the
vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal
to `v_1(v_2(f)) - v_2(v_1(f))`.
Examples
========
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import Commutator
>>> from sympy import simplify
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_r = R2_p.base_vector(0)
>>> c_xy = Commutator(e_x, e_y)
>>> c_xr = Commutator(e_x, e_r)
>>> c_xy
0
Unfortunately, the current code is not able to compute everything:
>>> c_xr
Commutator(e_x, e_rho)
>>> simplify(c_xr(fy**2))
-2*cos(theta)*y**2/(x**2 + y**2)
"""
def __new__(cls, v1, v2):
if (covariant_order(v1) or contravariant_order(v1) != 1
or covariant_order(v2) or contravariant_order(v2) != 1):
raise ValueError(
'Only commutators of vector fields are supported.')
if v1 == v2:
return S.Zero
coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)])
if len(coord_sys) == 1:
# Only one coordinate system is used, hence it is easy enough to
# actually evaluate the commutator.
if all(isinstance(v, BaseVectorField) for v in (v1, v2)):
return S.Zero
bases_1, bases_2 = [list(v.atoms(BaseVectorField))
for v in (v1, v2)]
coeffs_1 = [v1.expand().coeff(b) for b in bases_1]
coeffs_2 = [v2.expand().coeff(b) for b in bases_2]
res = 0
for c1, b1 in zip(coeffs_1, bases_1):
for c2, b2 in zip(coeffs_2, bases_2):
res += c1*b1(c2)*b2 - c2*b2(c1)*b1
return res
else:
obj = super().__new__(cls, v1, v2)
obj._v1 = v1 # deprecated assignment
obj._v2 = v2 # deprecated assignment
return obj
@property
def v1(self):
return self.args[0]
@property
def v2(self):
return self.args[1]
def __call__(self, scalar_field):
"""Apply on a scalar field.
If the argument is not a scalar field an error is raised.
"""
return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field))
class Differential(Expr):
r"""Return the differential (exterior derivative) of a form field.
Explanation
===========
The differential of a form (i.e. the exterior derivative) has a complicated
definition in the general case.
The differential `df` of the 0-form `f` is defined for any vector field `v`
as `df(v) = v(f)`.
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import Differential
>>> from sympy import pprint
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> dg = Differential(s_field)
>>> dg
d(g(x, y))
>>> pprint(dg(e_x))
/ d \|
|---(g(xi, y))||
\dxi /|xi=x
>>> pprint(dg(e_y))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
Applying the exterior derivative operator twice always results in zero:
>>> Differential(dg)
0
"""
is_commutative = False
def __new__(cls, form_field):
if contravariant_order(form_field):
raise ValueError(
'A vector field was supplied as an argument to Differential.')
if isinstance(form_field, Differential):
return S.Zero
else:
obj = super().__new__(cls, form_field)
obj._form_field = form_field # deprecated assignment
return obj
@property
def form_field(self):
return self.args[0]
def __call__(self, *vector_fields):
"""Apply on a list of vector_fields.
Explanation
===========
If the number of vector fields supplied is not equal to 1 + the order of
the form field inside the differential the result is undefined.
For 1-forms (i.e. differentials of scalar fields) the evaluation is
done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector
field, the differential is returned unchanged. This is done in order to
permit partial contractions for higher forms.
In the general case the evaluation is done by applying the form field
inside the differential on a list with one element fewer than the
original list. Lowering the number of vector fields is achieved by
replacing each pair of fields by their commutator.
If the arguments are not vector fields or ``None``, an error is raised.
"""
if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None
for a in vector_fields):
raise ValueError('The arguments supplied to Differential should be vector fields or Nones.')
k = len(vector_fields)
if k == 1:
if vector_fields[0]:
return vector_fields[0].rcall(self._form_field)
return self
else:
# For higher form it is more complicated:
# Invariant formula:
# https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula
# df(v1, ... vn) = +/- vi(f(v1..no i..vn))
# +/- f([vi,vj],v1..no i, no j..vn)
f = self._form_field
v = vector_fields
ret = 0
for i in range(k):
t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:]))
ret += (-1)**i*t
for j in range(i + 1, k):
c = Commutator(v[i], v[j])
if c: # TODO this is ugly - the Commutator can be Zero and
# this causes the next line to fail
t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:])
ret += (-1)**(i + j)*t
return ret
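# For the k == 2 case above (a one-form w contracted with two vector fields)
# the loop reduces to the familiar invariant formula
#   dw(v1, v2) = v1(w(v2)) - v2(w(v1)) - w([v1, v2]).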
class TensorProduct(Expr):
"""Tensor product of forms.
Explanation
===========
The tensor product permits the creation of multilinear functionals (i.e.
higher order tensors) out of lower order fields (e.g. 1-forms and vector
fields). However, the higher tensors thus created lack the interesting
features provided by the other type of product, the wedge product, namely
they are not antisymmetric and hence are not form fields.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> TensorProduct(dx, dy)(e_x, e_y)
1
>>> TensorProduct(dx, dy)(e_y, e_x)
0
>>> TensorProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> TensorProduct(e_x, e_y)(fx**2, fy**2)
4*x*y
>>> TensorProduct(e_y, dx)(fy)
dx
You can nest tensor products.
>>> tp1 = TensorProduct(dx, dy)
>>> TensorProduct(tp1, dx)(e_x, e_y, e_x)
1
You can carry out partial contraction, for instance when 'raising an index'.
Putting ``None`` in the second argument of ``rcall`` means that the
respective position in the tensor product is left as it is.
>>> TP = TensorProduct
>>> metric = TP(dx, dx) + 3*TP(dy, dy)
>>> metric.rcall(e_y, None)
3*dy
Or automatically pad the args with ``None`` without specifying them.
>>> metric.rcall(e_y)
3*dy
"""
def __new__(cls, *args):
scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0])
multifields = [m for m in args if covariant_order(m) + contravariant_order(m)]
if multifields:
if len(multifields) == 1:
return scalar*multifields[0]
return scalar*super().__new__(cls, *multifields)
else:
return scalar
def __call__(self, *fields):
"""Apply on a list of fields.
If the number of input fields supplied is not equal to the order of
the tensor product field, the list of arguments is padded with ``None``'s.
The list of arguments is divided in sublists depending on the order of
the forms inside the tensor product. The sublists are provided as
arguments to these forms and the resulting expressions are given to the
constructor of ``TensorProduct``.
"""
tot_order = covariant_order(self) + contravariant_order(self)
tot_args = len(fields)
if tot_args != tot_order:
fields = list(fields) + [None]*(tot_order - tot_args)
orders = [covariant_order(f) + contravariant_order(f) for f in self._args]
indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)]
fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])]
multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)]
return TensorProduct(*multipliers)
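# Illustrative sketch: for TensorProduct(dx, dy)(e_x, e_y) the orders are
# [1, 1], so the arguments are split into the sublists [e_x] and [e_y]; each
# factor is applied to its own sublist and the results are multiplied,
# giving 1 as in the docstring above.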
class WedgeProduct(TensorProduct):
"""Wedge product of forms.
Explanation
===========
In the context of integration only completely antisymmetric forms make
sense. The wedge product permits the creation of such forms.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import WedgeProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> WedgeProduct(dx, dy)(e_x, e_y)
1
>>> WedgeProduct(dx, dy)(e_y, e_x)
-1
>>> WedgeProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> WedgeProduct(e_x, e_y)(fy, None)
-e_x
You can nest wedge products.
>>> wp1 = WedgeProduct(dx, dy)
>>> WedgeProduct(wp1, dx)(e_x, e_y, e_x)
0
"""
# TODO the calculation of signatures is slow
# TODO you do not need all these permutations (neither the prefactor)
def __call__(self, *fields):
"""Apply on a list of vector_fields.
The expression is rewritten internally in terms of tensor products and evaluated."""
orders = (covariant_order(e) + contravariant_order(e) for e in self.args)
mul = 1/Mul(*(factorial(o) for o in orders))
perms = permutations(fields)
perms_par = (Permutation(
p).signature() for p in permutations(list(range(len(fields)))))
tensor_prod = TensorProduct(*self.args)
return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)])
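# Illustrative sketch: WedgeProduct(dx, dy)(e_x, e_y) expands (with the
# 1/(1!*1!) prefactor) to TP(dx, dy)(e_x, e_y) - TP(dx, dy)(e_y, e_x) = 1,
# which is why swapping the arguments flips the sign in the docstring above.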
class LieDerivative(Expr):
"""Lie derivative with respect to a vector field.
Explanation
===========
The transport operator that defines the Lie derivative is the pushforward of
the field to be derived along the integral curve of the field with respect
to which one derives.
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> from sympy.diffgeom import (LieDerivative, TensorProduct)
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_rho, e_theta = R2_p.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> LieDerivative(e_x, fy)
0
>>> LieDerivative(e_x, fx)
1
>>> LieDerivative(e_x, e_x)
0
The Lie derivative of a vector field by another vector field is equal to
their commutator:
>>> LieDerivative(e_x, e_rho)
Commutator(e_x, e_rho)
>>> LieDerivative(e_x + e_y, fx)
1
>>> tp = TensorProduct(dx, dy)
>>> LieDerivative(e_x, tp)
LieDerivative(e_x, TensorProduct(dx, dy))
"""
def __new__(cls, v_field, expr):
expr_form_ord = covariant_order(expr)
if contravariant_order(v_field) != 1 or covariant_order(v_field):
raise ValueError('Lie derivatives are defined only with respect to'
' vector fields. The supplied argument was not a '
'vector field.')
if expr_form_ord > 0:
obj = super().__new__(cls, v_field, expr)
# deprecated assignments
obj._v_field = v_field
obj._expr = expr
return obj
if expr.atoms(BaseVectorField):
return Commutator(v_field, expr)
else:
return v_field.rcall(expr)
@property
def v_field(self):
return self.args[0]
@property
def expr(self):
return self.args[1]
def __call__(self, *args):
v = self.v_field
expr = self.expr
lead_term = v(expr(*args))
rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:])
for i in range(len(args))])
return lead_term - rest
class BaseCovarDerivativeOp(Expr):
"""Covariant derivative operator with respect to a base vector.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import BaseCovarDerivativeOp
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy))
>>> ch
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch)
>>> cvd(fx)
1
>>> cvd(fx*e_x)
e_x
"""
def __new__(cls, coord_sys, index, christoffel):
index = _sympify(index)
christoffel = ImmutableDenseNDimArray(christoffel)
obj = super().__new__(cls, coord_sys, index, christoffel)
# deprecated assignments
obj._coord_sys = coord_sys
obj._index = index
obj._christoffel = christoffel
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def christoffel(self):
return self.args[2]
def __call__(self, field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field the behaviour is undefined.
"""
if covariant_order(field) != 0:
raise NotImplementedError()
field = vectors_in_basis(field, self._coord_sys)
wrt_vector = self._coord_sys.base_vector(self._index)
wrt_scalar = self._coord_sys.coord_function(self._index)
vectors = list(field.atoms(BaseVectorField))
# First step: replace all vectors with something susceptible to
# derivation and do the derivation
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i,
b in enumerate(vectors)]
d_result = field.subs(list(zip(vectors, d_funcs)))
d_result = wrt_vector(d_result)
# Second step: backsubstitute the vectors in
d_result = d_result.subs(list(zip(d_funcs, vectors)))
# Third step: evaluate the derivatives of the vectors
derivs = []
for v in vectors:
d = Add(*[(self._christoffel[k, wrt_vector._index, v._index]
*v._coord_sys.base_vector(k))
for k in range(v._coord_sys.dim)])
derivs.append(d)
to_subs = [wrt_vector(d) for d in d_funcs]
# XXX: This substitution can fail when there are Dummy symbols and the
# cache is disabled: https://github.com/sympy/sympy/issues/17794
result = d_result.subs(list(zip(to_subs, derivs)))
# Remove the dummies
result = result.subs(list(zip(d_funcs, vectors)))
return result.doit()
class CovarDerivativeOp(Expr):
"""Covariant derivative operator.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import CovarDerivativeOp
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy))
>>> ch
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> cvd = CovarDerivativeOp(fx*e_x, ch)
>>> cvd(fx)
x
>>> cvd(fx*e_x)
x*e_x
"""
def __new__(cls, wrt, christoffel):
if len({v._coord_sys for v in wrt.atoms(BaseVectorField)}) > 1:
raise NotImplementedError()
if contravariant_order(wrt) != 1 or covariant_order(wrt):
raise ValueError('Covariant derivatives are defined only with '
'respect to vector fields. The supplied argument '
'was not a vector field.')
obj = super().__new__(cls, wrt, christoffel)
# deprecated assignments
obj._wrt = wrt
obj._christoffel = christoffel
return obj
@property
def wrt(self):
return self.args[0]
@property
def christoffel(self):
return self.args[1]
def __call__(self, field):
vectors = list(self._wrt.atoms(BaseVectorField))
base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel)
for v in vectors]
return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field)
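# Illustrative sketch (mirroring the docstring): for
# cvd = CovarDerivativeOp(fx*e_x, ch), calling cvd(fx) substitutes e_x by
# BaseCovarDerivativeOp(R2_r, 0, ch) inside the direction fx*e_x and then
# rcalls the result on fx, which evaluates to fx*1 = x.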
###############################################################################
# Integral curves on vector fields
###############################################################################
def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False):
r"""Return the series expansion for an integral curve of the field.
Explanation
===========
An integral curve is a function `\gamma` taking a parameter in `R` to a point
in the manifold. It satisfies the equation:
`V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`
where the given ``vector_field`` is denoted as `V`. This holds for any
value `t` for the parameter and any scalar field `f`.
This equation can also be decomposed with respect to a basis of coordinate functions:
`V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i`
This function returns a series expansion of `\gamma(t)` in terms of the
coordinate system ``coord_sys``. The equations and expansions are necessarily
done in a coordinate-system-dependent way as there is no other way to
represent movement between points on the manifold (i.e. there is no such
thing as a difference of points for a general manifold).
Parameters
==========
vector_field
the vector field for which an integral curve will be given
param
the argument of the function `\gamma` from R to the curve
start_point
the point which corresponds to `\gamma(0)`
n
the order to which to expand
coord_sys
the coordinate system in which to expand
coeffs (default False) - if True return a list of elements of the expansion
Examples
========
Use the predefined R2 manifold:
>>> from sympy.abc import t, x, y
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import intcurve_series
Specify a starting point and a vector field:
>>> start_point = R2_r.point([x, y])
>>> vector_field = R2_r.e_x
Calculate the series:
>>> intcurve_series(vector_field, t, start_point, n=3)
Matrix([
[t + x],
[ y]])
Or get the elements of the expansion in a list:
>>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True)
>>> series[0]
Matrix([
[x],
[y]])
>>> series[1]
Matrix([
[t],
[0]])
>>> series[2]
Matrix([
[0],
[0]])
The series in the polar coordinate system:
>>> series = intcurve_series(vector_field, t, start_point,
... n=3, coord_sys=R2_p, coeffs=True)
>>> series[0]
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> series[1]
Matrix([
[t*x/sqrt(x**2 + y**2)],
[ -t*y/(x**2 + y**2)]])
>>> series[2]
Matrix([
[t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2],
[ t**2*x*y/(x**2 + y**2)**2]])
See Also
========
intcurve_diffequ
"""
if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
raise ValueError('The supplied field was not a vector field.')
def iter_vfield(scalar_field, i):
"""Return ``vector_field`` called `i` times on ``scalar_field``."""
return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field)
def taylor_terms_per_coord(coord_function):
"""Return the series for one of the coordinates."""
return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i)
for i in range(n)]
coord_sys = coord_sys if coord_sys else start_point._coord_sys
coord_functions = coord_sys.coord_functions()
taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions]
if coeffs:
return [Matrix(t) for t in zip(*taylor_terms)]
else:
return Matrix([sum(c) for c in taylor_terms])
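# The terms assembled above implement the Taylor-type expansion
#   gamma_i(t) = sum_k t**k/k! * (V^k f_i)(start_point),
# which for V = e_x and the Cartesian coordinate functions reproduces the
# terms shown in the docstring example.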
def intcurve_diffequ(vector_field, param, start_point, coord_sys=None):
r"""Return the differential equation for an integral curve of the field.
Explanation
===========
An integral curve is a function `\gamma` taking a parameter in `R` to a point
in the manifold. It satisfies the equation:
`V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`
where the given ``vector_field`` is denoted as `V`. This holds for any
value `t` for the parameter and any scalar field `f`.
This function returns the differential equation of `\gamma(t)` in terms of the
coordinate system ``coord_sys``. The equations and expansions are necessarily
done in a coordinate-system-dependent way as there is no other way to
represent movement between points on the manifold (i.e. there is no such
thing as a difference of points for a general manifold).
Parameters
==========
vector_field
the vector field for which an integral curve will be given
param
the argument of the function `\gamma` from R to the curve
start_point
the point which corresponds to `\gamma(0)`
coord_sys
the coordinate system in which to give the equations
Returns
=======
a tuple of (equations, initial conditions)
Examples
========
Use the predefined R2 manifold:
>>> from sympy.abc import t
>>> from sympy.diffgeom.rn import R2, R2_p, R2_r
>>> from sympy.diffgeom import intcurve_diffequ
Specify a starting point and a vector field:
>>> start_point = R2_r.point([0, 1])
>>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y
Get the equation:
>>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point)
>>> equations
[f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]
>>> init_cond
[f_0(0), f_1(0) - 1]
The series in the polar coordinate system:
>>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p)
>>> equations
[Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]
>>> init_cond
[f_0(0) - 1, f_1(0) - pi/2]
See Also
========
intcurve_series
"""
if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
raise ValueError('The supplied field was not a vector field.')
coord_sys = coord_sys if coord_sys else start_point._coord_sys
gammas = [Function('f_%d' % i)(param) for i in range(
start_point._coord_sys.dim)]
arbitrary_p = Point(coord_sys, gammas)
coord_functions = coord_sys.coord_functions()
equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p))
for cf in coord_functions]
init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point))
for cf in coord_functions]
return equations, init_cond
###############################################################################
# Helpers
###############################################################################
def dummyfy(args, exprs):
# TODO Is this a good idea?
d_args = Matrix([s.as_dummy() for s in args])
reps = dict(zip(args, d_args))
d_exprs = Matrix([_sympify(expr).subs(reps) for expr in exprs])
return d_args, d_exprs
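# dummyfy replaces the supplied symbols with Dummy counterparts in both the
# argument list and the expressions; this keeps the stored transformation
# rules independent of any user symbols with the same names.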
###############################################################################
# Helpers
###############################################################################
def contravariant_order(expr, _strict=False):
"""Return the contravariant order of an expression.
Examples
========
>>> from sympy.diffgeom import contravariant_order
>>> from sympy.diffgeom.rn import R2
>>> from sympy.abc import a
>>> contravariant_order(a)
0
>>> contravariant_order(a*R2.x + 2)
0
>>> contravariant_order(a*R2.x*R2.e_y + R2.e_x)
1
"""
# TODO move some of this to class methods.
# TODO rewrite using the .as_blah_blah methods
if isinstance(expr, Add):
orders = [contravariant_order(e) for e in expr.args]
if len(set(orders)) != 1:
raise ValueError('Misformed expression containing contravariant fields of varying order.')
return orders[0]
elif isinstance(expr, Mul):
orders = [contravariant_order(e) for e in expr.args]
not_zero = [o for o in orders if o != 0]
if len(not_zero) > 1:
raise ValueError('Misformed expression containing multiplication between vectors.')
return 0 if not not_zero else not_zero[0]
elif isinstance(expr, Pow):
if covariant_order(expr.base) or covariant_order(expr.exp):
raise ValueError(
'Misformed expression containing a power of a vector.')
return 0
elif isinstance(expr, BaseVectorField):
return 1
elif isinstance(expr, TensorProduct):
return sum(contravariant_order(a) for a in expr.args)
elif not _strict or expr.atoms(BaseScalarField):
return 0
else: # If it does not contain anything related to the diffgeom module and it is _strict
return -1
def covariant_order(expr, _strict=False):
"""Return the covariant order of an expression.
Examples
========
>>> from sympy.diffgeom import covariant_order
>>> from sympy.diffgeom.rn import R2
>>> from sympy.abc import a
>>> covariant_order(a)
0
>>> covariant_order(a*R2.x + 2)
0
>>> covariant_order(a*R2.x*R2.dy + R2.dx)
1
"""
# TODO move some of this to class methods.
# TODO rewrite using the .as_blah_blah methods
if isinstance(expr, Add):
orders = [covariant_order(e) for e in expr.args]
if len(set(orders)) != 1:
raise ValueError('Misformed expression containing form fields of varying order.')
return orders[0]
elif isinstance(expr, Mul):
orders = [covariant_order(e) for e in expr.args]
not_zero = [o for o in orders if o != 0]
if len(not_zero) > 1:
raise ValueError('Misformed expression containing multiplication between forms.')
return 0 if not not_zero else not_zero[0]
elif isinstance(expr, Pow):
if covariant_order(expr.base) or covariant_order(expr.exp):
raise ValueError(
'Misformed expression containing a power of a form.')
return 0
elif isinstance(expr, Differential):
return covariant_order(*expr.args) + 1
elif isinstance(expr, TensorProduct):
return sum(covariant_order(a) for a in expr.args)
elif not _strict or expr.atoms(BaseScalarField):
return 0
else: # If it does not contain anything related to the diffgeom module and it is _strict
return -1
###############################################################################
# Coordinate transformation functions
###############################################################################
def vectors_in_basis(expr, to_sys):
"""Transform all base vectors in base vectors of a specified coord basis.
While the new base vectors are in the new coordinate system basis, any
coefficients are kept in the old system.
Examples
========
>>> from sympy.diffgeom import vectors_in_basis
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> vectors_in_basis(R2_r.e_x, R2_p)
-y*e_theta/(x**2 + y**2) + x*e_rho/sqrt(x**2 + y**2)
>>> vectors_in_basis(R2_p.e_r, R2_r)
sin(theta)*e_y + cos(theta)*e_x
"""
vectors = list(expr.atoms(BaseVectorField))
new_vectors = []
for v in vectors:
cs = v._coord_sys
jac = cs.jacobian(to_sys, cs.coord_functions())
new = (jac.T*Matrix(to_sys.base_vectors()))[v._index]
new_vectors.append(new)
return expr.subs(list(zip(vectors, new_vectors)))
###############################################################################
# Coordinate-dependent functions
###############################################################################
def twoform_to_matrix(expr):
"""Return the matrix representing the twoform.
For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`,
where `e_i` is the i-th base vector field for the coordinate system in
which the expression of `w` is given.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import twoform_to_matrix, TensorProduct
>>> TP = TensorProduct
>>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
Matrix([
[1, 0],
[0, 1]])
>>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
Matrix([
[x, 0],
[0, 1]])
>>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2)
Matrix([
[ 1, 0],
[-1/2, 1]])
"""
if covariant_order(expr) != 2 or contravariant_order(expr):
raise ValueError('The input expression is not a two-form.')
coord_sys = _find_coords(expr)
if len(coord_sys) != 1:
raise ValueError('The input expression concerns more than one '
'coordinate system, hence there is no unambiguous '
'way to choose a coordinate system for the matrix.')
coord_sys = coord_sys.pop()
vectors = coord_sys.base_vectors()
expr = expr.expand()
matrix_content = [[expr.rcall(v1, v2) for v1 in vectors]
for v2 in vectors]
return Matrix(matrix_content)
def metric_to_Christoffel_1st(expr):
"""Return the nested list of Christoffel symbols for the given metric.
This returns the Christoffel symbol of first kind that represents the
Levi-Civita connection for the given metric.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]]
"""
matrix = twoform_to_matrix(expr)
if not matrix.is_symmetric():
raise ValueError(
'The two-form representing the metric is not symmetric.')
coord_sys = _find_coords(expr).pop()
deriv_matrices = [matrix.applyfunc(d) for d in coord_sys.base_vectors()]
indices = list(range(coord_sys.dim))
christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2
for k in indices]
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(christoffel)
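# The nested comprehension above computes
#   christoffel[i, j, k] = (e_k(g[i, j]) + e_j(g[i, k]) - e_i(g[j, k])) / 2
# where g is the metric matrix and e_m are the base vector fields of the
# coordinate system.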
def metric_to_Christoffel_2nd(expr):
"""Return the nested list of Christoffel symbols for the given metric.
This returns the Christoffel symbol of second kind that represents the
Levi-Civita connection for the given metric.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]]
"""
ch_1st = metric_to_Christoffel_1st(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
# XXX workaround, inverting a matrix does not work if it contains non
# symbols
#matrix = twoform_to_matrix(expr).inv()
matrix = twoform_to_matrix(expr)
s_fields = set()
for e in matrix:
s_fields.update(e.atoms(BaseScalarField))
s_fields = list(s_fields)
dums = coord_sys.symbols
matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields)))
# XXX end of workaround
christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices])
for k in indices]
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(christoffel)
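# The second-kind symbols above are obtained by raising the first index with
# the inverse metric:
#   christoffel_2nd[i, j, k] = sum_l g_inv[i, l] * christoffel_1st[l, j, k]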
def metric_to_Riemann_components(expr):
"""Return the components of the Riemann tensor expressed in a given basis.
Given a metric it calculates the components of the Riemann tensor in the
canonical basis of the coordinate system in which the metric expression is
given.
Examples
========
>>> from sympy import exp
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]
>>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \
R2.r**2*TP(R2.dtheta, R2.dtheta)
>>> non_trivial_metric
exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta)
>>> riemann = metric_to_Riemann_components(non_trivial_metric)
>>> riemann[0, :, :, :]
[[[0, 0], [0, 0]], [[0, exp(-2*rho)*rho], [-exp(-2*rho)*rho, 0]]]
>>> riemann[1, :, :, :]
[[[0, -1/rho], [1/rho, 0]], [[0, 0], [0, 0]]]
"""
ch_2nd = metric_to_Christoffel_2nd(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
deriv_ch = [[[[d(ch_2nd[i, j, k])
for d in coord_sys.base_vectors()]
for k in indices]
for j in indices]
for i in indices]
riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu]
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices])
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu]
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
return ImmutableDenseNDimArray(riemann)
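# The components assembled above follow the convention
#   R^rho_{sigma mu nu} = e_mu(Gamma^rho_{sigma nu}) - e_nu(Gamma^rho_{sigma mu})
#                       + sum_l (Gamma^rho_{l mu} Gamma^l_{sigma nu}
#                                - Gamma^rho_{l nu} Gamma^l_{sigma mu})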
def metric_to_Ricci_components(expr):
"""Return the components of the Ricci tensor expressed in a given basis.
Given a metric it calculates the components of the Ricci tensor in the
canonical basis of the coordinate system in which the metric expression is
given.
Examples
========
>>> from sympy import exp
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[0, 0], [0, 0]]
>>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \
R2.r**2*TP(R2.dtheta, R2.dtheta)
>>> non_trivial_metric
exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta)
>>> metric_to_Ricci_components(non_trivial_metric)
[[1/rho, 0], [0, exp(-2*rho)*rho]]
"""
riemann = metric_to_Riemann_components(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
ricci = [[Add(*[riemann[k, i, k, j] for k in indices])
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(ricci)
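# The Ricci tensor above is the contraction of the Riemann tensor over its
# first and third indices: ricci[i, j] = sum_k riemann[k, i, k, j].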
###############################################################################
# Classes for deprecation
###############################################################################
class _deprecated_container:
# This class gives deprecation warning.
# When deprecated features are completely deleted, this should be removed as well.
# See https://github.com/sympy/sympy/pull/19368
def __init__(self, feature, useinstead, issue, version, data):
super().__init__(data)
self.feature = feature
self.useinstead = useinstead
self.issue = issue
self.version = version
def warn(self):
SymPyDeprecationWarning(
feature=self.feature,
useinstead=self.useinstead,
issue=self.issue,
deprecated_since_version=self.version).warn()
def __iter__(self):
self.warn()
return super().__iter__()
def __getitem__(self, key):
self.warn()
return super().__getitem__(key)
def __contains__(self, key):
self.warn()
return super().__contains__(key)
class _deprecated_list(_deprecated_container, list):
pass
class _deprecated_dict(_deprecated_container, dict):
pass
# Import at end to avoid cyclic imports
from sympy.simplify.simplify import simplify
|
1df17a48fef5d18ef213165c49eb08d2eb156838967443c498cb72303dc9b4e6 | from sympy.printing.pycode import PythonCodePrinter
""" This module collects utilities for rendering Python code. """
def render_as_module(content, standard='python3'):
"""Renders Python code as a module (with the required imports).
Parameters
==========
standard :
See the parameter ``standard`` in
:meth:`sympy.printing.pycode.pycode`
"""
printer = PythonCodePrinter({'standard':standard})
pystr = printer.doprint(content)
if printer._settings['fully_qualified_modules']:
module_imports_str = '\n'.join('import %s' % k for k in printer.module_imports)
else:
module_imports_str = '\n'.join(['from %s import %s' % (k, ', '.join(v)) for
k, v in printer.module_imports.items()])
return module_imports_str + '\n\n' + pystr
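# A minimal usage sketch (illustrative only; the exact output depends on the
# printer settings):
#   >>> from sympy import sin
#   >>> from sympy.abc import x
#   >>> print(render_as_module(sin(x)))
# would emit an 'import math' line followed by 'math.sin(x)' when fully
# qualified modules are enabled (the default).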
|
19bf80903558faf22ecaca360fff3112d92637f4d66362f0f5c03179583c1a63 | """
AST nodes specific to the C family of languages
"""
from sympy.codegen.ast import (
Attribute, Declaration, Node, String, Token, Type, none,
FunctionCall, CodeBlock
)
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.sympify import sympify
void = Type('void')
restrict = Attribute('restrict') # guarantees no pointer aliasing
volatile = Attribute('volatile')
static = Attribute('static')
def alignof(arg):
""" Generate of FunctionCall instance for calling 'alignof' """
return FunctionCall('alignof', [String(arg) if isinstance(arg, str) else arg])
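# Analogous to ``sizeof`` below; e.g. ccode(alignof(real)) is expected to
# render as 'alignof(double)' (illustrative, mirroring the sizeof example).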
def sizeof(arg):
""" Generate of FunctionCall instance for calling 'sizeof'
Examples
========
>>> from sympy.codegen.ast import real
>>> from sympy.codegen.cnodes import sizeof
>>> from sympy import ccode
>>> ccode(sizeof(real))
'sizeof(double)'
"""
return FunctionCall('sizeof', [String(arg) if isinstance(arg, str) else arg])
class CommaOperator(Basic):
""" Represents the comma operator in C """
def __new__(cls, *args):
return Basic.__new__(cls, *[sympify(arg) for arg in args])
class Label(Node):
""" Label for use with e.g. goto statement.
Examples
========
>>> from sympy import ccode, Symbol
>>> from sympy.codegen.cnodes import Label, PreIncrement
>>> print(ccode(Label('foo')))
foo:
>>> print(ccode(Label('bar', [PreIncrement(Symbol('a'))])))
bar:
++(a);
"""
__slots__ = ('name', 'body')
defaults = {'body': none}
_construct_name = String
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class goto(Token):
""" Represents goto in C """
__slots__ = ('label',)
_construct_label = Label
class PreDecrement(Basic):
""" Represents the pre-decrement operator
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cnodes import PreDecrement
>>> from sympy import ccode
>>> ccode(PreDecrement(x))
'--(x)'
"""
nargs = 1
class PostDecrement(Basic):
""" Represents the post-decrement operator """
nargs = 1
class PreIncrement(Basic):
""" Represents the pre-increment operator """
nargs = 1
class PostIncrement(Basic):
""" Represents the post-increment operator """
nargs = 1
class struct(Node):
""" Represents a struct in C """
__slots__ = ('name', 'declarations')
defaults = {'name': none}
_construct_name = String
@classmethod
def _construct_declarations(cls, args):
return Tuple(*[Declaration(arg) for arg in args])
class union(struct):
""" Represents a union in C """
|
52755b8a4cf6be3268f03b44d368a96ba644b027b9957393febd7a1790725b84 | import math
from sympy.sets.sets import Interval
from sympy.calculus.singularities import is_increasing, is_decreasing
from sympy.codegen.rewriting import Optimization
from sympy.core.function import UndefinedFunction
"""
This module collects classes useful for approximate rewriting of expressions.
This can be beneficial when generating numeric code for which performance is
of greater importance than precision (e.g. for preconditioners used in iterative
methods).
"""
class SumApprox(Optimization):
"""
Approximates sum by neglecting small terms.
Explanation
===========
If terms are expressions which can be determined to be monotonic, then
bounds for those expressions are added.
Parameters
==========
bounds : dict
Mapping expressions to length 2 tuple of bounds (low, high).
reltol : number
Threshold for when to ignore a term. Taken relative to the largest
lower bound among bounds.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import x, y, z
>>> from sympy.codegen.rewriting import optimize
>>> from sympy.codegen.approximations import SumApprox
>>> bounds = {x: (-1, 1), y: (1000, 2000), z: (-10, 3)}
>>> sum_approx3 = SumApprox(bounds, reltol=1e-3)
>>> sum_approx2 = SumApprox(bounds, reltol=1e-2)
>>> sum_approx1 = SumApprox(bounds, reltol=1e-1)
>>> expr = 3*(x + y + exp(z))
>>> optimize(expr, [sum_approx3])
3*(x + y + exp(z))
>>> optimize(expr, [sum_approx2])
3*y + 3*exp(z)
>>> optimize(expr, [sum_approx1])
3*y
"""
def __init__(self, bounds, reltol, **kwargs):
super().__init__(**kwargs)
self.bounds = bounds
self.reltol = reltol
def __call__(self, expr):
return expr.factor().replace(self.query, lambda arg: self.value(arg))
def query(self, expr):
return expr.is_Add
def value(self, add):
for term in add.args:
if term.is_number or term in self.bounds or len(term.free_symbols) != 1:
continue
fs, = term.free_symbols
if fs not in self.bounds:
continue
intrvl = Interval(*self.bounds[fs])
if is_increasing(term, intrvl, fs):
self.bounds[term] = (
term.subs({fs: self.bounds[fs][0]}),
term.subs({fs: self.bounds[fs][1]})
)
elif is_decreasing(term, intrvl, fs):
self.bounds[term] = (
term.subs({fs: self.bounds[fs][1]}),
term.subs({fs: self.bounds[fs][0]})
)
else:
return add
if all(term.is_number or term in self.bounds for term in add.args):
bounds = [(term, term) if term.is_number else self.bounds[term] for term in add.args]
largest_abs_guarantee = 0
for lo, hi in bounds:
if lo <= 0 <= hi:
continue
largest_abs_guarantee = max(largest_abs_guarantee,
min(abs(lo), abs(hi)))
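# A term is kept below only if its possible magnitude is at least
# reltol times the magnitude some other term is guaranteed to have.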
new_terms = []
for term, (lo, hi) in zip(add.args, bounds):
if max(abs(lo), abs(hi)) >= largest_abs_guarantee*self.reltol:
new_terms.append(term)
return add.func(*new_terms)
else:
return add
class SeriesApprox(Optimization):
""" Approximates functions by expanding them as a series.
Parameters
==========
bounds : dict
Mapping expressions to length 2 tuple of bounds (low, high).
reltol : number
Threshold for when to ignore a term. Taken relative to the largest
lower bound among bounds.
max_order : int
Largest order to include in series expansion
n_point_checks : int (even)
The validity of an expansion (with respect to reltol) is checked at
discrete points (linearly spaced over the bounds of the variable). The
number of points used in this numerical check is given by this number.
Examples
========
>>> from sympy import sin, pi
>>> from sympy.abc import x, y
>>> from sympy.codegen.rewriting import optimize
>>> from sympy.codegen.approximations import SeriesApprox
>>> bounds = {x: (-.1, .1), y: (pi-1, pi+1)}
>>> series_approx2 = SeriesApprox(bounds, reltol=1e-2)
>>> series_approx3 = SeriesApprox(bounds, reltol=1e-3)
>>> series_approx8 = SeriesApprox(bounds, reltol=1e-8)
>>> expr = sin(x)*sin(y)
>>> optimize(expr, [series_approx2])
x*(-y + (y - pi)**3/6 + pi)
>>> optimize(expr, [series_approx3])
(-x**3/6 + x)*sin(y)
>>> optimize(expr, [series_approx8])
sin(x)*sin(y)
"""
def __init__(self, bounds, reltol, max_order=4, n_point_checks=4, **kwargs):
super().__init__(**kwargs)
self.bounds = bounds
self.reltol = reltol
self.max_order = max_order
if n_point_checks % 2 == 1:
raise ValueError("Checking the solution at expansion point is not helpful")
self.n_point_checks = n_point_checks
self._prec = math.ceil(-math.log10(self.reltol))
def __call__(self, expr):
return expr.factor().replace(self.query, lambda arg: self.value(arg))
def query(self, expr):
return (expr.is_Function and not isinstance(expr, UndefinedFunction)
and len(expr.args) == 1)
def value(self, fexpr):
free_symbols = fexpr.free_symbols
if len(free_symbols) != 1:
return fexpr
symb, = free_symbols
if symb not in self.bounds:
return fexpr
lo, hi = self.bounds[symb]
x0 = (lo + hi)/2
cheapest = None
for n in range(self.max_order+1, 0, -1):
fseri = fexpr.series(symb, x0=x0, n=n).removeO()
n_ok = True
for idx in range(self.n_point_checks):
x = lo + idx*(hi - lo)/(self.n_point_checks - 1)
val = fseri.xreplace({symb: x})
ref = fexpr.xreplace({symb: x})
if abs((1 - val/ref).evalf(self._prec)) > self.reltol:
n_ok = False
break
if n_ok:
cheapest = fseri
else:
break
if cheapest is None:
return fexpr
else:
return cheapest
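# The loop above starts from the highest order and keeps lowering the
# truncation order while the sampled relative error stays within reltol;
# the lowest-order series that passed the check is returned, and the
# original function call is kept if no truncation was acceptable.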
|
1906f2d917dd5a3dd910d031a7d53b9c641356260b3918a99ec433fa104cdc3e | """
AST nodes specific to Fortran.
The functions defined in this module allows the user to express functions such as ``dsign``
as a SymPy function for symbolic manipulation.
"""
from sympy.codegen.ast import (
Attribute, CodeBlock, FunctionCall, Node, none, String,
Token, _mk_Tuple, Variable
)
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import Function
from sympy.core.numbers import Float, Integer
from sympy.core.sympify import sympify
from sympy.logic import true, false
from sympy.utilities.iterables import iterable
pure = Attribute('pure')
elemental = Attribute('elemental') # (all elemental procedures are also pure)
intent_in = Attribute('intent_in')
intent_out = Attribute('intent_out')
intent_inout = Attribute('intent_inout')
allocatable = Attribute('allocatable')
class Program(Token):
""" Represents a 'program' block in Fortran.
Examples
========
>>> from sympy.codegen.ast import Print
>>> from sympy.codegen.fnodes import Program
>>> prog = Program('myprogram', [Print([42])])
>>> from sympy import fcode
>>> print(fcode(prog, source_format='free'))
program myprogram
print *, 42
end program
"""
__slots__ = ('name', 'body')
_construct_name = String
_construct_body = staticmethod(lambda body: CodeBlock(*body))
class use_rename(Token):
""" Represents a renaming in a use statement in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import use_rename, use
>>> from sympy import fcode
>>> ren = use_rename("thingy", "convolution2d")
>>> print(fcode(ren, source_format='free'))
thingy => convolution2d
>>> full = use('signallib', only=['snr', ren])
>>> print(fcode(full, source_format='free'))
use signallib, only: snr, thingy => convolution2d
"""
__slots__ = ('local', 'original')
_construct_local = String
_construct_original = String
def _name(arg):
if hasattr(arg, 'name'):
return arg.name
else:
return String(arg)
class use(Token):
""" Represents a use statement in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import use
>>> from sympy import fcode
>>> fcode(use('signallib'), source_format='free')
'use signallib'
>>> fcode(use('signallib', [('metric', 'snr')]), source_format='free')
'use signallib, metric => snr'
>>> fcode(use('signallib', only=['snr', 'convolution2d']), source_format='free')
'use signallib, only: snr, convolution2d'
"""
__slots__ = ('namespace', 'rename', 'only')
defaults = {'rename': none, 'only': none}
_construct_namespace = staticmethod(_name)
_construct_rename = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else use_rename(*arg) for arg in args]))
_construct_only = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else _name(arg) for arg in args]))
class Module(Token):
""" Represents a module in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import Module
>>> from sympy import fcode
>>> print(fcode(Module('signallib', ['implicit none'], []), source_format='free'))
module signallib
implicit none
<BLANKLINE>
contains
<BLANKLINE>
<BLANKLINE>
end module
"""
__slots__ = ('name', 'declarations', 'definitions')
defaults = {'declarations': Tuple()}
_construct_name = String
_construct_declarations = staticmethod(lambda arg: CodeBlock(*arg))
_construct_definitions = staticmethod(lambda arg: CodeBlock(*arg))
class Subroutine(Node):
""" Represents a subroutine in Fortran.
Examples
========
>>> from sympy import fcode, symbols
>>> from sympy.codegen.ast import Print
>>> from sympy.codegen.fnodes import Subroutine
>>> x, y = symbols('x y', real=True)
>>> sub = Subroutine('mysub', [x, y], [Print([x**2 + y**2, x*y])])
>>> print(fcode(sub, source_format='free', standard=2003))
subroutine mysub(x, y)
real*8 :: x
real*8 :: y
print *, x**2 + y**2, x*y
end subroutine
"""
__slots__ = ('name', 'parameters', 'body', 'attrs')
_construct_name = String
_construct_parameters = staticmethod(lambda params: Tuple(*map(Variable.deduced, params)))
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class SubroutineCall(Token):
""" Represents a call to a subroutine in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import SubroutineCall
>>> from sympy import fcode
>>> fcode(SubroutineCall('mysub', 'x y'.split()))
' call mysub(x, y)'
"""
__slots__ = ('name', 'subroutine_args')
_construct_name = staticmethod(_name)
_construct_subroutine_args = staticmethod(_mk_Tuple)
class Do(Token):
""" Represents a Do loop in in Fortran.
Examples
========
>>> from sympy import fcode, symbols
>>> from sympy.codegen.ast import aug_assign, Print
>>> from sympy.codegen.fnodes import Do
>>> i, n = symbols('i n', integer=True)
>>> r = symbols('r', real=True)
>>> body = [aug_assign(r, '+', 1/i), Print([i, r])]
>>> do1 = Do(body, i, 1, n)
>>> print(fcode(do1, source_format='free'))
do i = 1, n
r = r + 1d0/i
print *, i, r
end do
>>> do2 = Do(body, i, 1, n, 2)
>>> print(fcode(do2, source_format='free'))
do i = 1, n, 2
r = r + 1d0/i
print *, i, r
end do
"""
__slots__ = ('body', 'counter', 'first', 'last', 'step', 'concurrent')
defaults = {'step': Integer(1), 'concurrent': false}
_construct_body = staticmethod(lambda body: CodeBlock(*body))
_construct_counter = staticmethod(sympify)
_construct_first = staticmethod(sympify)
_construct_last = staticmethod(sympify)
_construct_step = staticmethod(sympify)
_construct_concurrent = staticmethod(lambda arg: true if arg else false)
class ArrayConstructor(Token):
""" Represents an array constructor.
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import ArrayConstructor
>>> ac = ArrayConstructor([1, 2, 3])
>>> fcode(ac, standard=95, source_format='free')
'(/1, 2, 3/)'
>>> fcode(ac, standard=2003, source_format='free')
'[1, 2, 3]'
"""
__slots__ = ('elements',)
_construct_elements = staticmethod(_mk_Tuple)
class ImpliedDoLoop(Token):
""" Represents an implied do loop in Fortran.
Examples
========
>>> from sympy import Symbol, fcode
>>> from sympy.codegen.fnodes import ImpliedDoLoop, ArrayConstructor
>>> i = Symbol('i', integer=True)
>>> idl = ImpliedDoLoop(i**3, i, -3, 3, 2) # -27, -1, 1, 27
>>> ac = ArrayConstructor([-28, idl, 28]) # -28, -27, -1, 1, 27, 28
>>> fcode(ac, standard=2003, source_format='free')
'[-28, (i**3, i = -3, 3, 2), 28]'
"""
__slots__ = ('expr', 'counter', 'first', 'last', 'step')
defaults = {'step': Integer(1)}
_construct_expr = staticmethod(sympify)
_construct_counter = staticmethod(sympify)
_construct_first = staticmethod(sympify)
_construct_last = staticmethod(sympify)
_construct_step = staticmethod(sympify)
class Extent(Basic):
""" Represents a dimension extent.
Examples
========
>>> from sympy.codegen.fnodes import Extent
>>> e = Extent(-3, 3) # -3, -2, -1, 0, 1, 2, 3
>>> from sympy import fcode
>>> fcode(e, source_format='free')
'-3:3'
>>> from sympy.codegen.ast import Variable, real
>>> from sympy.codegen.fnodes import dimension, intent_out
>>> dim = dimension(e, e)
>>> arr = Variable('x', real, attrs=[dim, intent_out])
>>> fcode(arr.as_Declaration(), source_format='free', standard=2003)
'real*8, dimension(-3:3, -3:3), intent(out) :: x'
"""
def __new__(cls, *args):
if len(args) == 2:
low, high = args
return Basic.__new__(cls, sympify(low), sympify(high))
elif len(args) == 0 or (len(args) == 1 and args[0] in (':', None)):
return Basic.__new__(cls) # assumed shape
else:
raise ValueError("Expected 0 or 2 args (or one argument == None or ':')")
def _sympystr(self, printer):
if len(self.args) == 0:
return ':'
return '%d:%d' % self.args
assumed_extent = Extent() # or Extent(':'), Extent(None)
def dimension(*args):
""" Creates a 'dimension' Attribute with (up to 7) extents.
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import dimension, intent_in
>>> dim = dimension('2', ':') # 2 rows, runtime determined number of columns
>>> from sympy.codegen.ast import Variable, integer
>>> arr = Variable('a', integer, attrs=[dim, intent_in])
>>> fcode(arr.as_Declaration(), source_format='free', standard=2003)
'integer*4, dimension(2, :), intent(in) :: a'
"""
if len(args) > 7:
raise ValueError("Fortran only supports up to 7 dimensional arrays")
parameters = []
for arg in args:
if isinstance(arg, Extent):
parameters.append(arg)
elif isinstance(arg, str):
if arg == ':':
parameters.append(Extent())
else:
parameters.append(String(arg))
elif iterable(arg):
parameters.append(Extent(*arg))
else:
parameters.append(sympify(arg))
if len(args) == 0:
raise ValueError("Need at least one dimension")
return Attribute('dimension', parameters)
assumed_size = dimension('*')
def array(symbol, dim, intent=None, *, attrs=(), value=None, type=None):
""" Convenience function for creating a Variable instance for a Fortran array.
Parameters
==========
symbol : symbol
dim : Attribute or iterable
        If dim is an ``Attribute`` it needs to have the name 'dimension'. If it is
        not an ``Attribute``, then it is passed to :func:`dimension` as ``*dim``.
intent : str
One of: 'in', 'out', 'inout' or None
    attrs, value, type : keyword-only arguments
        Passed on to ``Variable`` (the ``dimension`` and ``intent`` attributes
        constructed here are appended to ``attrs``).
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.ast import integer, real
>>> from sympy.codegen.fnodes import array
>>> arr = array('a', '*', 'in', type=integer)
>>> print(fcode(arr.as_Declaration(), source_format='free', standard=2003))
integer*4, dimension(*), intent(in) :: a
>>> x = array('x', [3, ':', ':'], intent='out', type=real)
>>> print(fcode(x.as_Declaration(value=1), source_format='free', standard=2003))
real*8, dimension(3, :, :), intent(out) :: x = 1
"""
if isinstance(dim, Attribute):
if str(dim.name) != 'dimension':
raise ValueError("Got an unexpected Attribute argument as dim: %s" % str(dim))
else:
dim = dimension(*dim)
attrs = list(attrs) + [dim]
if intent is not None:
if intent not in (intent_in, intent_out, intent_inout):
intent = {'in': intent_in, 'out': intent_out, 'inout': intent_inout}[intent]
attrs.append(intent)
if type is None:
return Variable.deduced(symbol, value=value, attrs=attrs)
else:
return Variable(symbol, type, value=value, attrs=attrs)
def _printable(arg):
return String(arg) if isinstance(arg, str) else sympify(arg)
def allocated(array):
""" Creates an AST node for a function call to Fortran's "allocated(...)"
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import allocated
>>> alloc = allocated('x')
>>> fcode(alloc, source_format='free')
'allocated(x)'
"""
return FunctionCall('allocated', [_printable(array)])
def lbound(array, dim=None, kind=None):
""" Creates an AST node for a function call to Fortran's "lbound(...)"
Parameters
==========
array : Symbol or String
dim : expr
kind : expr
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import lbound
>>> lb = lbound('arr', dim=2)
>>> fcode(lb, source_format='free')
'lbound(arr, 2)'
"""
return FunctionCall(
'lbound',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def ubound(array, dim=None, kind=None):
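    """ Creates an AST node for a function call to Fortran's "ubound(...)"
    Parameters
    ==========
    array : Symbol or String
    dim : expr
    kind : expr
    Examples
    ========
    >>> from sympy import fcode
    >>> from sympy.codegen.fnodes import ubound
    >>> ub = ubound('arr', dim=2)
    >>> fcode(ub, source_format='free')
    'ubound(arr, 2)'
    """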
return FunctionCall(
'ubound',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def shape(source, kind=None):
""" Creates an AST node for a function call to Fortran's "shape(...)"
Parameters
==========
source : Symbol or String
kind : expr
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import shape
>>> shp = shape('x')
>>> fcode(shp, source_format='free')
'shape(x)'
"""
return FunctionCall(
'shape',
[_printable(source)] +
([_printable(kind)] if kind else [])
)
def size(array, dim=None, kind=None):
""" Creates an AST node for a function call to Fortran's "size(...)"
Examples
========
>>> from sympy import fcode, Symbol
>>> from sympy.codegen.ast import FunctionDefinition, real, Return
>>> from sympy.codegen.fnodes import array, sum_, size
>>> a = Symbol('a', real=True)
>>> body = [Return((sum_(a**2)/size(a))**.5)]
>>> arr = array(a, dim=[':'], intent='in')
>>> fd = FunctionDefinition(real, 'rms', [arr], body)
>>> print(fcode(fd, source_format='free', standard=2003))
real*8 function rms(a)
real*8, dimension(:), intent(in) :: a
rms = sqrt(sum(a**2)*1d0/size(a))
end function
"""
return FunctionCall(
'size',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def reshape(source, shape, pad=None, order=None):
""" Creates an AST node for a function call to Fortran's "reshape(...)"
Parameters
==========
source : Symbol or String
    shape : ArrayExpr
    pad : expr, optional
    order : expr, optional
"""
return FunctionCall(
'reshape',
[_printable(source), _printable(shape)] +
([_printable(pad)] if pad else []) +
        ([_printable(order)] if order else [])
)
def bind_C(name=None):
""" Creates an Attribute ``bind_C`` with a name.
Parameters
==========
name : str
Examples
========
>>> from sympy import fcode, Symbol
>>> from sympy.codegen.ast import FunctionDefinition, real, Return
>>> from sympy.codegen.fnodes import array, sum_, bind_C
>>> a = Symbol('a', real=True)
>>> s = Symbol('s', integer=True)
>>> arr = array(a, dim=[s], intent='in')
>>> body = [Return((sum_(a**2)/s)**.5)]
>>> fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')])
>>> print(fcode(fd, source_format='free', standard=2003))
real*8 function rms(a, s) bind(C, name="rms")
real*8, dimension(s), intent(in) :: a
integer*4 :: s
rms = sqrt(sum(a**2)/s)
end function
"""
return Attribute('bind_C', [String(name)] if name else [])
class GoTo(Token):
""" Represents a goto statement in Fortran
Examples
========
>>> from sympy.codegen.fnodes import GoTo
>>> go = GoTo([10, 20, 30], 'i')
>>> from sympy import fcode
>>> fcode(go, source_format='free')
'go to (10, 20, 30), i'
"""
__slots__ = ('labels', 'expr')
defaults = {'expr': none}
_construct_labels = staticmethod(_mk_Tuple)
_construct_expr = staticmethod(sympify)
class FortranReturn(Token):
""" AST node explicitly mapped to a fortran "return".
Explanation
===========
    Because a return statement in Fortran differs from the one in C, and in order
    to aid reuse of our codegen ASTs, the ordinary ``.codegen.ast.Return`` is
    interpreted as assignment to the result variable of the function. If one for
    some reason needs to generate a Fortran RETURN statement, this node should be
    used.
Examples
========
>>> from sympy.codegen.fnodes import FortranReturn
>>> from sympy import fcode
>>> fcode(FortranReturn('x'))
' return x'
"""
__slots__ = ('return_value',)
defaults = {'return_value': none}
_construct_return_value = staticmethod(sympify)
class FFunction(Function):
_required_standard = 77
def _fcode(self, printer):
name = self.__class__.__name__
if printer._settings['standard'] < self._required_standard:
raise NotImplementedError("%s requires Fortran %d or newer" %
(name, self._required_standard))
return '{}({})'.format(name, ', '.join(map(printer._print, self.args)))
class F95Function(FFunction):
_required_standard = 95
class isign(FFunction):
""" Fortran sign intrinsic for integer arguments. """
nargs = 2
class dsign(FFunction):
""" Fortran sign intrinsic for double precision arguments. """
nargs = 2
class cmplx(FFunction):
""" Fortran complex conversion function. """
nargs = 2 # may be extended to (2, 3) at a later point
class kind(FFunction):
""" Fortran kind function. """
nargs = 1
class merge(F95Function):
""" Fortran merge function """
nargs = 3
class _literal(Float):
_token = None # type: str
_decimals = None # type: int
def _fcode(self, printer, *args, **kwargs):
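        # Render the Float as a Fortran real literal: format the value in scientific
        # notation, strip trailing zeros from the mantissa and join mantissa and
        # exponent with ``_token`` ('e' or 'd'), e.g. (double precision)
        # 0.5 -> '5.00000000000000000e-01' -> '5d-1'.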
mantissa, sgnd_ex = ('%.{}e'.format(self._decimals) % self).split('e')
mantissa = mantissa.strip('0').rstrip('.')
ex_sgn, ex_num = sgnd_ex[0], sgnd_ex[1:].lstrip('0')
ex_sgn = '' if ex_sgn == '+' else ex_sgn
return (mantissa or '0') + self._token + ex_sgn + (ex_num or '0')
class literal_sp(_literal):
""" Fortran single precision real literal """
_token = 'e'
_decimals = 9
class literal_dp(_literal):
""" Fortran double precision real literal """
_token = 'd'
_decimals = 17
class sum_(Token, Expr):
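    """ Represents a call to Fortran's ``sum`` intrinsic function (the docstring
    of :func:`size` above contains a doctest using ``sum_``). """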
__slots__ = ('array', 'dim', 'mask')
defaults = {'dim': none, 'mask': none}
_construct_array = staticmethod(sympify)
_construct_dim = staticmethod(sympify)
class product_(Token, Expr):
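    """ Represents a call to Fortran's ``product`` intrinsic function. """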
__slots__ = ('array', 'dim', 'mask')
defaults = {'dim': none, 'mask': none}
_construct_array = staticmethod(sympify)
_construct_dim = staticmethod(sympify)
|
8885997a9a6fcf2d6d5f555f11fb209161e667ca59b422c58abfb4432e7f96dc | from sympy.core.function import Add, ArgumentIndexError, Function
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import default_sort_key
from sympy.functions.elementary.exponential import exp, log
def _logaddexp(x1, x2, *, evaluate=True):
return log(Add(exp(x1, evaluate=evaluate), exp(x2, evaluate=evaluate), evaluate=evaluate))
_two = S.One*2
_ln2 = log(_two)
def _lb(x, *, evaluate=True):
return log(x, evaluate=evaluate)/_ln2
def _exp2(x, *, evaluate=True):
return Pow(_two, x, evaluate=evaluate)
def _logaddexp2(x1, x2, *, evaluate=True):
return _lb(Add(_exp2(x1, evaluate=evaluate),
_exp2(x2, evaluate=evaluate), evaluate=evaluate))
class logaddexp(Function):
""" Logarithm of the sum of exponentiations of the inputs.
Helper class for use with e.g. numpy.logaddexp
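    Examples
    ========
    >>> from sympy import symbols, log
    >>> from sympy.codegen.numpy_nodes import logaddexp
    >>> x, y = symbols('x y')
    >>> logaddexp(x, y) == logaddexp(y, x)  # arguments are sorted on construction
    True
    >>> logaddexp(x, y).rewrite(log)
    log(exp(x) + exp(y))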
See Also
========
https://numpy.org/doc/stable/reference/generated/numpy.logaddexp.html
"""
nargs = 2
def __new__(cls, *args):
return Function.__new__(cls, *sorted(args, key=default_sort_key))
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
wrt, other = self.args
elif argindex == 2:
other, wrt = self.args
else:
raise ArgumentIndexError(self, argindex)
return S.One/(S.One + exp(other-wrt))
def _eval_rewrite_as_log(self, x1, x2, **kwargs):
return _logaddexp(x1, x2)
def _eval_evalf(self, *args, **kwargs):
return self.rewrite(log).evalf(*args, **kwargs)
def _eval_simplify(self, *args, **kwargs):
a, b = map(lambda x: x.simplify(**kwargs), self.args)
candidate = _logaddexp(a, b)
if candidate != _logaddexp(a, b, evaluate=False):
return candidate
else:
return logaddexp(a, b)
class logaddexp2(Function):
""" Logarithm of the sum of exponentiations of the inputs in base-2.
Helper class for use with e.g. numpy.logaddexp2
See Also
========
https://numpy.org/doc/stable/reference/generated/numpy.logaddexp2.html
"""
nargs = 2
def __new__(cls, *args):
return Function.__new__(cls, *sorted(args, key=default_sort_key))
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
wrt, other = self.args
elif argindex == 2:
other, wrt = self.args
else:
raise ArgumentIndexError(self, argindex)
return S.One/(S.One + _exp2(other-wrt))
def _eval_rewrite_as_log(self, x1, x2, **kwargs):
return _logaddexp2(x1, x2)
def _eval_evalf(self, *args, **kwargs):
return self.rewrite(log).evalf(*args, **kwargs)
def _eval_simplify(self, *args, **kwargs):
a, b = map(lambda x: x.simplify(**kwargs).factor(), self.args)
candidate = _logaddexp2(a, b)
if candidate != _logaddexp2(a, b, evaluate=False):
return candidate
else:
return logaddexp2(a, b)
|
fe2b12401cd0ce7d712a462160417b9d52b5b026bb9fbf10a3902809e36cf1e5 | """ This module collects functions for constructing ASTs representing algorithms. """
from sympy.core.containers import Tuple
from sympy.core.numbers import oo
from sympy.core.relational import (Gt, Lt)
from sympy.core.symbol import (Dummy, Symbol)
from sympy.functions.elementary.complexes import Abs
from sympy.logic.boolalg import And
from sympy.codegen.ast import (
Assignment, AddAugmentedAssignment, CodeBlock, Declaration, FunctionDefinition,
Print, Return, Scope, While, Variable, Pointer, real
)
""" This module collects functions for constructing ASTs representing algorithms. """
def newtons_method(expr, wrt, atol=1e-12, delta=None, debug=False,
itermax=None, counter=None):
""" Generates an AST for Newton-Raphson method (a root-finding algorithm).
Explanation
===========
    Returns an abstract syntax tree (AST) based on ``sympy.codegen.ast`` for Newton's
method of root-finding.
Parameters
==========
expr : expression
wrt : Symbol
With respect to, i.e. what is the variable.
atol : number or expr
Absolute tolerance (stopping criterion)
delta : Symbol
Will be a ``Dummy`` if ``None``.
debug : bool
Whether to print convergence information during iterations
itermax : number or expr
Maximum number of iterations.
counter : Symbol
Will be a ``Dummy`` if ``None``.
Examples
========
>>> from sympy import symbols, cos
>>> from sympy.codegen.ast import Assignment
>>> from sympy.codegen.algorithms import newtons_method
>>> x, dx, atol = symbols('x dx atol')
>>> expr = cos(x) - x**3
>>> algo = newtons_method(expr, x, atol, dx)
>>> algo.has(Assignment(dx, -expr/expr.diff(x)))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Newton%27s_method
"""
if delta is None:
delta = Dummy()
Wrapper = Scope
name_d = 'delta'
else:
Wrapper = lambda x: x
name_d = delta.name
delta_expr = -expr/expr.diff(wrt)
whl_bdy = [Assignment(delta, delta_expr), AddAugmentedAssignment(wrt, delta)]
if debug:
prnt = Print([wrt, delta], r"{}=%12.5g {}=%12.5g\n".format(wrt.name, name_d))
whl_bdy = [whl_bdy[0], prnt] + whl_bdy[1:]
req = Gt(Abs(delta), atol)
declars = [Declaration(Variable(delta, type=real, value=oo))]
if itermax is not None:
counter = counter or Dummy(integer=True)
v_counter = Variable.deduced(counter, 0)
declars.append(Declaration(v_counter))
whl_bdy.append(AddAugmentedAssignment(counter, 1))
req = And(req, Lt(counter, itermax))
whl = While(req, CodeBlock(*whl_bdy))
blck = declars + [whl]
return Wrapper(CodeBlock(*blck))
def _symbol_of(arg):
if isinstance(arg, Declaration):
arg = arg.variable.symbol
elif isinstance(arg, Variable):
arg = arg.symbol
return arg
def newtons_method_function(expr, wrt, params=None, func_name="newton", attrs=Tuple(), *, delta=None, **kwargs):
""" Generates an AST for a function implementing the Newton-Raphson method.
Parameters
==========
expr : expression
wrt : Symbol
With respect to, i.e. what is the variable
params : iterable of symbols
Symbols appearing in expr that are taken as constants during the iterations
(these will be accepted as parameters to the generated function).
func_name : str
Name of the generated function.
attrs : Tuple
Attribute instances passed as ``attrs`` to ``FunctionDefinition``.
\\*\\*kwargs :
Keyword arguments passed to :func:`sympy.codegen.algorithms.newtons_method`.
Examples
========
>>> from sympy import symbols, cos
>>> from sympy.codegen.algorithms import newtons_method_function
>>> from sympy.codegen.pyutils import render_as_module
>>> x = symbols('x')
>>> expr = cos(x) - x**3
>>> func = newtons_method_function(expr, x)
>>> py_mod = render_as_module(func) # source code as string
>>> namespace = {}
>>> exec(py_mod, namespace, namespace)
>>> res = eval('newton(0.5)', namespace)
>>> abs(res - 0.865474033102) < 1e-12
True
See Also
========
sympy.codegen.algorithms.newtons_method
"""
if params is None:
params = (wrt,)
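    # Parameters passed as Pointer instances are "dereferenced" textually by
    # substituting the symbol ``p`` with a symbol named ``(*p)`` in the expression,
    # which the code printers then emit verbatim.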
pointer_subs = {p.symbol: Symbol('(*%s)' % p.symbol.name)
for p in params if isinstance(p, Pointer)}
if delta is None:
delta = Symbol('d_' + wrt.name)
if expr.has(delta):
delta = None # will use Dummy
algo = newtons_method(expr, wrt, delta=delta, **kwargs).xreplace(pointer_subs)
if isinstance(algo, Scope):
algo = algo.body
not_in_params = expr.free_symbols.difference({_symbol_of(p) for p in params})
if not_in_params:
raise ValueError("Missing symbols in params: %s" % ', '.join(map(str, not_in_params)))
declars = tuple(Variable(p, real) for p in params)
body = CodeBlock(algo, Return(wrt))
return FunctionDefinition(real, func_name, declars, body, attrs=attrs)
|
68d83eb67e8d337a66d24a9ca0d8e0c7b2d2f27ec092b91b16153549c363e1d4 | from sympy.core import Tuple
class List(Tuple):
"""Represents a (frozen) (Python) list (for code printing purposes)."""
    def __eq__(self, other):
        if isinstance(other, list):
            return self == List(*other)
        else:
            return self.args == other
    def __hash__(self):
        # Defining __eq__ would otherwise set __hash__ to None, making instances unhashable.
        return super().__hash__()
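# A small usage sketch (hypothetical output, depending on the printer in use): the
# Python code printers are expected to render List(1, 2, 3) as the literal [1, 2, 3].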
|
eacd5bcba5fd8ba60d4ee0a078ef9529bba0beeeb03daa32877e9263b0122cdc | """
Classes and functions useful for rewriting expressions for optimized code
generation. Some languages (or standards thereof), e.g. C99, offer specialized
math functions for better performance and/or precision.
Using the ``optimize`` function in this module, together with a collection of
rules (represented as instances of ``Optimization``), one can rewrite the
expressions for this purpose::
>>> from sympy import Symbol, exp, log
>>> from sympy.codegen.rewriting import optimize, optims_c99
>>> x = Symbol('x')
>>> optimize(3*exp(2*x) - 3, optims_c99)
3*expm1(2*x)
>>> optimize(exp(2*x) - 1 - exp(-33), optims_c99)
expm1(2*x) - exp(-33)
>>> optimize(log(3*x + 3), optims_c99)
log1p(x) + log(3)
>>> optimize(log(2*x + 3), optims_c99)
log(2*x + 3)
The ``optims_c99`` imported above is a tuple containing the following instances
(which may be imported from ``sympy.codegen.rewriting``):
- ``expm1_opt``
- ``log1p_opt``
- ``exp2_opt``
- ``log2_opt``
- ``log2const_opt``
"""
from sympy.core.function import expand_log
from sympy.core.singleton import S
from sympy.core.symbol import Wild
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import (Max, Min)
from sympy.functions.elementary.trigonometric import (cos, sin, sinc)
from sympy.assumptions import Q, ask
from sympy.codegen.cfunctions import log1p, log2, exp2, expm1
from sympy.codegen.matrix_nodes import MatrixSolve
from sympy.core.expr import UnevaluatedExpr
from sympy.core.power import Pow
from sympy.codegen.numpy_nodes import logaddexp, logaddexp2
from sympy.codegen.scipy_nodes import cosm1
from sympy.core.mul import Mul
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.utilities.iterables import sift
class Optimization:
""" Abstract base class for rewriting optimization.
Subclasses should implement ``__call__`` taking an expression
as argument.
Parameters
==========
cost_function : callable returning number
priority : number
"""
def __init__(self, cost_function=None, priority=1):
self.cost_function = cost_function
self.priority=priority
def cheapest(self, *args):
return sorted(args, key=self.cost_function)[0]
class ReplaceOptim(Optimization):
""" Rewriting optimization calling replace on expressions.
Explanation
===========
The instance can be used as a function on expressions for which
it will apply the ``replace`` method (see
:meth:`sympy.core.basic.Basic.replace`).
Parameters
==========
query :
First argument passed to replace.
value :
Second argument passed to replace.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.rewriting import ReplaceOptim
>>> from sympy.codegen.cfunctions import exp2
>>> x = Symbol('x')
>>> exp2_opt = ReplaceOptim(lambda p: p.is_Pow and p.base == 2,
... lambda p: exp2(p.exp))
>>> exp2_opt(2**x)
exp2(x)
"""
def __init__(self, query, value, **kwargs):
super().__init__(**kwargs)
self.query = query
self.value = value
def __call__(self, expr):
return expr.replace(self.query, self.value)
def optimize(expr, optimizations):
""" Apply optimizations to an expression.
Parameters
==========
expr : expression
optimizations : iterable of ``Optimization`` instances
The optimizations will be sorted with respect to ``priority`` (highest first).
Examples
========
>>> from sympy import log, Symbol
>>> from sympy.codegen.rewriting import optims_c99, optimize
>>> x = Symbol('x')
>>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)
log1p(x**2) + log2(x + 3)
"""
for optim in sorted(optimizations, key=lambda opt: opt.priority, reverse=True):
new_expr = optim(expr)
if optim.cost_function is None:
expr = new_expr
else:
expr = optim.cheapest(expr, new_expr)
return expr
exp2_opt = ReplaceOptim(
lambda p: p.is_Pow and p.base == 2,
lambda p: exp2(p.exp)
)
_d = Wild('d', properties=[lambda x: x.is_Dummy])
_u = Wild('u', properties=[lambda x: not x.is_number and not x.is_Add])
_v = Wild('v')
_w = Wild('w')
_n = Wild('n', properties=[lambda x: x.is_number])
sinc_opt1 = ReplaceOptim(
sin(_w)/_w, sinc(_w)
)
sinc_opt2 = ReplaceOptim(
sin(_n*_w)/_w, _n*sinc(_n*_w)
)
sinc_opts = (sinc_opt1, sinc_opt2)
log2_opt = ReplaceOptim(_v*log(_w)/log(2), _v*log2(_w), cost_function=lambda expr: expr.count(
lambda e: ( # division & eval of transcendentals are expensive floating point operations...
e.is_Pow and e.exp.is_negative # division
or (isinstance(e, (log, log2)) and not e.args[0].is_number)) # transcendental
)
)
log2const_opt = ReplaceOptim(log(2)*log2(_w), log(_w))
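# Rewrites log(exp(a) + exp(b)) as Max(a, b) + log1p(exp(Min(a, b))), the numerically
# robust way of evaluating a two-term log-sum-exp.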
logsumexp_2terms_opt = ReplaceOptim(
lambda l: (isinstance(l, log)
and l.args[0].is_Add
and len(l.args[0].args) == 2
and all(isinstance(t, exp) for t in l.args[0].args)),
lambda l: (
Max(*[e.args[0] for e in l.args[0].args]) +
log1p(exp(Min(*[e.args[0] for e in l.args[0].args])))
)
)
class FuncMinusOneOptim(ReplaceOptim):
"""Specialization of ReplaceOptim for functions evaluating "f(x) - 1".
Explanation
===========
    Numerical functions which go toward one as x goes toward zero are often best
    implemented by a dedicated function in order to avoid catastrophic
    cancellation. One such example is ``expm1(x)`` in the C standard library,
    which evaluates ``exp(x) - 1``. Such a function preserves many more
    significant digits when its argument is much smaller than one, compared
    to subtracting one afterwards.
Parameters
==========
func :
The function which is subtracted by one.
func_m_1 :
The specialized function evaluating ``func(x) - 1``.
opportunistic : bool
When ``True``, apply the transformation as long as the magnitude of the
remaining number terms decreases. When ``False``, only apply the
transformation if it completely eliminates the number term.
Examples
========
>>> from sympy import symbols, exp
>>> from sympy.codegen.rewriting import FuncMinusOneOptim
>>> from sympy.codegen.cfunctions import expm1
>>> x, y = symbols('x y')
>>> expm1_opt = FuncMinusOneOptim(exp, expm1)
>>> expm1_opt(exp(x) + 2*exp(5*y) - 3)
expm1(x) + 2*expm1(5*y)
"""
def __init__(self, func, func_m_1, opportunistic=True):
weight = 10 # <-- this is an arbitrary number (heuristic)
super().__init__(lambda e: e.is_Add, self.replace_in_Add,
cost_function=lambda expr: expr.count_ops() - weight*expr.count(func_m_1))
self.func = func
self.func_m_1 = func_m_1
self.opportunistic = opportunistic
def _group_Add_terms(self, add):
numbers, non_num = sift(add.args, lambda arg: arg.is_number, binary=True)
numsum = sum(numbers)
terms_with_func, other = sift(non_num, lambda arg: arg.has(self.func), binary=True)
return numsum, terms_with_func, other
def replace_in_Add(self, e):
""" passed as second argument to Basic.replace(...) """
numsum, terms_with_func, other_non_num_terms = self._group_Add_terms(e)
if numsum == 0:
return e
substituted, untouched = [], []
for with_func in terms_with_func:
if with_func.is_Mul:
func, coeff = sift(with_func.args, lambda arg: arg.func == self.func, binary=True)
if len(func) == 1 and len(coeff) == 1:
func, coeff = func[0], coeff[0]
else:
coeff = None
elif with_func.func == self.func:
func, coeff = with_func, S.One
else:
coeff = None
if coeff is not None and coeff.is_number and sign(coeff) == -sign(numsum):
if self.opportunistic:
do_substitute = abs(coeff+numsum) < abs(numsum)
else:
do_substitute = coeff+numsum == 0
if do_substitute: # advantageous substitution
numsum += coeff
substituted.append(coeff*self.func_m_1(*func.args))
continue
untouched.append(with_func)
return e.func(numsum, *substituted, *untouched, *other_non_num_terms)
def __call__(self, expr):
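        # Try the transformation both on the expression as given and on its factored
        # form, and return whichever variant the cost function deems cheapest.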
alt1 = super().__call__(expr)
alt2 = super().__call__(expr.factor())
return self.cheapest(alt1, alt2)
expm1_opt = FuncMinusOneOptim(exp, expm1)
cosm1_opt = FuncMinusOneOptim(cos, cosm1)
log1p_opt = ReplaceOptim(
lambda e: isinstance(e, log),
lambda l: expand_log(l.replace(
log, lambda arg: log(arg.factor())
)).replace(log(_u+1), log1p(_u))
)
def create_expand_pow_optimization(limit, *, base_req=lambda b: b.is_symbol):
""" Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.
Explanation
===========
The requirements for expansions are that the base needs to be a symbol
and the exponent needs to be an Integer (and be less than or equal to
``limit``).
Parameters
==========
limit : int
The highest power which is expanded into multiplication.
base_req : function returning bool
Requirement on base for expansion to happen, default is to return
the ``is_symbol`` attribute of the base.
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.codegen.rewriting import create_expand_pow_optimization
>>> x = Symbol('x')
>>> expand_opt = create_expand_pow_optimization(3)
>>> expand_opt(x**5 + x**3)
x**5 + x*x*x
>>> expand_opt(x**5 + x**3 + sin(x)**3)
x**5 + sin(x)**3 + x*x*x
>>> opt2 = create_expand_pow_optimization(3, base_req=lambda b: not b.is_Function)
>>> opt2((x+1)**2 + sin(x)**2)
sin(x)**2 + (x + 1)*(x + 1)
"""
return ReplaceOptim(
lambda e: e.is_Pow and base_req(e.base) and e.exp.is_Integer and abs(e.exp) <= limit,
lambda p: (
UnevaluatedExpr(Mul(*([p.base]*+p.exp), evaluate=False)) if p.exp > 0 else
1/UnevaluatedExpr(Mul(*([p.base]*-p.exp), evaluate=False))
))
# Optimization procedures for turning A**(-1) * x into MatrixSolve(A, x)
def _matinv_predicate(expr):
# TODO: We should be able to support more than 2 elements
if expr.is_MatMul and len(expr.args) == 2:
left, right = expr.args
if left.is_Inverse and right.shape[1] == 1:
inv_arg = left.arg
if isinstance(inv_arg, MatrixSymbol):
return bool(ask(Q.fullrank(left.arg)))
return False
def _matinv_transform(expr):
left, right = expr.args
inv_arg = left.arg
return MatrixSolve(inv_arg, right)
matinv_opt = ReplaceOptim(_matinv_predicate, _matinv_transform)
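# log(exp(v) + exp(w)) -> logaddexp(v, w) and, using log(x) = log(2)*log2(x),
# log(2**v + 2**w) -> log(2)*logaddexp2(v, w).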
logaddexp_opt = ReplaceOptim(log(exp(_v)+exp(_w)), logaddexp(_v, _w))
logaddexp2_opt = ReplaceOptim(log(Pow(2, _v)+Pow(2, _w)), logaddexp2(_v, _w)*log(2))
# Collections of optimizations:
optims_c99 = (expm1_opt, log1p_opt, exp2_opt, log2_opt, log2const_opt)
optims_numpy = optims_c99 + (logaddexp_opt, logaddexp2_opt,) + sinc_opts
optims_scipy = (cosm1_opt,)
|
c0bab771489e82fdefb61503ec9c88a73cf263214182e61b977ed5f0e6bde961 | """
Types used to represent a full function/module as an Abstract Syntax Tree.
Most types are small, and are merely used as tokens in the AST. A tree diagram
has been included below to illustrate the relationships between the AST types.
AST Type Tree
-------------
::
*Basic*
|
|
CodegenAST
|
|--->AssignmentBase
| |--->Assignment
| |--->AugmentedAssignment
| |--->AddAugmentedAssignment
| |--->SubAugmentedAssignment
| |--->MulAugmentedAssignment
| |--->DivAugmentedAssignment
| |--->ModAugmentedAssignment
|
|--->CodeBlock
|
|
|--->Token
|--->Attribute
|--->For
|--->String
| |--->QuotedString
| |--->Comment
|--->Type
| |--->IntBaseType
| | |--->_SizedIntType
| | |--->SignedIntType
| | |--->UnsignedIntType
| |--->FloatBaseType
| |--->FloatType
| |--->ComplexBaseType
| |--->ComplexType
|--->Node
| |--->Variable
| | |---> Pointer
| |--->FunctionPrototype
| |--->FunctionDefinition
|--->Element
|--->Declaration
|--->While
|--->Scope
|--->Stream
|--->Print
|--->FunctionCall
|--->BreakToken
|--->ContinueToken
|--->NoneToken
|--->Return
Predefined types
----------------
A number of ``Type`` instances are provided in the ``sympy.codegen.ast`` module
for convenience. Perhaps the two most common ones for code-generation (of numeric
codes) are ``float32`` and ``float64`` (known as single and double precision respectively).
There are also precision generic versions of Types (for which the codeprinters selects the
underlying data type at time of printing): ``real``, ``integer``, ``complex_``, ``bool_``.
The other ``Type`` instances defined are:
- ``intc``: Integer type used by C's "int".
- ``intp``: Integer type used by C's "unsigned".
- ``int8``, ``int16``, ``int32``, ``int64``: n-bit integers.
- ``uint8``, ``uint16``, ``uint32``, ``uint64``: n-bit unsigned integers.
- ``float80``: known as "extended precision" on modern x86/amd64 hardware.
- ``complex64``: Complex number represented by two ``float32`` numbers
- ``complex128``: Complex number represented by two ``float64`` numbers
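For example::
    >>> from sympy.codegen.ast import float64, uint8
    >>> float64.nbits
    64
    >>> uint8.max
    255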
Using the nodes
---------------
It is possible to construct simple algorithms using the AST nodes. Let's construct a loop applying
Newton's method::
>>> from sympy import symbols, cos
>>> from sympy.codegen.ast import While, Assignment, aug_assign, Print
>>> t, dx, x = symbols('tol delta val')
>>> expr = cos(x) - x**3
>>> whl = While(abs(dx) > t, [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx),
... Print([x])
... ])
>>> from sympy import pycode
>>> py_str = pycode(whl)
>>> print(py_str)
while (abs(delta) > tol):
delta = (val**3 - math.cos(val))/(-3*val**2 - math.sin(val))
val += delta
print(val)
>>> import math
>>> tol, val, delta = 1e-5, 0.5, float('inf')
>>> exec(py_str)
1.1121416371
0.909672693737
0.867263818209
0.865477135298
0.865474033111
>>> print('%3.1g' % (math.cos(val) - val**3))
-3e-11
If we want to generate Fortran code for the same while loop we simply call ``fcode``::
>>> from sympy import fcode
>>> print(fcode(whl, standard=2003, source_format='free'))
do while (abs(delta) > tol)
delta = (val**3 - cos(val))/(-3*val**2 - sin(val))
val = val + delta
print *, val
end do
There is a function constructing a loop (or a complete function) like this in
:mod:`sympy.codegen.algorithms`.
"""
from typing import Any, Dict as tDict, List
from collections import defaultdict
from sympy.core.relational import (Ge, Gt, Le, Lt)
from sympy.core import Symbol, Tuple, Dummy
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.numbers import Float, Integer, oo
from sympy.core.sympify import _sympify, sympify, SympifyError
from sympy.utilities.iterables import (iterable, topological_sort,
numbered_symbols, filter_symbols)
def _mk_Tuple(args):
"""
Create a SymPy Tuple object from an iterable, converting Python strings to
AST strings.
Parameters
==========
args: iterable
Arguments to :class:`sympy.Tuple`.
Returns
=======
sympy.Tuple
"""
args = [String(arg) if isinstance(arg, str) else arg for arg in args]
return Tuple(*args)
class CodegenAST(Basic):
pass
class Token(CodegenAST):
""" Base class for the AST types.
Explanation
===========
Defining fields are set in ``__slots__``. Attributes (defined in __slots__)
are only allowed to contain instances of Basic (unless atomic, see
``String``). The arguments to ``__new__()`` correspond to the attributes in
    the order defined in ``__slots__``. The ``defaults`` class attribute is a
dictionary mapping attribute names to their default values.
Subclasses should not need to override the ``__new__()`` method. They may
define a class or static method named ``_construct_<attr>`` for each
attribute to process the value passed to ``__new__()``. Attributes listed
in the class attribute ``not_in_args`` are not passed to :class:`~.Basic`.
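    As a purely illustrative sketch (not an actual node of this module), a subclass
    with a single sympified attribute defaulting to ``none`` could be declared as::
        class mynode(Token):
            __slots__ = ('argument',)
            defaults = {'argument': none}
            _construct_argument = staticmethod(_sympify)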
"""
__slots__ = ()
defaults = {} # type: tDict[str, Any]
not_in_args = [] # type: List[str]
indented_args = ['body']
@property
def is_Atom(self):
return len(self.__slots__) == 0
@classmethod
def _get_constructor(cls, attr):
""" Get the constructor function for an attribute by name. """
return getattr(cls, '_construct_%s' % attr, lambda x: x)
@classmethod
def _construct(cls, attr, arg):
""" Construct an attribute value from argument passed to ``__new__()``. """
        # arg may be ``NoneToken()``, so comparison is done using ``==`` instead of the ``is`` operator
if arg == None:
return cls.defaults.get(attr, none)
else:
if isinstance(arg, Dummy): # SymPy's replace uses Dummy instances
return arg
else:
return cls._get_constructor(attr)(arg)
def __new__(cls, *args, **kwargs):
# Pass through existing instances when given as sole argument
if len(args) == 1 and not kwargs and isinstance(args[0], cls):
return args[0]
if len(args) > len(cls.__slots__):
raise ValueError("Too many arguments (%d), expected at most %d" % (len(args), len(cls.__slots__)))
attrvals = []
# Process positional arguments
for attrname, argval in zip(cls.__slots__, args):
if attrname in kwargs:
raise TypeError('Got multiple values for attribute %r' % attrname)
attrvals.append(cls._construct(attrname, argval))
# Process keyword arguments
for attrname in cls.__slots__[len(args):]:
if attrname in kwargs:
argval = kwargs.pop(attrname)
elif attrname in cls.defaults:
argval = cls.defaults[attrname]
else:
raise TypeError('No value for %r given and attribute has no default' % attrname)
attrvals.append(cls._construct(attrname, argval))
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % ' '.join(kwargs))
# Parent constructor
basic_args = [
val for attr, val in zip(cls.__slots__, attrvals)
if attr not in cls.not_in_args
]
obj = CodegenAST.__new__(cls, *basic_args)
# Set attributes
for attr, arg in zip(cls.__slots__, attrvals):
setattr(obj, attr, arg)
return obj
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def _hashable_content(self):
return tuple([getattr(self, attr) for attr in self.__slots__])
def __hash__(self):
return super().__hash__()
def _joiner(self, k, indent_level):
return (',\n' + ' '*indent_level) if k in self.indented_args else ', '
def _indented(self, printer, k, v, *args, **kwargs):
il = printer._context['indent_level']
def _print(arg):
if isinstance(arg, Token):
return printer._print(arg, *args, joiner=self._joiner(k, il), **kwargs)
else:
return printer._print(arg, *args, **kwargs)
if isinstance(v, Tuple):
joined = self._joiner(k, il).join([_print(arg) for arg in v.args])
if k in self.indented_args:
return '(\n' + ' '*il + joined + ',\n' + ' '*(il - 4) + ')'
else:
return ('({0},)' if len(v.args) == 1 else '({0})').format(joined)
else:
return _print(v)
def _sympyrepr(self, printer, *args, joiner=', ', **kwargs):
from sympy.printing.printer import printer_context
exclude = kwargs.get('exclude', ())
values = [getattr(self, k) for k in self.__slots__]
indent_level = printer._context.get('indent_level', 0)
arg_reprs = []
for i, (attr, value) in enumerate(zip(self.__slots__, values)):
if attr in exclude:
continue
# Skip attributes which have the default value
if attr in self.defaults and value == self.defaults[attr]:
continue
ilvl = indent_level + 4 if attr in self.indented_args else 0
with printer_context(printer, indent_level=ilvl):
indented = self._indented(printer, attr, value, *args, **kwargs)
arg_reprs.append(('{1}' if i == 0 else '{0}={1}').format(attr, indented.lstrip()))
return "{}({})".format(self.__class__.__name__, joiner.join(arg_reprs))
_sympystr = _sympyrepr
def __repr__(self): # sympy.core.Basic.__repr__ uses sstr
from sympy.printing import srepr
return srepr(self)
def kwargs(self, exclude=(), apply=None):
""" Get instance's attributes as dict of keyword arguments.
Parameters
==========
exclude : collection of str
Collection of keywords to exclude.
apply : callable, optional
Function to apply to all values.
"""
kwargs = {k: getattr(self, k) for k in self.__slots__ if k not in exclude}
if apply is not None:
return {k: apply(v) for k, v in kwargs.items()}
else:
return kwargs
class BreakToken(Token):
""" Represents 'break' in C/Python ('exit' in Fortran).
Use the premade instance ``break_`` or instantiate manually.
Examples
========
>>> from sympy import ccode, fcode
>>> from sympy.codegen.ast import break_
>>> ccode(break_)
'break'
>>> fcode(break_, source_format='free')
'exit'
"""
break_ = BreakToken()
class ContinueToken(Token):
""" Represents 'continue' in C/Python ('cycle' in Fortran)
Use the premade instance ``continue_`` or instantiate manually.
Examples
========
>>> from sympy import ccode, fcode
>>> from sympy.codegen.ast import continue_
>>> ccode(continue_)
'continue'
>>> fcode(continue_, source_format='free')
'cycle'
"""
continue_ = ContinueToken()
class NoneToken(Token):
""" The AST equivalence of Python's NoneType
The corresponding instance of Python's ``None`` is ``none``.
Examples
========
>>> from sympy.codegen.ast import none, Variable
>>> from sympy import pycode
>>> print(pycode(Variable('x').as_Declaration(value=none)))
x = None
"""
def __eq__(self, other):
return other is None or isinstance(other, NoneToken)
def _hashable_content(self):
return ()
def __hash__(self):
return super().__hash__()
none = NoneToken()
class AssignmentBase(CodegenAST):
""" Abstract base class for Assignment and AugmentedAssignment.
Attributes:
===========
op : str
Symbol for assignment operator, e.g. "=", "+=", etc.
"""
def __new__(cls, lhs, rhs):
lhs = _sympify(lhs)
rhs = _sympify(rhs)
cls._check_args(lhs, rhs)
return super().__new__(cls, lhs, rhs)
@property
def lhs(self):
return self.args[0]
@property
def rhs(self):
return self.args[1]
@classmethod
def _check_args(cls, lhs, rhs):
""" Check arguments to __new__ and raise exception if any problems found.
Derived classes may wish to override this.
"""
from sympy.matrices.expressions.matexpr import (
MatrixElement, MatrixSymbol)
from sympy.tensor.indexed import Indexed
# Tuple of things that can be on the lhs of an assignment
assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable)
if not isinstance(lhs, assignable):
raise TypeError("Cannot assign to lhs of type %s." % type(lhs))
# Indexed types implement shape, but don't define it until later. This
# causes issues in assignment validation. For now, matrices are defined
# as anything with a shape that is not an Indexed
lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed)
rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed)
# If lhs and rhs have same structure, then this assignment is ok
if lhs_is_mat:
if not rhs_is_mat:
raise ValueError("Cannot assign a scalar to a matrix.")
elif lhs.shape != rhs.shape:
raise ValueError("Dimensions of lhs and rhs do not align.")
elif rhs_is_mat and not lhs_is_mat:
raise ValueError("Cannot assign a matrix to a scalar.")
class Assignment(AssignmentBase):
"""
Represents variable assignment for code generation.
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols, MatrixSymbol, Matrix
>>> from sympy.codegen.ast import Assignment
>>> x, y, z = symbols('x, y, z')
>>> Assignment(x, y)
Assignment(x, y)
>>> Assignment(x, 0)
Assignment(x, 0)
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
Assignment(A, Matrix([[x, y, z]]))
>>> Assignment(A[0, 1], x)
Assignment(A[0, 1], x)
"""
op = ':='
class AugmentedAssignment(AssignmentBase):
"""
Base class for augmented assignments.
Attributes:
===========
binop : str
Symbol for binary operation being applied in the assignment, such as "+",
"*", etc.
"""
binop = None # type: str
@property
def op(self):
return self.binop + '='
class AddAugmentedAssignment(AugmentedAssignment):
binop = '+'
class SubAugmentedAssignment(AugmentedAssignment):
binop = '-'
class MulAugmentedAssignment(AugmentedAssignment):
binop = '*'
class DivAugmentedAssignment(AugmentedAssignment):
binop = '/'
class ModAugmentedAssignment(AugmentedAssignment):
binop = '%'
# Mapping from binary op strings to AugmentedAssignment subclasses
augassign_classes = {
cls.binop: cls for cls in [
AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment,
DivAugmentedAssignment, ModAugmentedAssignment
]
}
def aug_assign(lhs, op, rhs):
"""
Create 'lhs op= rhs'.
Explanation
===========
Represents augmented variable assignment for code generation. This is a
convenience function. You can also use the AugmentedAssignment classes
directly, like AddAugmentedAssignment(x, y).
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
op : str
Operator (+, -, /, \\*, %).
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import aug_assign
>>> x, y = symbols('x, y')
>>> aug_assign(x, '+', y)
AddAugmentedAssignment(x, y)
"""
if op not in augassign_classes:
raise ValueError("Unrecognized operator %s" % op)
return augassign_classes[op](lhs, rhs)
class CodeBlock(CodegenAST):
"""
Represents a block of code.
Explanation
===========
For now only assignments are supported. This restriction will be lifted in
the future.
Useful attributes on this object are:
``left_hand_sides``:
Tuple of left-hand sides of assignments, in order.
    ``right_hand_sides``:
Tuple of right-hand sides of assignments, in order.
``free_symbols``: Free symbols of the expressions in the right-hand sides
which do not appear in the left-hand side of an assignment.
Useful methods on this object are:
``topological_sort``:
Class method. Return a CodeBlock with assignments
sorted so that variables are assigned before they
are used.
``cse``:
Return a new CodeBlock with common subexpressions eliminated and
pulled out as assignments.
Examples
========
>>> from sympy import symbols, ccode
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y = symbols('x y')
>>> c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1))
>>> print(ccode(c))
x = 1;
y = x + 1;
"""
def __new__(cls, *args):
left_hand_sides = []
right_hand_sides = []
for i in args:
if isinstance(i, Assignment):
lhs, rhs = i.args
left_hand_sides.append(lhs)
right_hand_sides.append(rhs)
obj = CodegenAST.__new__(cls, *args)
obj.left_hand_sides = Tuple(*left_hand_sides)
obj.right_hand_sides = Tuple(*right_hand_sides)
return obj
def __iter__(self):
return iter(self.args)
def _sympyrepr(self, printer, *args, **kwargs):
il = printer._context.get('indent_level', 0)
joiner = ',\n' + ' '*il
joined = joiner.join(map(printer._print, self.args))
return ('{}(\n'.format(' '*(il-4) + self.__class__.__name__,) +
' '*il + joined + '\n' + ' '*(il - 4) + ')')
_sympystr = _sympyrepr
@property
def free_symbols(self):
return super().free_symbols - set(self.left_hand_sides)
@classmethod
def topological_sort(cls, assignments):
"""
Return a CodeBlock with topologically sorted assignments so that
variables are assigned before they are used.
        Explanation
        ===========
The existing order of assignments is preserved as much as possible.
This function assumes that variables are assigned to only once.
This is a class constructor so that the default constructor for
CodeBlock can error when variables are used before they are assigned.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y, z = symbols('x y z')
>>> assignments = [
... Assignment(x, y + z),
... Assignment(y, z + 1),
... Assignment(z, 2),
... ]
>>> CodeBlock.topological_sort(assignments)
CodeBlock(
Assignment(z, 2),
Assignment(y, z + 1),
Assignment(x, y + z)
)
"""
if not all(isinstance(i, Assignment) for i in assignments):
# Will support more things later
raise NotImplementedError("CodeBlock.topological_sort only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in assignments):
raise NotImplementedError("CodeBlock.topological_sort doesn't yet work with AugmentedAssignments")
# Create a graph where the nodes are assignments and there is a directed edge
# between nodes that use a variable and nodes that assign that
# variable, like
# [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)]
# If we then topologically sort these nodes, they will be in
# assignment order, like
# x := 1
# y := x + 1
# z := y + z
# A = The nodes
#
# enumerate keeps nodes in the same order they are already in if
# possible. It will also allow us to handle duplicate assignments to
# the same variable when those are implemented.
A = list(enumerate(assignments))
# var_map = {variable: [nodes for which this variable is assigned to]}
# like {x: [(1, x := y + z), (4, x := 2 * w)], ...}
var_map = defaultdict(list)
for node in A:
i, a = node
var_map[a.lhs].append(node)
# E = Edges in the graph
E = []
for dst_node in A:
i, a = dst_node
for s in a.rhs.free_symbols:
for src_node in var_map[s]:
E.append((src_node, dst_node))
ordered_assignments = topological_sort([A, E])
# De-enumerate the result
return cls(*[a for i, a in ordered_assignments])
def cse(self, symbols=None, optimizations=None, postprocess=None,
order='canonical'):
"""
Return a new code block with common subexpressions eliminated.
Explanation
===========
See the docstring of :func:`sympy.simplify.cse_main.cse` for more
information.
Examples
========
>>> from sympy import symbols, sin
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y, z = symbols('x y z')
>>> c = CodeBlock(
... Assignment(x, 1),
... Assignment(y, sin(x) + 1),
... Assignment(z, sin(x) - 1),
... )
...
>>> c.cse()
CodeBlock(
Assignment(x, 1),
Assignment(x0, sin(x)),
Assignment(y, x0 + 1),
Assignment(z, x0 - 1)
)
"""
from sympy.simplify.cse_main import cse
# Check that the CodeBlock only contains assignments to unique variables
if not all(isinstance(i, Assignment) for i in self.args):
# Will support more things later
raise NotImplementedError("CodeBlock.cse only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in self.args):
raise NotImplementedError("CodeBlock.cse doesn't yet work with AugmentedAssignments")
for i, lhs in enumerate(self.left_hand_sides):
if lhs in self.left_hand_sides[:i]:
raise NotImplementedError("Duplicate assignments to the same "
"variable are not yet supported (%s)" % lhs)
# Ensure new symbols for subexpressions do not conflict with existing
existing_symbols = self.atoms(Symbol)
if symbols is None:
symbols = numbered_symbols()
symbols = filter_symbols(symbols, existing_symbols)
replacements, reduced_exprs = cse(list(self.right_hand_sides),
symbols=symbols, optimizations=optimizations, postprocess=postprocess,
order=order)
new_block = [Assignment(var, expr) for var, expr in
zip(self.left_hand_sides, reduced_exprs)]
new_assignments = [Assignment(var, expr) for var, expr in replacements]
return self.topological_sort(new_assignments + new_block)
class For(Token):
"""Represents a 'for-loop' in the code.
Expressions are of the form:
"for target in iter:
body..."
Parameters
==========
target : symbol
iter : iterable
body : CodeBlock or iterable
        When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Range
>>> from sympy.codegen.ast import aug_assign, For
>>> x, i, j, k = symbols('x i j k')
>>> for_i = For(i, Range(10), [aug_assign(x, '+', i*j*k)])
>>> for_i # doctest: -NORMALIZE_WHITESPACE
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
>>> for_ji = For(j, Range(7), [for_i])
>>> for_ji # doctest: -NORMALIZE_WHITESPACE
For(j, iterable=Range(0, 7, 1), body=CodeBlock(
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
))
>>> for_kji =For(k, Range(5), [for_ji])
>>> for_kji # doctest: -NORMALIZE_WHITESPACE
For(k, iterable=Range(0, 5, 1), body=CodeBlock(
For(j, iterable=Range(0, 7, 1), body=CodeBlock(
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
))
))
"""
__slots__ = ('target', 'iterable', 'body')
_construct_target = staticmethod(_sympify)
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
@classmethod
def _construct_iterable(cls, itr):
if not iterable(itr):
raise TypeError("iterable must be an iterable")
if isinstance(itr, list): # _sympify errors on lists because they are mutable
itr = tuple(itr)
return _sympify(itr)
class String(Token):
""" SymPy object representing a string.
Atomic object which is not an expression (as opposed to Symbol).
Parameters
==========
text : str
Examples
========
>>> from sympy.codegen.ast import String
>>> f = String('foo')
>>> f
foo
>>> str(f)
'foo'
>>> f.text
'foo'
>>> print(repr(f))
String('foo')
"""
__slots__ = ('text',)
not_in_args = ['text']
is_Atom = True
@classmethod
def _construct_text(cls, text):
if not isinstance(text, str):
raise TypeError("Argument text is not a string type.")
return text
def _sympystr(self, printer, *args, **kwargs):
return self.text
class QuotedString(String):
""" Represents a string which should be printed with quotes. """
class Comment(String):
""" Represents a comment. """
class Node(Token):
""" Subclass of Token, carrying the attribute 'attrs' (Tuple)
Examples
========
>>> from sympy.codegen.ast import Node, value_const, pointer_const
>>> n1 = Node([value_const])
>>> n1.attr_params('value_const') # get the parameters of attribute (by name)
()
>>> from sympy.codegen.fnodes import dimension
>>> n2 = Node([value_const, dimension(5, 3)])
>>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance)
()
>>> n2.attr_params('dimension') # get the parameters of attribute (by name)
(5, 3)
>>> n2.attr_params(pointer_const) is None
True
"""
__slots__ = ('attrs',)
defaults = {'attrs': Tuple()} # type: tDict[str, Any]
_construct_attrs = staticmethod(_mk_Tuple)
def attr_params(self, looking_for):
""" Returns the parameters of the Attribute with name ``looking_for`` in self.attrs """
for attr in self.attrs:
if str(attr.name) == str(looking_for):
return attr.parameters
class Type(Token):
""" Represents a type.
Explanation
===========
The naming is a super-set of NumPy naming. Type has a classmethod
``from_expr`` which offer type deduction. It also has a method
``cast_check`` which casts the argument to its type, possibly raising an
exception if rounding error is not within tolerances, or if the value is not
representable by the underlying data type (e.g. unsigned integers).
Parameters
==========
name : str
Name of the type, e.g. ``object``, ``int16``, ``float16`` (where the latter two
        would use the ``Type`` sub-classes ``SignedIntType`` and ``FloatType`` respectively).
If a ``Type`` instance is given, the said instance is returned.
Examples
========
>>> from sympy.codegen.ast import Type
>>> t = Type.from_expr(42)
>>> t
integer
>>> print(repr(t))
IntBaseType(String('integer'))
>>> from sympy.codegen.ast import uint8
>>> uint8.cast_check(-1) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Minimum value for data type bigger than new value.
>>> from sympy.codegen.ast import float32
>>> v6 = 0.123456
>>> float32.cast_check(v6)
0.123456
>>> v10 = 12345.67894
>>> float32.cast_check(v10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> boost_mp50 = Type('boost::multiprecision::cpp_dec_float_50')
>>> from sympy import cxxcode
>>> from sympy.codegen.ast import Declaration, Variable
>>> cxxcode(Declaration(Variable('x', type=boost_mp50)))
'boost::multiprecision::cpp_dec_float_50 x'
References
==========
.. [1] https://docs.scipy.org/doc/numpy/user/basics.types.html
"""
__slots__ = ('name',)
_construct_name = String
def _sympystr(self, printer, *args, **kwargs):
return str(self.name)
@classmethod
def from_expr(cls, expr):
""" Deduces type from an expression or a ``Symbol``.
Parameters
==========
expr : number or SymPy object
The type will be deduced from type or properties.
Examples
========
>>> from sympy.codegen.ast import Type, integer, complex_
>>> Type.from_expr(2) == integer
True
>>> from sympy import Symbol
>>> Type.from_expr(Symbol('z', complex=True)) == complex_
True
>>> Type.from_expr(sum) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Could not deduce type from expr.
Raises
======
ValueError when type deduction fails.
"""
if isinstance(expr, (float, Float)):
return real
if isinstance(expr, (int, Integer)) or getattr(expr, 'is_integer', False):
return integer
if getattr(expr, 'is_real', False):
return real
if isinstance(expr, complex) or getattr(expr, 'is_complex', False):
return complex_
if isinstance(expr, bool) or getattr(expr, 'is_Relational', False):
return bool_
else:
raise ValueError("Could not deduce type from expr.")
def _check(self, value):
pass
def cast_check(self, value, rtol=None, atol=0, precision_targets=None):
""" Casts a value to the data type of the instance.
Parameters
==========
value : number
rtol : floating point number
Relative tolerance (will be deduced if not given).
atol : floating point number
Absolute tolerance (in addition to ``rtol``).
precision_targets : dict
Maps substitutions for Type, e.g. {integer: int64, real: float32}
Examples
========
>>> from sympy.codegen.ast import integer, float32, int8
>>> integer.cast_check(3.0) == 3
True
>>> float32.cast_check(1e-40) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Minimum value for data type bigger than new value.
>>> int8.cast_check(256) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
>>> v10 = 12345.67894
>>> float32.cast_check(v10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> from sympy.codegen.ast import float64
>>> float64.cast_check(v10)
12345.67894
>>> from sympy import Float
>>> v18 = Float('0.123456789012345646')
>>> float64.cast_check(v18)
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> from sympy.codegen.ast import float80
>>> float80.cast_check(v18)
0.123456789012345649
"""
val = sympify(value)
ten = Integer(10)
exp10 = getattr(self, 'decimal_dig', None)
if rtol is None:
rtol = 1e-15 if exp10 is None else 2.0*ten**(-exp10)
def tol(num):
return atol + rtol*abs(num)
new_val = self.cast_nocheck(value)
self._check(new_val)
delta = new_val - val
if abs(delta) > tol(val): # rounding, e.g. int(3.5) != 3.5
raise ValueError("Casting gives a significantly different value.")
return new_val
class IntBaseType(Type):
""" Integer base type, contains no size information. """
__slots__ = ('name',)
cast_nocheck = lambda self, i: Integer(int(i))
class _SizedIntType(IntBaseType):
__slots__ = ('name', 'nbits',)
_construct_nbits = Integer
def _check(self, value):
if value < self.min:
raise ValueError("Value is too small: %d < %d" % (value, self.min))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
class SignedIntType(_SizedIntType):
""" Represents a signed integer type. """
@property
def min(self):
return -2**(self.nbits-1)
@property
def max(self):
return 2**(self.nbits-1) - 1
class UnsignedIntType(_SizedIntType):
""" Represents an unsigned integer type. """
@property
def min(self):
return 0
@property
def max(self):
return 2**self.nbits - 1
two = Integer(2)
class FloatBaseType(Type):
""" Represents a floating point number type. """
cast_nocheck = Float
class FloatType(FloatBaseType):
""" Represents a floating point type with fixed bit width.
Base 2 & one sign bit is assumed.
Parameters
==========
name : str
Name of the type.
nbits : integer
Number of bits used (storage).
nmant : integer
Number of bits used to represent the mantissa.
nexp : integer
Number of bits used to represent the exponent.
Examples
========
>>> from sympy import S
>>> from sympy.codegen.ast import FloatType
>>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5)
>>> half_precision.max
65504
>>> half_precision.tiny == S(2)**-14
True
>>> half_precision.eps == S(2)**-10
True
>>> half_precision.dig == 3
True
>>> half_precision.decimal_dig == 5
True
>>> half_precision.cast_check(1.0)
1.0
>>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
"""
__slots__ = ('name', 'nbits', 'nmant', 'nexp',)
_construct_nbits = _construct_nmant = _construct_nexp = Integer
@property
def max_exponent(self):
""" The largest positive number n, such that 2**(n - 1) is a representable finite value. """
# cf. C++'s ``std::numeric_limits::max_exponent``
return two**(self.nexp - 1)
@property
def min_exponent(self):
""" The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """
# cf. C++'s ``std::numeric_limits::min_exponent``
return 3 - self.max_exponent
@property
def max(self):
""" Maximum value representable. """
return (1 - two**-(self.nmant+1))*two**self.max_exponent
@property
def tiny(self):
""" The minimum positive normalized value. """
# See C macros: FLT_MIN, DBL_MIN, LDBL_MIN
# or C++'s ``std::numeric_limits::min``
# or numpy.finfo(dtype).tiny
return two**(self.min_exponent - 1)
@property
def eps(self):
""" Difference between 1.0 and the next representable value. """
return two**(-self.nmant)
@property
def dig(self):
""" Number of decimal digits that are guaranteed to be preserved in text.
When converting text -> float -> text, you are guaranteed that at least ``dig``
digits are preserved with respect to rounding or overflow.
"""
from sympy.functions import floor, log
return floor(self.nmant * log(2)/log(10))
@property
def decimal_dig(self):
""" Number of digits needed to store & load without loss.
Explanation
===========
Number of decimal digits needed to guarantee that two consecutive conversions
(float -> text -> float) are idempotent. This is useful when one does not want
to lose precision due to rounding errors when storing a floating point value
as text.
"""
from sympy.functions import ceiling, log
return ceiling((self.nmant + 1) * log(2)/log(10) + 1)
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
if value == oo: # float(oo) or oo
return float(oo)
elif value == -oo: # float(-oo) or -oo
return float(-oo)
return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig)
def _check(self, value):
if value < -self.max:
raise ValueError("Value is too small: %d < %d" % (value, -self.max))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
if abs(value) < self.tiny:
raise ValueError("Smallest (absolute) value for data type bigger than new value.")
class ComplexBaseType(FloatBaseType):
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
from sympy.functions import re, im
return (
super().cast_nocheck(re(value)) +
super().cast_nocheck(im(value))*1j
)
def _check(self, value):
from sympy.functions import re, im
super()._check(re(value))
super()._check(im(value))
class ComplexType(ComplexBaseType, FloatType):
""" Represents a complex floating point number. """
# NumPy types:
intc = IntBaseType('intc')
intp = IntBaseType('intp')
int8 = SignedIntType('int8', 8)
int16 = SignedIntType('int16', 16)
int32 = SignedIntType('int32', 32)
int64 = SignedIntType('int64', 64)
uint8 = UnsignedIntType('uint8', 8)
uint16 = UnsignedIntType('uint16', 16)
uint32 = UnsignedIntType('uint32', 32)
uint64 = UnsignedIntType('uint64', 64)
float16 = FloatType('float16', 16, nexp=5, nmant=10) # IEEE 754 binary16, Half precision
float32 = FloatType('float32', 32, nexp=8, nmant=23) # IEEE 754 binary32, Single precision
float64 = FloatType('float64', 64, nexp=11, nmant=52) # IEEE 754 binary64, Double precision
float80 = FloatType('float80', 80, nexp=15, nmant=63) # x86 extended precision (1 integer part bit), "long double"
float128 = FloatType('float128', 128, nexp=15, nmant=112) # IEEE 754 binary128, Quadruple precision
float256 = FloatType('float256', 256, nexp=19, nmant=236) # IEEE 754 binary256, Octuple precision
complex64 = ComplexType('complex64', nbits=64, **float32.kwargs(exclude=('name', 'nbits')))
complex128 = ComplexType('complex128', nbits=128, **float64.kwargs(exclude=('name', 'nbits')))
# Generic types (precision may be chosen by code printers):
untyped = Type('untyped')
real = FloatBaseType('real')
integer = IntBaseType('integer')
complex_ = ComplexBaseType('complex')
bool_ = Type('bool')
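# A quick sanity sketch (hedged; it assumes this module is importable as
# ``sympy.codegen.ast``): the ``FloatType`` formulas defined above reproduce
# the familiar IEEE 754 binary64 limits for the ``float64`` instance.
#
#     from sympy.codegen.ast import float64
#     assert float(float64.max) == 1.7976931348623157e+308   # largest double
#     assert float(float64.eps) == 2.220446049250313e-16     # 2**-52
#     assert float64.dig == 15 and float64.decimal_dig == 17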
class Attribute(Token):
""" Attribute (possibly parametrized)
For use with :class:`sympy.codegen.ast.Node` (which takes instances of
``Attribute`` as ``attrs``).
Parameters
==========
name : str
parameters : Tuple
Examples
========
>>> from sympy.codegen.ast import Attribute
>>> volatile = Attribute('volatile')
>>> volatile
volatile
>>> print(repr(volatile))
Attribute(String('volatile'))
>>> a = Attribute('foo', [1, 2, 3])
>>> a
foo(1, 2, 3)
>>> a.parameters == (1, 2, 3)
True
"""
__slots__ = ('name', 'parameters')
defaults = {'parameters': Tuple()}
_construct_name = String
_construct_parameters = staticmethod(_mk_Tuple)
def _sympystr(self, printer, *args, **kwargs):
result = str(self.name)
if self.parameters:
result += '(%s)' % ', '.join(map(lambda arg: printer._print(
arg, *args, **kwargs), self.parameters))
return result
value_const = Attribute('value_const')
pointer_const = Attribute('pointer_const')
class Variable(Node):
""" Represents a variable.
Parameters
==========
symbol : Symbol
type : Type (optional)
Type of the variable.
attrs : iterable of Attribute instances
Will be stored as a Tuple.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Variable, float32, integer
>>> x = Symbol('x')
>>> v = Variable(x, type=float32)
>>> v.attrs
()
>>> v == Variable('x')
False
>>> v == Variable('x', type=float32)
True
>>> v
Variable(x, type=float32)
One may also construct a ``Variable`` instance with the type deduced from
assumptions about the symbol using the ``deduced`` classmethod:
>>> i = Symbol('i', integer=True)
>>> v = Variable.deduced(i)
>>> v.type == integer
True
>>> v == Variable('i')
False
>>> from sympy.codegen.ast import value_const
>>> value_const in v.attrs
False
>>> w = Variable('w', attrs=[value_const])
>>> w
Variable(w, attrs=(value_const,))
>>> value_const in w.attrs
True
>>> w.as_Declaration(value=42)
Declaration(Variable(w, value=42, attrs=(value_const,)))
"""
__slots__ = ('symbol', 'type', 'value') + Node.__slots__
defaults = Node.defaults.copy()
defaults.update({'type': untyped, 'value': none})
_construct_symbol = staticmethod(sympify)
_construct_value = staticmethod(sympify)
@classmethod
def deduced(cls, symbol, value=None, attrs=Tuple(), cast_check=True):
""" Alt. constructor with type deduction from ``Type.from_expr``.
Deduces type primarily from ``symbol``, secondarily from ``value``.
Parameters
==========
symbol : Symbol
value : expr
(optional) value of the variable.
attrs : iterable of Attribute instances
cast_check : bool
Whether to apply ``Type.cast_check`` on ``value``.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Variable, complex_
>>> n = Symbol('n', integer=True)
>>> str(Variable.deduced(n).type)
'integer'
>>> x = Symbol('x', real=True)
>>> v = Variable.deduced(x)
>>> v.type
real
>>> z = Symbol('z', complex=True)
>>> Variable.deduced(z).type == complex_
True
"""
if isinstance(symbol, Variable):
return symbol
try:
type_ = Type.from_expr(symbol)
except ValueError:
type_ = Type.from_expr(value)
if value is not None and cast_check:
value = type_.cast_check(value)
return cls(symbol, type=type_, value=value, attrs=attrs)
def as_Declaration(self, **kwargs):
""" Convenience method for creating a Declaration instance.
Explanation
===========
If the variable of the Declaration needs to wrap a modified
variable, keyword arguments may be passed (overriding e.g.
the ``value`` of the Variable instance).
Examples
========
>>> from sympy.codegen.ast import Variable, NoneToken
>>> x = Variable('x')
>>> decl1 = x.as_Declaration()
>>> # value is special NoneToken() which must be tested with == operator
>>> decl1.variable.value is None # won't work
False
>>> decl1.variable.value == None # not PEP-8 compliant
True
>>> decl1.variable.value == NoneToken() # OK
True
>>> decl2 = x.as_Declaration(value=42.0)
>>> decl2.variable.value == 42
True
"""
kw = self.kwargs()
kw.update(kwargs)
return Declaration(self.func(**kw))
def _relation(self, rhs, op):
try:
rhs = _sympify(rhs)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, rhs))
return op(self, rhs, evaluate=False)
__lt__ = lambda self, other: self._relation(other, Lt)
__le__ = lambda self, other: self._relation(other, Le)
__ge__ = lambda self, other: self._relation(other, Ge)
__gt__ = lambda self, other: self._relation(other, Gt)
class Pointer(Variable):
""" Represents a pointer. See ``Variable``.
Examples
========
Can create instances of ``Element``:
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Pointer
>>> i = Symbol('i', integer=True)
>>> p = Pointer('x')
>>> p[i+1]
Element(x, indices=(i + 1,))
"""
def __getitem__(self, key):
try:
return Element(self.symbol, key)
except TypeError:
return Element(self.symbol, (key,))
class Element(Token):
""" Element in (a possibly N-dimensional) array.
Examples
========
>>> from sympy.codegen.ast import Element
>>> elem = Element('x', 'ijk')
>>> elem.symbol.name == 'x'
True
>>> elem.indices
(i, j, k)
>>> from sympy import ccode
>>> ccode(elem)
'x[i][j][k]'
>>> ccode(Element('x', 'ijk', strides='lmn', offset='o'))
'x[i*l + j*m + k*n + o]'
"""
__slots__ = ('symbol', 'indices', 'strides', 'offset')
defaults = {'strides': none, 'offset': none}
_construct_symbol = staticmethod(sympify)
_construct_indices = staticmethod(lambda arg: Tuple(*arg))
_construct_strides = staticmethod(lambda arg: Tuple(*arg))
_construct_offset = staticmethod(sympify)
class Declaration(Token):
""" Represents a variable declaration
Parameters
==========
variable : Variable
Examples
========
>>> from sympy.codegen.ast import Declaration, NoneToken, untyped
>>> z = Declaration('z')
>>> z.variable.type == untyped
True
>>> # value is special NoneToken() which must be tested with == operator
>>> z.variable.value is None # won't work
False
>>> z.variable.value == None # not PEP-8 compliant
True
>>> z.variable.value == NoneToken() # OK
True
"""
__slots__ = ('variable',)
_construct_variable = Variable
class While(Token):
""" Represents a 'for-loop' in the code.
Expressions are of the form:
"while condition:
body..."
Parameters
==========
condition : expression convertible to Boolean
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Gt, Abs
>>> from sympy.codegen import aug_assign, Assignment, While
>>> x, dx = symbols('x dx')
>>> expr = 1 - x**2
>>> whl = While(Gt(Abs(dx), 1e-9), [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx)
... ])
"""
__slots__ = ('condition', 'body')
_construct_condition = staticmethod(lambda cond: _sympify(cond))
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class Scope(Token):
""" Represents a scope in the code.
Parameters
==========
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
"""
__slots__ = ('body',)
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
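# A minimal construction sketch for ``Scope`` (which has no doctest above):
# any iterable passed as ``body`` is promoted to a CodeBlock, mirroring the
# behaviour of ``While``. ``Assignment`` and ``CodeBlock`` are defined
# earlier in this module.
#
#     from sympy import symbols
#     from sympy.codegen.ast import Assignment, CodeBlock, Scope
#     x, y = symbols('x y')
#     s = Scope([Assignment(x, 1), Assignment(y, x + 1)])
#     assert isinstance(s.body, CodeBlock)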
class Stream(Token):
""" Represents a stream.
There are two predefined Stream instances ``stdout`` & ``stderr``.
Parameters
==========
name : str
Examples
========
>>> from sympy import Symbol
>>> from sympy.printing.pycode import pycode
>>> from sympy.codegen.ast import Print, stderr, QuotedString
>>> print(pycode(Print(['x'], file=stderr)))
print(x, file=sys.stderr)
>>> x = Symbol('x')
>>> print(pycode(Print([QuotedString('x')], file=stderr))) # print literally "x"
print("x", file=sys.stderr)
"""
__slots__ = ('name',)
_construct_name = String
stdout = Stream('stdout')
stderr = Stream('stderr')
class Print(Token):
""" Represents print command in the code.
Parameters
==========
print_args : iterable of Basic instances (or convertible to such through sympify)
format_string : str (optional)
file : Stream (optional)
Examples
========
>>> from sympy.codegen.ast import Print
>>> from sympy import pycode
>>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g")))
print("coordinate: %12.5g %12.5g" % (x, y))
"""
__slots__ = ('print_args', 'format_string', 'file')
defaults = {'format_string': none, 'file': none}
_construct_print_args = staticmethod(_mk_Tuple)
_construct_format_string = QuotedString
_construct_file = Stream
class FunctionPrototype(Node):
""" Represents a function prototype
Allows the user to generate forward declaration in e.g. C/C++.
Parameters
==========
return_type : Type
name : str
parameters : iterable of Variable instances
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> from sympy import ccode
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
"""
__slots__ = ('return_type', 'name', 'parameters', 'attrs')
_construct_return_type = Type
_construct_name = String
@staticmethod
def _construct_parameters(args):
def _var(arg):
if isinstance(arg, Declaration):
return arg.variable
elif isinstance(arg, Variable):
return arg
else:
return Variable.deduced(arg)
return Tuple(*map(_var, args))
@classmethod
def from_FunctionDefinition(cls, func_def):
if not isinstance(func_def, FunctionDefinition):
raise TypeError("func_def is not an instance of FunctionDefiniton")
return cls(**func_def.kwargs(exclude=('body',)))
class FunctionDefinition(FunctionPrototype):
""" Represents a function definition in the code.
Parameters
==========
return_type : Type
name : str
parameters : iterable of Variable instances
body : CodeBlock or iterable
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import ccode, symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
>>> from sympy.codegen.ast import FunctionDefinition, Return
>>> body = [Return(x*y)]
>>> fd = FunctionDefinition.from_FunctionPrototype(fp, body)
>>> print(ccode(fd))
double foo(double x, double y){
return x*y;
}
"""
__slots__ = FunctionPrototype.__slots__[:-1] + ('body', 'attrs')
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
@classmethod
def from_FunctionPrototype(cls, func_proto, body):
if not isinstance(func_proto, FunctionPrototype):
raise TypeError("func_proto is not an instance of FunctionPrototype")
return cls(body=body, **func_proto.kwargs())
class Return(Token):
""" Represents a return command in the code.
Parameters
==========
return : Basic
Examples
========
>>> from sympy.codegen.ast import Return
>>> from sympy.printing.pycode import pycode
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> print(pycode(Return(x)))
return x
"""
__slots__ = ('return',)
_construct_return = staticmethod(_sympify)
class FunctionCall(Token, Expr):
""" Represents a call to a function in the code.
Parameters
==========
name : str
function_args : Tuple
Examples
========
>>> from sympy.codegen.ast import FunctionCall
>>> from sympy import pycode
>>> fcall = FunctionCall('foo', 'bar baz'.split())
>>> print(pycode(fcall))
foo(bar, baz)
"""
__slots__ = ('name', 'function_args')
_construct_name = String
_construct_function_args = staticmethod(lambda args: Tuple(*args))
|
569add05ba306e6f1d47b981d45f49f74f17db9068d0268839703b8d5beb89ff | """
This module contains SymPy functions corresponding to special math functions in the
C standard library (since C99, also available in C++11).
The functions defined in this module allow the user to express functions such as ``expm1``
as a SymPy function for symbolic manipulation.
"""
from sympy.core.function import ArgumentIndexError, Function
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.miscellaneous import sqrt
def _expm1(x):
return exp(x) - S.One
class expm1(Function):
"""
Represents the exponential function minus one.
Explanation
===========
The benefit of using ``expm1(x)`` over ``exp(x) - 1``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import expm1
>>> '%.0e' % expm1(1e-99).evalf()
'1e-99'
>>> from math import exp
>>> exp(1e-99) - 1
0.0
>>> expm1(x).diff(x)
exp(x)
See Also
========
log1p
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return exp(*self.args)
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _expm1(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
return exp(arg) - S.One
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
exp_arg = exp.eval(arg)
if exp_arg is not None:
return exp_arg - S.One
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
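# A short usage sketch for ``expm1``: ``expand_func`` recovers the plain
# SymPy expression, and the C99 code printer is expected to emit the
# corresponding libm call (hedged; printer support lives in
# sympy.printing.c, not in this module).
#
#     from sympy import symbols, expand_func
#     x = symbols('x')
#     assert expand_func(expm1(x)) == exp(x) - 1
#     # ccode(expm1(x), standard='c99') should render as 'expm1(x)'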
def _log1p(x):
return log(x + S.One)
class log1p(Function):
"""
Represents the natural logarithm of a number plus one.
Explanation
===========
The benefit of using ``log1p(x)`` over ``log(x + 1)``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import log1p
>>> from sympy import expand_log
>>> '%.0e' % expand_log(log1p(1e-99)).evalf()
'1e-99'
>>> from math import log
>>> log(1 + 1e-99)
0.0
>>> log1p(x).diff(x)
1/(x + 1)
See Also
========
expm1
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return S.One/(self.args[0] + S.One)
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _log1p(*self.args)
def _eval_rewrite_as_log(self, arg, **kwargs):
return _log1p(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_log
@classmethod
def eval(cls, arg):
if arg.is_Rational:
return log(arg + S.One)
elif not arg.is_Float: # not safe to add 1 to Float
return log.eval(arg + S.One)
elif arg.is_number:
return log(Rational(arg) + S.One)
def _eval_is_real(self):
return (self.args[0] + S.One).is_nonnegative
def _eval_is_finite(self):
if (self.args[0] + S.One).is_zero:
return False
return self.args[0].is_finite
def _eval_is_positive(self):
return self.args[0].is_positive
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_is_nonnegative(self):
return self.args[0].is_nonnegative
_Two = S(2)
def _exp2(x):
return Pow(_Two, x)
class exp2(Function):
"""
Represents the exponential function with base two.
Explanation
===========
The benefit of using ``exp2(x)`` over ``2**x``
is that the latter is not as efficient under finite precision
arithmetic.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import exp2
>>> exp2(2).evalf() == 4
True
>>> exp2(x).diff(x)
log(2)*exp2(x)
See Also
========
log2
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return self*log(_Two)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_Pow(self, arg, **kwargs):
return _exp2(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_Pow
def _eval_expand_func(self, **hints):
return _exp2(*self.args)
@classmethod
def eval(cls, arg):
if arg.is_number:
return _exp2(arg)
def _log2(x):
return log(x)/log(_Two)
class log2(Function):
"""
Represents the logarithm function with base two.
Explanation
===========
The benefit of using ``log2(x)`` over ``log(x)/log(2)``
is that the latter is not as efficient under finite precision
arithmetic.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import log2
>>> log2(4).evalf() == 2
True
>>> log2(x).diff(x)
1/(x*log(2))
See Also
========
exp2
log10
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return S.One/(log(_Two)*self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_number:
result = log.eval(arg, base=_Two)
if result.is_Atom:
return result
elif arg.is_Pow and arg.base == _Two:
return arg.exp
def _eval_evalf(self, *args, **kwargs):
return self.rewrite(log).evalf(*args, **kwargs)
def _eval_expand_func(self, **hints):
return _log2(*self.args)
def _eval_rewrite_as_log(self, arg, **kwargs):
return _log2(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _fma(x, y, z):
return x*y + z
class fma(Function):
"""
Represents "fused multiply add".
Explanation
===========
The benefit of using ``fma(x, y, z)`` over ``x*y + z``
is that, under finite precision arithmetic, the former is
supported by special instructions on some CPUs.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.codegen.cfunctions import fma
>>> fma(x, y, z).diff(x)
y
"""
nargs = 3
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex in (1, 2):
return self.args[2 - argindex]
elif argindex == 3:
return S.One
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _fma(*self.args)
def _eval_rewrite_as_tractable(self, x, y, z, limitvar=None, **kwargs):
return _fma(x, y, z)
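# A small sketch for ``fma``: expanding recovers the ordinary product-sum
# form, consistent with the derivative rules defined above.
#
#     from sympy import symbols, expand_func
#     x, y, z = symbols('x y z')
#     assert expand_func(fma(x, y, z)) == x*y + z
#     assert fma(x, y, z).diff(z) == 1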
_Ten = S(10)
def _log10(x):
return log(x)/log(_Ten)
class log10(Function):
"""
Represents the logarithm function with base ten.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import log10
>>> log10(100).evalf() == 2
True
>>> log10(x).diff(x)
1/(x*log(10))
See Also
========
log2
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return S.One/(log(_Ten)*self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_number:
result = log.eval(arg, base=_Ten)
if result.is_Atom:
return result
elif arg.is_Pow and arg.base == _Ten:
return arg.exp
def _eval_expand_func(self, **hints):
return _log10(*self.args)
def _eval_rewrite_as_log(self, arg, **kwargs):
return _log10(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _Sqrt(x):
return Pow(x, S.Half)
class Sqrt(Function): # 'sqrt' already defined in sympy.functions.elementary.miscellaneous
"""
Represents the square root function.
Explanation
===========
The reason why one would use ``Sqrt(x)`` over ``sqrt(x)``
is that the latter is internally represented as ``Pow(x, S.Half)`` which
may not be what one wants when doing code-generation.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import Sqrt
>>> Sqrt(x)
Sqrt(x)
>>> Sqrt(x).diff(x)
1/(2*sqrt(x))
See Also
========
Cbrt
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return Pow(self.args[0], Rational(-1, 2))/_Two
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _Sqrt(*self.args)
def _eval_rewrite_as_Pow(self, arg, **kwargs):
return _Sqrt(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_Pow
def _Cbrt(x):
return Pow(x, Rational(1, 3))
class Cbrt(Function): # 'cbrt' already defined in sympy.functions.elementary.miscellaneous
"""
Represents the cube root function.
Explanation
===========
The reason why one would use ``Cbrt(x)`` over ``cbrt(x)``
is that the latter is internally represented as ``Pow(x, Rational(1, 3))`` which
may not be what one wants when doing code-generation.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import Cbrt
>>> Cbrt(x)
Cbrt(x)
>>> Cbrt(x).diff(x)
1/(3*x**(2/3))
See Also
========
Sqrt
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return Pow(self.args[0], Rational(-_Two/3))/3
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _Cbrt(*self.args)
def _eval_rewrite_as_Pow(self, arg, **kwargs):
return _Cbrt(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_Pow
def _hypot(x, y):
return sqrt(Pow(x, 2) + Pow(y, 2))
class hypot(Function):
"""
Represents the hypotenuse function.
Explanation
===========
The hypotenuse function is provided by e.g. the math library
in the C99 standard, hence one may want to represent the function
symbolically when doing code-generation.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.codegen.cfunctions import hypot
>>> hypot(3, 4).evalf() == 5
True
>>> hypot(x, y)
hypot(x, y)
>>> hypot(x, y).diff(x)
x/hypot(x, y)
"""
nargs = 2
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex in (1, 2):
return 2*self.args[argindex-1]/(_Two*self.func(*self.args))
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _hypot(*self.args)
def _eval_rewrite_as_Pow(self, x, y, **kwargs):
return _hypot(x, y)
_eval_rewrite_as_tractable = _eval_rewrite_as_Pow
|
86ed2d0b71c542a3e9a2d08c95b93fc46f45849a3fadcea9dc98b98a8f38bbbb | """
Additional AST nodes for operations on matrices. The nodes in this module
are meant to represent optimization of matrix expressions within codegen's
target languages that cannot be represented by SymPy expressions.
As an example, we can use :meth:`sympy.codegen.rewriting.optimize` and the
``matinv_opt`` optimization provided in :mod:`sympy.codegen.rewriting` to
transform matrix multiplication under certain assumptions:
>>> from sympy import symbols, MatrixSymbol
>>> n = symbols('n', integer=True)
>>> A = MatrixSymbol('A', n, n)
>>> x = MatrixSymbol('x', n, 1)
>>> expr = A**(-1) * x
>>> from sympy.assumptions import assuming, Q
>>> from sympy.codegen.rewriting import matinv_opt, optimize
>>> with assuming(Q.fullrank(A)):
... optimize(expr, [matinv_opt])
MatrixSolve(A, vector=x)
"""
from .ast import Token
from sympy.matrices import MatrixExpr
from sympy.core.sympify import sympify
class MatrixSolve(Token, MatrixExpr):
"""Represents an operation to solve a linear matrix equation.
Parameters
==========
matrix : MatrixSymbol
Matrix representing the coefficients of variables in the linear
equation. This matrix must be square and full-rank (i.e. all columns must
be linearly independent) for the solving operation to be valid.
vector : MatrixSymbol
One-column matrix representing the right-hand side of the equations;
the solve operation returns the column of unknowns ``y`` satisfying
``matrix * y == vector``.
Examples
========
>>> from sympy import symbols, MatrixSymbol
>>> from sympy.codegen.matrix_nodes import MatrixSolve
>>> n = symbols('n', integer=True)
>>> A = MatrixSymbol('A', n, n)
>>> x = MatrixSymbol('x', n, 1)
>>> from sympy.printing.numpy import NumPyPrinter
>>> NumPyPrinter().doprint(MatrixSolve(A, x))
'numpy.linalg.solve(A, x)'
>>> from sympy import octave_code
>>> octave_code(MatrixSolve(A, x))
'A \\\\ x'
"""
__slots__ = ('matrix', 'vector')
_construct_matrix = staticmethod(sympify)
@property
def shape(self):
return self.vector.shape
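# A small shape sketch (using the same symbols as the docstring above): the
# result of a MatrixSolve has the shape of its right-hand side ``vector``.
#
#     from sympy import MatrixSymbol, symbols
#     n = symbols('n', integer=True)
#     A = MatrixSymbol('A', n, n)
#     x = MatrixSymbol('x', n, 1)
#     assert MatrixSolve(A, x).shape == (n, 1)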
|
e410c44a162a2af7781718166aa74de02007eed18ff9d99382f3fe5dc616548a | """
This file contains some classical ciphers and routines
implementing a linear-feedback shift register (LFSR)
and the Diffie-Hellman key exchange.
.. warning::
This module is intended for educational purposes only. Do not use the
functions in this module for real cryptographic applications. If you wish
to encrypt real data, we recommend using something like the `cryptography
<https://cryptography.io/en/latest/>`_ module.
"""
from string import whitespace, ascii_uppercase as uppercase, printable
from functools import reduce
import warnings
from itertools import cycle
from sympy.ntheory.generate import nextprime
from sympy.core import Rational, Symbol
from sympy.core.numbers import igcdex, mod_inverse, igcd
from sympy.matrices import Matrix
from sympy.ntheory import isprime, primitive_root, factorint
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly
from sympy.utilities.misc import as_int, filldedent, translate
from sympy.utilities.iterables import uniq, multiset
from sympy.testing.randtest import _randrange, _randint
class NonInvertibleCipherWarning(RuntimeWarning):
"""A warning raised if the cipher is not invertible."""
def __init__(self, msg):
self.fullMessage = msg
def __str__(self):
return '\n\t' + self.fullMessage
def warn(self, stacklevel=2):
warnings.warn(self, stacklevel=stacklevel)
def AZ(s=None):
"""Return the letters of ``s`` in uppercase. In case more than
one string is passed, each of them will be processed and a list
of upper case strings will be returned.
Examples
========
>>> from sympy.crypto.crypto import AZ
>>> AZ('Hello, world!')
'HELLOWORLD'
>>> AZ('Hello, world!'.split())
['HELLO', 'WORLD']
See Also
========
check_and_join
"""
if not s:
return uppercase
t = isinstance(s, str)
if t:
s = [s]
rv = [check_and_join(i.upper().split(), uppercase, filter=True)
for i in s]
if t:
return rv[0]
return rv
bifid5 = AZ().replace('J', '')
bifid6 = AZ() + '0123456789'
bifid10 = printable
def padded_key(key, symbols):
"""Return a string of the distinct characters of ``symbols`` with
those of ``key`` appearing first. A ValueError is raised if
a) there are duplicate characters in ``symbols`` or
b) there are characters in ``key`` that are not in ``symbols``.
Examples
========
>>> from sympy.crypto.crypto import padded_key
>>> padded_key('PUPPY', 'OPQRSTUVWXY')
'PUYOQRSTVWX'
>>> padded_key('RSA', 'ARTIST')
Traceback (most recent call last):
...
ValueError: duplicate characters in symbols: T
"""
syms = list(uniq(symbols))
if len(syms) != len(symbols):
extra = ''.join(sorted({
i for i in symbols if symbols.count(i) > 1}))
raise ValueError('duplicate characters in symbols: %s' % extra)
extra = set(key) - set(syms)
if extra:
raise ValueError(
'characters in key but not symbols: %s' % ''.join(
sorted(extra)))
key0 = ''.join(list(uniq(key)))
# remove from syms characters in key0
return key0 + translate(''.join(syms), None, key0)
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of ``phrase`` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase
String or list of strings to be returned as a string.
symbols
Iterable of characters allowed in ``phrase``.
If ``symbols`` is ``None``, no checking is performed.
filter : bool
When ``True``, characters in ``phrase`` that are not in ``symbols``
are silently removed instead of raising ``ValueError``.
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
def _prep(msg, key, alp, default=None):
if not alp:
if not default:
alp = AZ()
msg = AZ(msg)
key = AZ(key)
else:
alp = default
else:
alp = ''.join(alp)
key = check_and_join(key, alp, filter=True)
msg = check_and_join(msg, alp, filter=True)
return msg, key, alp
def cycle_list(k, n):
"""
Returns the elements of the list ``range(n)`` shifted to the
left by ``k`` (so the list starts with ``k`` (mod ``n``)).
Examples
========
>>> from sympy.crypto.crypto import cycle_list
>>> cycle_list(3, 10)
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
"""
k = k % n
return list(range(k, n)) + list(range(k))
######## shift cipher examples ############
def encipher_shift(msg, key, symbols=None):
"""
Performs shift cipher encryption on plaintext msg, and returns the
ciphertext.
Parameters
==========
key : int
The secret key.
msg : str
Plaintext of upper-case letters.
Returns
=======
str
Ciphertext of upper-case letters.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
There is also a convenience function that does this with the
original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
adding ``(k mod 26)`` to each element in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
The shift cipher is also called the Caesar cipher, after
Julius Caesar, who, according to Suetonius, used it with a
shift of three to protect messages of military significance.
Caesar's nephew Augustus reportedly used a similar cipher, but
with a right shift of 1.
References
==========
.. [1] https://en.wikipedia.org/wiki/Caesar_cipher
.. [2] http://mathworld.wolfram.com/CaesarsMethod.html
See Also
========
decipher_shift
"""
msg, _, A = _prep(msg, '', symbols)
shift = len(A) - key % len(A)
key = A[shift:] + A[:shift]
return translate(msg, key, A)
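# Implementation note (sketch): the translation table built above is the
# alphabet rotated left by ``key`` positions mapped back onto the plain
# alphabet, which shifts every plaintext letter forward by ``key``.
#
#     A = AZ()
#     shift = len(A) - 1 % len(A)              # key = 1
#     assert A[shift:] + A[:shift] == 'ZABCDEFGHIJKLMNOPQRSTUVWXY'
#     assert encipher_shift('ABC', 1) == 'BCD'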
def decipher_shift(msg, key, symbols=None):
"""
Return the text by shifting the characters of ``msg`` to the
left by the amount given by ``key``.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
Or use this function with the original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
return encipher_shift(msg, -key, symbols)
def encipher_rot13(msg, symbols=None):
"""
Performs the ROT13 encryption on a given plaintext ``msg``.
Explanation
===========
ROT13 is a substitution cipher which substitutes each letter
in the plaintext message for the letter furthest away from it
in the English alphabet.
Equivalently, it is just a Caesar (shift) cipher with a shift
key of 13 (midway point of the alphabet).
References
==========
.. [1] https://en.wikipedia.org/wiki/ROT13
See Also
========
decipher_rot13
encipher_shift
"""
return encipher_shift(msg, 13, symbols)
def decipher_rot13(msg, symbols=None):
"""
Performs the ROT13 decryption on a given ciphertext ``msg``.
Explanation
===========
``decipher_rot13`` is equivalent to ``encipher_rot13``, since applying
``decipher_shift`` with a key of 13 and ``encipher_shift`` with a key of 13
return the same result. Nonetheless, ``decipher_rot13`` has been explicitly
defined here for consistency.
Examples
========
>>> from sympy.crypto.crypto import encipher_rot13, decipher_rot13
>>> msg = 'GONAVYBEATARMY'
>>> ciphertext = encipher_rot13(msg);ciphertext
'TBANILORNGNEZL'
>>> decipher_rot13(ciphertext)
'GONAVYBEATARMY'
>>> encipher_rot13(msg) == decipher_rot13(msg)
True
>>> msg == decipher_rot13(ciphertext)
True
"""
return decipher_shift(msg, 13, symbols)
######## affine cipher examples ############
def encipher_affine(msg, key, symbols=None, _inverse=False):
r"""
Performs the affine cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Explanation
===========
Encryption is based on the map `x \rightarrow ax+b` (mod `N`)
where ``N`` is the number of characters in the alphabet.
Decryption is based on the map `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
In particular, for the map to be invertible, we need
`\mathrm{gcd}(a, N) = 1` and an error will be raised if this is
not true.
Parameters
==========
msg : str
Characters that appear in ``symbols``.
key : (int, int)
A pair of integers ``(a, b)`` with ``gcd(a, N) = 1`` (the secret key).
symbols
String of characters (default = uppercase letters).
When no symbols are given, ``msg`` is converted to upper case
letters and all other characters are ignored.
Returns
=======
ct
String of characters (the ciphertext message)
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
replacing ``x`` by ``a*x + b (mod N)``, for each element
``x`` in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
This is a straightforward generalization of the shift cipher with
the added complexity of requiring 2 characters to be deciphered in
order to recover the key.
References
==========
.. [1] https://en.wikipedia.org/wiki/Affine_cipher
See Also
========
decipher_affine
"""
msg, _, A = _prep(msg, '', symbols)
N = len(A)
a, b = key
assert gcd(a, N) == 1
if _inverse:
c = mod_inverse(a, N)
d = -b*c
a, b = c, d
B = ''.join([A[(a*i + b) % N] for i in range(N)])
return translate(msg, A, B)
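# A numeric sketch of the inverse key used for deciphering (N = 26): for the
# key (3, 1) one has c = 3**-1 mod 26 = 9 and d = -9*1 mod 26 = 17, so
# deciphering is just re-enciphering with the key (9, 17).
#
#     assert encipher_affine('GONAVYBEATARMY', (3, 1)) == 'TROBMVENBGBALV'
#     assert encipher_affine('TROBMVENBGBALV', (9, 17)) == 'GONAVYBEATARMY'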
def decipher_affine(msg, key, symbols=None):
r"""
Return the deciphered text that was made from the mapping,
`x \rightarrow ax+b` (mod `N`), where ``N`` is the
number of characters in the alphabet. Deciphering is done by
reciphering with a new key: `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
Examples
========
>>> from sympy.crypto.crypto import encipher_affine, decipher_affine
>>> msg = "GO NAVY BEAT ARMY"
>>> key = (3, 1)
>>> encipher_affine(msg, key)
'TROBMVENBGBALV'
>>> decipher_affine(_, key)
'GONAVYBEATARMY'
See Also
========
encipher_affine
"""
return encipher_affine(msg, key, symbols, _inverse=True)
def encipher_atbash(msg, symbols=None):
r"""
Enciphers a given ``msg`` into its Atbash ciphertext and returns it.
Explanation
===========
Atbash is a substitution cipher originally used to encrypt the Hebrew
alphabet. Atbash works on the principle of mapping each alphabet to its
reverse / counterpart (i.e. a would map to z, b to y etc.)
Atbash is functionally equivalent to the affine cipher with ``a = 25``
and ``b = 25``.
See Also
========
decipher_atbash
"""
return encipher_affine(msg, (25, 25), symbols)
def decipher_atbash(msg, symbols=None):
r"""
Deciphers a given ``msg`` using Atbash cipher and returns it.
Explanation
===========
``decipher_atbash`` is functionally equivalent to ``encipher_atbash``.
However, it has still been added as a separate function to maintain
consistency.
Examples
========
>>> from sympy.crypto.crypto import encipher_atbash, decipher_atbash
>>> msg = 'GONAVYBEATARMY'
>>> encipher_atbash(msg)
'TLMZEBYVZGZINB'
>>> decipher_atbash(msg)
'TLMZEBYVZGZINB'
>>> encipher_atbash(msg) == decipher_atbash(msg)
True
>>> msg == encipher_atbash(encipher_atbash(msg))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Atbash
See Also
========
encipher_atbash
"""
return decipher_affine(msg, (25, 25), symbols)
#################### substitution cipher ###########################
def encipher_substitution(msg, old, new=None):
r"""
Returns the ciphertext obtained by replacing each character that
appears in ``old`` with the corresponding character in ``new``.
If ``old`` is a mapping, then new is ignored and the replacements
defined by ``old`` are used.
Explanation
===========
This is more general than the affine cipher in that the key can
only be recovered by determining the mapping for each symbol.
Though in practice, once a few symbols are recognized the mappings
for other characters can be quickly guessed.
Examples
========
>>> from sympy.crypto.crypto import encipher_substitution, AZ
>>> old = 'OEYAG'
>>> new = '034^6'
>>> msg = AZ("go navy! beat army!")
>>> ct = encipher_substitution(msg, old, new); ct
'60N^V4B3^T^RM4'
To decrypt a substitution, reverse the last two arguments:
>>> encipher_substitution(ct, new, old)
'GONAVYBEATARMY'
In the special case where ``old`` and ``new`` are a permutation of
order 2 (representing a transposition of characters) their order
is immaterial:
>>> old = 'NAVY'
>>> new = 'ANYV'
>>> encipher = lambda x: encipher_substitution(x, old, new)
>>> encipher('NAVY')
'ANYV'
>>> encipher(_)
'NAVY'
The substitution cipher, in general, is a method
whereby "units" (not necessarily single characters) of plaintext
are replaced with ciphertext according to a regular system.
>>> ords = dict(zip('abc', ['\\%i' % ord(i) for i in 'abc']))
>>> print(encipher_substitution('abc', ords))
\97\98\99
References
==========
.. [1] https://en.wikipedia.org/wiki/Substitution_cipher
"""
return translate(msg, old, new)
######################################################################
#################### Vigenere cipher examples ########################
######################################################################
def encipher_vigenere(msg, key, symbols=None):
"""
Performs the Vigenere cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Examples
========
>>> from sympy.crypto.crypto import encipher_vigenere, AZ
>>> key = "encrypt"
>>> msg = "meet me on monday"
>>> encipher_vigenere(msg, key)
'QRGKKTHRZQEBPR'
Section 1 of the Kryptos sculpture at the CIA headquarters
uses this cipher and also changes the order of the
alphabet [2]_. Here is the first line of that section of
the sculpture:
>>> from sympy.crypto.crypto import decipher_vigenere, padded_key
>>> alp = padded_key('KRYPTOS', AZ())
>>> key = 'PALIMPSEST'
>>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ'
>>> decipher_vigenere(msg, key, alp)
'BETWEENSUBTLESHADINGANDTHEABSENC'
Explanation
===========
The Vigenere cipher is named after Blaise de Vigenere, a sixteenth
century diplomat and cryptographer, by a historical accident.
Vigenere actually invented a different and more complicated cipher.
The so-called *Vigenere cipher* was actually invented
by Giovan Batista Belaso in 1553.
This cipher was used in the 1800's, for example, during the American
Civil War. The Confederacy used a brass cipher disk to implement the
Vigenere cipher (now on display in the NSA Museum in Fort
Meade) [1]_.
The Vigenere cipher is a generalization of the shift cipher.
Whereas the shift cipher shifts each letter by the same amount
(that amount being the key of the shift cipher) the Vigenere
cipher shifts a letter by an amount determined by the key (which is
a word or phrase known only to the sender and receiver).
For example, if the key was a single letter, such as "C", then the
so-called Vigenere cipher is actually a shift cipher with a
shift of `2` (since "C" is the 2nd letter of the alphabet, if
you start counting at `0`). If the key was a word with two
letters, such as "CA", then the so-called Vigenere cipher will
shift letters in even positions by `2` and letters in odd positions
are left alone (shifted by `0`, since "A" is the 0th letter, if
you start counting at `0`).
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
(the plaintext)
``key``: a string of characters that appear in ``symbols``
(the secret key)
``symbols``: a string of letters defining the alphabet
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N
1. Compute from the string ``key`` a list ``L1`` of
corresponding integers. Let ``n1 = len(L1)``.
2. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
3. Break ``L2`` up sequentially into sublists of size
``n1``; the last sublist may be smaller than ``n1``
4. For each of these sublists ``L`` of ``L2``, compute a
new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)``
to the ``i``-th element in the sublist, for each ``i``.
5. Assemble these lists ``C`` by concatenation into a new
list of length ``n2``.
6. Compute from the new list a string ``ct`` of
corresponding letters.
Once it is known that the key is, say, `n` characters long,
frequency analysis can be applied to every `n`-th letter of
the ciphertext to determine the plaintext. This method is
called *Kasiski examination* (although it was first discovered
by Babbage). If the key is as long as the message and is
comprised of randomly selected characters -- a one-time pad -- the
message is theoretically unbreakable.
The cipher Vigenere actually discovered is an "auto-key" cipher
described as follows.
ALGORITHM:
INPUT:
``key``: a string of letters (the secret key)
``msg``: string of letters (the plaintext message)
OUTPUT:
``ct``: string of upper-case letters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N
1. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
2. Let ``n1`` be the length of the key. Append to the
string ``key`` the first ``n2 - n1`` characters of
the plaintext message. Compute from this string (also of
length ``n2``) a list ``L1`` of integers corresponding
to the letter numbers in the first step.
3. Compute a new list ``C`` given by
``C[i] = L1[i] + L2[i] (mod N)``.
4. Compute from the new list a string ``ct`` of letters
corresponding to the new integers.
To decipher the auto-key ciphertext, the key is used to decipher
the first ``n1`` characters and then those characters become the
key to decipher the next ``n1`` characters, etc...:
>>> m = AZ('go navy, beat army! yes you can'); m
'GONAVYBEATARMYYESYOUCAN'
>>> key = AZ('gold bug'); n1 = len(key); n2 = len(m)
>>> auto_key = key + m[:n2 - n1]; auto_key
'GOLDBUGGONAVYBEATARMYYE'
>>> ct = encipher_vigenere(m, auto_key); ct
'MCYDWSHKOGAMKZCELYFGAYR'
>>> n1 = len(key)
>>> pt = []
>>> while ct:
... part, ct = ct[:n1], ct[n1:]
... pt.append(decipher_vigenere(part, key))
... key = pt[-1]
...
>>> ''.join(pt) == m
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Vigenere_cipher
.. [2] http://web.archive.org/web/20071116100808/
.. [3] http://filebox.vt.edu/users/batman/kryptos.html
(short URL: https://goo.gl/ijr22d)
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
key = [map[c] for c in key]
N = len(map)
k = len(key)
rv = []
for i, m in enumerate(msg):
rv.append(A[(map[m] + key[i % k]) % N])
rv = ''.join(rv)
return rv
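# Consistency sketch: with a one-letter key the Vigenere cipher degenerates
# to the shift cipher defined earlier in this module ('C' <-> a shift of 2).
#
#     assert encipher_vigenere('GONAVYBEATARMY', 'C') == encipher_shift('GONAVYBEATARMY', 2)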
def decipher_vigenere(msg, key, symbols=None):
"""
Decode using the Vigenere cipher.
Examples
========
>>> from sympy.crypto.crypto import decipher_vigenere
>>> key = "encrypt"
>>> ct = "QRGK kt HRZQE BPR"
>>> decipher_vigenere(ct, key)
'MEETMEONMONDAY'
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
N = len(A) # normally, 26
K = [map[c] for c in key]
n = len(K)
C = [map[c] for c in msg]
rv = ''.join([A[(-K[i % n] + c) % N] for i, c in enumerate(C)])
return rv
#################### Hill cipher ########################
def encipher_hill(msg, key, symbols=None, pad="Q"):
r"""
Return the Hill cipher encryption of ``msg``.
Explanation
===========
The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_,
was the first polygraphic cipher in which it was practical
(though barely) to operate on more than three symbols at once.
The following discussion assumes an elementary knowledge of
matrices.
Each letter is first encoded as a number starting with 0.
Suppose your message `msg` consists of `n` capital letters, with no
spaces. This may be regarded as an `n`-tuple M of elements of
`Z_{26}` (if the letters are those of the English alphabet). A key
in the Hill cipher is a `k \times k` matrix `K`, all of whose entries
are in `Z_{26}`, such that the matrix `K` is invertible (i.e., the
linear transformation `K: Z_{N}^k \rightarrow Z_{N}^k`
is one-to-one).
Parameters
==========
msg
Plaintext message of `n` upper-case letters.
key
A `k \times k` invertible matrix `K`, all of whose entries are
in `Z_{26}` (or whatever number of symbols are being used).
pad
Character (default "Q") to use to make length of text be a
multiple of ``k``.
Returns
=======
ct
Ciphertext of upper-case letters.
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N
1. Compute from the string ``msg`` a list ``L`` of
corresponding integers. Let ``n = len(L)``.
2. Break the list ``L`` up into ``t = ceiling(n/k)``
sublists ``L_1``, ..., ``L_t`` of size ``k`` (with
the last list "padded" to ensure its size is
``k``).
3. Compute new list ``C_1``, ..., ``C_t`` given by
``C[i] = K*L_i`` (arithmetic is done mod N), for each
``i``.
4. Concatenate these into a list ``C = C_1 + ... + C_t``.
5. Compute from ``C`` a string ``ct`` of corresponding
letters. This has length ``k*t``.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hill_cipher
.. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet,
The American Mathematical Monthly Vol.36, June-July 1929,
pp.306-312.
See Also
========
decipher_hill
"""
assert key.is_square
assert len(pad) == 1
msg, pad, A = _prep(msg, pad, symbols)
map = {c: i for i, c in enumerate(A)}
P = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(P)
m, r = divmod(n, k)
if r:
P = P + [map[pad]]*(k - r)
m += 1
rv = ''.join([A[c % N] for j in range(m) for c in
list(key*Matrix(k, 1, [P[i]
for i in range(k*j, k*(j + 1))]))])
return rv
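# Key-validity sketch: the 2x2 key used in the ``decipher_hill`` examples
# below, Matrix([[1, 2], [3, 5]]), has determinant -1, which is coprime to
# 26, so it is invertible mod 26 as required:
#
#     from sympy import Matrix
#     K = Matrix([[1, 2], [3, 5]])
#     assert K.det() == -1
#     assert K.inv_mod(26) == Matrix([[21, 2], [3, 25]])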
def decipher_hill(msg, key, symbols=None):
"""
Deciphering is the same as enciphering but using the inverse of the
key matrix.
Examples
========
>>> from sympy.crypto.crypto import encipher_hill, decipher_hill
>>> from sympy import Matrix
>>> key = Matrix([[1, 2], [3, 5]])
>>> encipher_hill("meet me on monday", key)
'UEQDUEODOCTCWQ'
>>> decipher_hill(_, key)
'MEETMEONMONDAY'
When the length of the plaintext (stripped of invalid characters)
is not a multiple of the key dimension, extra characters will
appear at the end of the enciphered and deciphered text. In order to
decipher the text, those characters must be included in the text to
be deciphered. In the following, the key has a dimension of 4 but
the text is 2 short of being a multiple of 4 so two characters will
be added.
>>> key = Matrix([[1, 1, 1, 2], [0, 1, 1, 0],
... [2, 2, 3, 4], [1, 1, 0, 1]])
>>> msg = "ST"
>>> encipher_hill(msg, key)
'HJEB'
>>> decipher_hill(_, key)
'STQQ'
>>> encipher_hill(msg, key, pad="Z")
'ISPK'
>>> decipher_hill(_, key)
'STZZ'
If the last two characters of the ciphertext were ignored in
either case, the wrong plaintext would be recovered:
>>> decipher_hill("HD", key)
'ORMV'
>>> decipher_hill("IS", key)
'UIKY'
See Also
========
encipher_hill
"""
assert key.is_square
msg, _, A = _prep(msg, '', symbols)
map = {c: i for i, c in enumerate(A)}
C = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(C)
m, r = divmod(n, k)
if r:
C = C + [0]*(k - r)
m += 1
key_inv = key.inv_mod(N)
rv = ''.join([A[p % N] for j in range(m) for p in
list(key_inv*Matrix(
k, 1, [C[i] for i in range(k*j, k*(j + 1))]))])
return rv
#################### Bifid cipher ########################
def encipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses an `n \times n`
Polybius square.
Parameters
==========
msg
Plaintext string.
key
Short string for key.
Duplicate characters are ignored and then it is padded with the
characters in ``symbols`` that were not in the short key.
symbols
`n \times n` characters defining the alphabet.
(default is string.printable)
Returns
=======
ciphertext
Ciphertext using Bifid5 cipher without spaces.
See Also
========
decipher_bifid, encipher_bifid5, encipher_bifid6
References
==========
.. [1] https://en.wikipedia.org/wiki/Bifid_cipher
"""
msg, key, A = _prep(msg, key, symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the fractionalization
row_col = {ch: divmod(i, N) for i, ch in enumerate(long_key)}
r, c = zip(*[row_col[x] for x in msg])
rc = r + c
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join(ch[i] for i in zip(rc[::2], rc[1::2]))
return rv
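# A round-trip sketch of the fractionalization above, using the same data as
# the ``decipher_bifid`` docstring below (5x5 alphabet, key "GOLDBUG"):
#
#     alp = AZ().replace('J', '')
#     ct = encipher_bifid(AZ("meet me on monday!"), AZ("gold bug"), alp)
#     assert ct == 'IEILHHFSTSFQYE'
#     assert decipher_bifid(ct, AZ("gold bug"), alp) == 'MEETMEONMONDAY'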
def decipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `n \times n`
Polybius square.
Parameters
==========
msg
Ciphertext string.
key
Short string for key.
Duplicate characters are ignored and then it is padded with the
characters in symbols that were not in the short key.
symbols
`n \times n` characters defining the alphabet.
(default=string.printable, a `10 \times 10` matrix)
Returns
=======
deciphered
Deciphered text.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid, decipher_bifid, AZ)
Do an encryption using the bifid5 alphabet:
>>> alp = AZ().replace('J', '')
>>> ct = AZ("meet me on monday!")
>>> key = AZ("gold bug")
>>> encipher_bifid(ct, key, alp)
'IEILHHFSTSFQYE'
When entering the text or ciphertext, spaces are ignored so it
can be formatted as desired. Re-entering the ciphertext from the
preceding, putting 4 characters per line and padding with an extra
J, does not cause problems for the deciphering:
>>> decipher_bifid('''
... IEILH
... HFSTS
... FQYEJ''', key, alp)
'MEETMEONMONDAY'
When no alphabet is given, all 100 printable characters will be
used:
>>> key = ''
>>> encipher_bifid('hello world!', key)
'bmtwmg-bIo*w'
>>> decipher_bifid(_, key)
'hello world!'
If the key is changed, a different encryption is obtained:
>>> key = 'gold bug'
>>> encipher_bifid('hello world!', 'gold_bug')
'hg2sfuei7t}w'
And if the key used to decrypt the message is not exact, the
original text will not be perfectly obtained:
>>> decipher_bifid(_, 'gold pug')
'heldo~wor6d!'
"""
msg, _, A = _prep(msg, '', symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the reverse fractionalization
row_col = {
ch: divmod(i, N) for i, ch in enumerate(long_key)}
rc = [i for c in msg for i in row_col[c]]
n = len(msg)
rc = zip(*(rc[:n], rc[n:]))
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join(ch[i] for i in rc)
return rv
def bifid_square(key):
"""Return characters of ``key`` arranged in a square.
Examples
========
>>> from sympy.crypto.crypto import (
... bifid_square, AZ, padded_key, bifid5)
>>> bifid_square(AZ().replace('J', ''))
Matrix([
[A, B, C, D, E],
[F, G, H, I, K],
[L, M, N, O, P],
[Q, R, S, T, U],
[V, W, X, Y, Z]])
>>> bifid_square(padded_key(AZ('gold bug!'), bifid5))
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
See Also
========
padded_key
"""
A = ''.join(uniq(''.join(key)))
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
n = int(n)
f = lambda i, j: Symbol(A[n*i + j])
rv = Matrix(n, n, f)
return rv
def encipher_bifid5(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Explanation
===========
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square. The letter "J" is ignored so it must be replaced
with something else (traditionally an "I") before encryption.
ALGORITHM: (5x5 case)
STEPS:
0. Create the `5 \times 5` Polybius square ``S`` associated
to ``key`` as follows:
a) moving from left-to-right, top-to-bottom,
place the letters of the key into a `5 \times 5`
matrix,
b) if the key has less than 25 letters, add the
letters of the alphabet not in the key until the
`5 \times 5` square is filled.
1. Create a list ``P`` of pairs of numbers which are the
coordinates in the Polybius square of the letters in
``msg``.
2. Let ``L1`` be the list of all first coordinates of ``P``
(length of ``L1 = n``), let ``L2`` be the list of all
second coordinates of ``P`` (so the length of ``L2``
is also ``n``).
3. Let ``L`` be the concatenation of ``L1`` and ``L2``
(length ``L = 2*n``), except that consecutive numbers
are paired ``(L[2*i], L[2*i + 1])``. You can regard
``L`` as a list of pairs of length ``n``.
4. Let ``C`` be the list of all letters which are of the
form ``S[i, j]``, for all ``(i, j)`` in ``L``. As a
string, this is the ciphertext of ``msg``.
Parameters
==========
msg : str
Plaintext string.
        Converted to upper case and filtered of anything that is not
        a letter; the letter "J" is also removed.
key
Short string for key; non-alphabetic letters, J and duplicated
characters are ignored and then, if the length is less than 25
characters, it is padded with other letters of the alphabet
(in alphabetical order).
Returns
=======
ct
Ciphertext (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid5, decipher_bifid5)
"J" will be omitted unless it is replaced with something else:
>>> round_trip = lambda m, k: \
... decipher_bifid5(encipher_bifid5(m, k), k)
>>> key = 'a'
>>> msg = "JOSIE"
>>> round_trip(msg, key)
'OSIE'
>>> round_trip(msg.replace("J", "I"), key)
'IOSIE'
>>> j = "QIQ"
>>> round_trip(msg.replace("J", j), key).replace(j, "J")
'JOSIE'
Notes
=====
The Bifid cipher was invented around 1901 by Felix Delastelle.
It is a *fractional substitution* cipher, where letters are
replaced by pairs of symbols from a smaller alphabet. The
cipher uses a `5 \times 5` square filled with some ordering of the
alphabet, except that "J" is replaced with "I" (this is a so-called
Polybius square; there is a `6 \times 6` analog if you add back in
"J" and also append onto the usual 26 letter alphabet, the digits
0, 1, ..., 9).
According to Helen Gaines' book *Cryptanalysis*, this type of cipher
was used in the field by the German Army during World War I.
See Also
========
decipher_bifid5, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return encipher_bifid(msg, '', key)
def decipher_bifid5(msg, key):
r"""
Return the Bifid cipher decryption of ``msg``.
Explanation
===========
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square; the letter "J" is ignored unless a ``key`` of
length 25 is used.
Parameters
==========
msg
Ciphertext string.
key
Short string for key; duplicated characters are ignored and if
        the length is less than 25 characters, it will be padded with
other letters from the alphabet omitting "J".
Non-alphabetic characters are ignored.
Returns
=======
plaintext
Plaintext from Bifid5 cipher (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5
>>> key = "gold bug"
>>> encipher_bifid5('meet me on friday', key)
'IEILEHFSTSFXEE'
>>> encipher_bifid5('meet me on monday', key)
'IEILHHFSTSFQYE'
>>> decipher_bifid5(_, key)
'MEETMEONMONDAY'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return decipher_bifid(msg, '', key)
def bifid5_square(key=None):
r"""
5x5 Polybius square.
Produce the Polybius square for the `5 \times 5` Bifid cipher.
Examples
========
>>> from sympy.crypto.crypto import bifid5_square
>>> bifid5_square("gold bug")
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
"""
if not key:
key = bifid5
else:
_, key, _ = _prep('', key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return bifid_square(key)
def encipher_bifid6(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
Parameters
==========
msg
Plaintext string (digits okay).
key
Short string for key (digits okay).
If ``key`` is less than 36 characters long, the square will be
filled with letters A through Z and digits 0 through 9.
Returns
=======
ciphertext
Ciphertext from Bifid cipher (all caps, no spaces).
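    Examples
    ========
    For example (the same pairing as shown in ``decipher_bifid6``):
    >>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
    >>> key = "gold bug"
    >>> encipher_bifid6('meet me on monday at 8am', key)
    'KFKLJJHF5MMMKTFRGPL'
    >>> decipher_bifid6(_, key)
    'MEETMEONMONDAYAT8AM'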
See Also
========
decipher_bifid6, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return encipher_bifid(msg, '', key)
def decipher_bifid6(msg, key):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
Parameters
==========
msg
Ciphertext string (digits okay); converted to upper case
key
Short string for key (digits okay).
If ``key`` is less than 36 characters long, the square will be
filled with letters A through Z and digits 0 through 9.
All letters are converted to uppercase.
Returns
=======
plaintext
Plaintext from Bifid cipher (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
>>> key = "gold bug"
>>> encipher_bifid6('meet me on monday at 8am', key)
'KFKLJJHF5MMMKTFRGPL'
>>> decipher_bifid6(_, key)
'MEETMEONMONDAYAT8AM'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return decipher_bifid(msg, '', key)
def bifid6_square(key=None):
r"""
6x6 Polybius square.
Produces the Polybius square for the `6 \times 6` Bifid cipher.
Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".
Examples
========
>>> from sympy.crypto.crypto import bifid6_square
>>> key = "gold bug"
>>> bifid6_square(key)
Matrix([
[G, O, L, D, B, U],
[A, C, E, F, H, I],
[J, K, M, N, P, Q],
[R, S, T, V, W, X],
[Y, Z, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9]])
"""
if not key:
key = bifid6
else:
_, key, _ = _prep('', key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return bifid_square(key)
#################### RSA #############################
def _decipher_rsa_crt(i, d, factors):
"""Decipher RSA using chinese remainder theorem from the information
of the relatively-prime factors of the modulus.
Parameters
==========
i : integer
Ciphertext
d : integer
The exponent component.
factors : list of relatively-prime integers
The integers given must be coprime and the product must equal
the modulus component of the original RSA key.
Examples
========
How to decrypt RSA with CRT:
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
>>> primes = [61, 53]
>>> e = 17
>>> args = primes + [e]
>>> puk = rsa_public_key(*args)
>>> prk = rsa_private_key(*args)
>>> from sympy.crypto.crypto import encipher_rsa, _decipher_rsa_crt
>>> msg = 65
>>> crt_primes = primes
>>> encrypted = encipher_rsa(msg, puk)
>>> decrypted = _decipher_rsa_crt(encrypted, prk[1], primes)
>>> decrypted
65
"""
from sympy.ntheory.modular import crt
moduluses = [pow(i, d, p) for p in factors]
result = crt(factors, moduluses)
if not result:
raise ValueError("CRT failed")
return result[0]
def _rsa_key(*args, public=True, private=True, totient='Euler', index=None, multipower=None):
r"""A private subroutine to generate RSA key
Parameters
==========
public, private : bool, optional
        Flag to generate either a public key or a private key.
totient : 'Euler' or 'Carmichael'
Different notation used for totient.
multipower : bool, optional
Flag to bypass warning for multipower RSA.
"""
from sympy.ntheory import totient as _euler
from sympy.ntheory import reduced_totient as _carmichael
if len(args) < 2:
return False
if totient not in ('Euler', 'Carmichael'):
        raise ValueError(
            "The argument totient={} should be either " \
            "'Euler' or 'Carmichael'." \
.format(totient))
if totient == 'Euler':
_totient = _euler
else:
_totient = _carmichael
if index is not None:
index = as_int(index)
if totient != 'Carmichael':
            raise ValueError(
                "Setting the 'index' keyword argument requires totient "
                "notation to be specified as 'Carmichael'.")
primes, e = args[:-1], args[-1]
if not all(isprime(p) for p in primes):
new_primes = []
for i in primes:
new_primes.extend(factorint(i, multiple=True))
primes = new_primes
n = reduce(lambda i, j: i*j, primes)
tally = multiset(primes)
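    # When every prime factor is distinct, the totient is computed from the
    # distinct primes; repeated factors fall through to the multi-power
    # branch below, which warns unless multipower=True is passed.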
if all(v == 1 for v in tally.values()):
multiple = list(tally.keys())
phi = _totient._from_distinct_primes(*multiple)
else:
if not multipower:
NonInvertibleCipherWarning(
'Non-distinctive primes found in the factors {}. '
'The cipher may not be decryptable for some numbers '
'in the complete residue system Z[{}], but the cipher '
'can still be valid if you restrict the domain to be '
'the reduced residue system Z*[{}]. You can pass '
'the flag multipower=True if you want to suppress this '
'warning.'
.format(primes, n, n)
).warn()
phi = _totient._from_factors(tally)
if igcd(e, phi) == 1:
if public and not private:
if isinstance(index, int):
e = e % phi
e += index * phi
return n, e
if private and not public:
d = mod_inverse(e, phi)
if isinstance(index, int):
d += index * phi
return n, d
return False
def rsa_public_key(*args, **kwargs):
r"""Return the RSA *public key* pair, `(n, e)`
Parameters
==========
args : naturals
If specified as `p, q, e` where `p` and `q` are distinct primes
and `e` is a desired public exponent of the RSA, `n = p q` and
`e` will be verified against the totient
`\phi(n)` (Euler totient) or `\lambda(n)` (Carmichael totient)
to be `\gcd(e, \phi(n)) = 1` or `\gcd(e, \lambda(n)) = 1`.
If specified as `p_1, p_2, \dots, p_n, e` where
`p_1, p_2, \dots, p_n` are specified as primes,
and `e` is specified as a desired public exponent of the RSA,
it will be able to form a multi-prime RSA, which is a more
generalized form of the popular 2-prime RSA.
        It is also possible to form a single-prime RSA by specifying
the argument as `p, e`, which can be considered a trivial case
of a multiprime RSA.
        Furthermore, it is possible to form a multi-power RSA by
specifying two or more pairs of the primes to be same.
However, unlike the two-distinct prime RSA or multi-prime
        RSA, not every number in the complete residue system
(`\mathbb{Z}_n`) will be decryptable since the mapping
`\mathbb{Z}_{n} \rightarrow \mathbb{Z}_{n}`
will not be bijective.
        (Except for the trivial case when
`e = 1`
or more generally,
.. math::
e \in \left \{ 1 + k \lambda(n)
\mid k \in \mathbb{Z} \land k \geq 0 \right \}
when RSA reduces to the identity.)
However, the RSA can still be decryptable for the numbers in the
reduced residue system (`\mathbb{Z}_n^{\times}`), since the
mapping
`\mathbb{Z}_{n}^{\times} \rightarrow \mathbb{Z}_{n}^{\times}`
can still be bijective.
If you pass a non-prime integer to the arguments
`p_1, p_2, \dots, p_n`, the particular number will be
prime-factored and it will become either a multi-prime RSA or a
multi-power RSA in its canonical form, depending on whether the
product equals its radical or not.
`p_1 p_2 \dots p_n = \text{rad}(p_1 p_2 \dots p_n)`
    totient : 'Euler' or 'Carmichael', optional
If ``'Euler'``, it uses Euler's totient `\phi(n)` which is
:meth:`sympy.ntheory.factor_.totient` in SymPy.
If ``'Carmichael'``, it uses Carmichael's totient `\lambda(n)`
which is :meth:`sympy.ntheory.factor_.reduced_totient` in SymPy.
Unlike private key generation, this is a trivial keyword for
public key generation because
`\gcd(e, \phi(n)) = 1 \iff \gcd(e, \lambda(n)) = 1`.
index : nonnegative integer, optional
        Returns an arbitrary solution of an RSA public key at the index
        specified as `0, 1, 2, \dots`. This parameter needs to be
specified along with ``totient='Carmichael'``.
        Similarly to the non-uniqueness of an RSA private key as described
        in the ``index`` parameter documentation in
        :meth:`rsa_private_key`, an RSA public key is also not unique and
        there are infinitely many RSA public exponents which
        behave in the same manner.
        From any given RSA public exponent `e`, there can be
        another RSA public exponent `e + k \lambda(n)`, where `k` is an
        integer and `\lambda` is the Carmichael totient function.
However, considering only the positive cases, there can be
a principal solution of a RSA public exponent `e_0` in
`0 < e_0 < \lambda(n)`, and all the other solutions
        can be canonicalized in the form `e_0 + k \lambda(n)`.
``index`` specifies the `k` notation to yield any possible value
an RSA public key can have.
An example of computing any arbitrary RSA public key:
>>> from sympy.crypto.crypto import rsa_public_key
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=0)
(3233, 17)
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=1)
(3233, 797)
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=2)
(3233, 1577)
multipower : bool, optional
Any pair of non-distinct primes found in the RSA specification
will restrict the domain of the cryptosystem, as noted in the
        explanation of the parameter ``args``.
SymPy RSA key generator may give a warning before dispatching it
as a multi-power RSA, however, you can disable the warning if
you pass ``True`` to this keyword.
Returns
=======
(n, e) : int, int
`n` is a product of any arbitrary number of primes given as
the argument.
`e` is relatively prime (coprime) to the Euler totient
`\phi(n)`.
False
Returned if less than two arguments are given, or `e` is
not relatively prime to the modulus.
Examples
========
>>> from sympy.crypto.crypto import rsa_public_key
A public key of a two-prime RSA:
>>> p, q, e = 3, 5, 7
>>> rsa_public_key(p, q, e)
(15, 7)
>>> rsa_public_key(p, q, 30)
False
A public key of a multiprime RSA:
>>> primes = [2, 3, 5, 7, 11, 13]
>>> e = 7
>>> args = primes + [e]
>>> rsa_public_key(*args)
(30030, 7)
Notes
=====
    Although RSA can be generalized over any modulus `n`, using
    two large primes has become the most popular specification because a
    product of two large primes is usually the hardest to factor
    relative to the number of digits `n` has.
However, it may need further understanding of the time complexities
of each prime-factoring algorithms to verify the claim.
See Also
========
rsa_private_key
encipher_rsa
decipher_rsa
References
==========
.. [1] https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29
.. [2] http://cacr.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
.. [3] https://link.springer.com/content/pdf/10.1007%2FBFb0055738.pdf
.. [4] http://www.itiis.org/digital-library/manuscript/1381
"""
return _rsa_key(*args, public=True, private=False, **kwargs)
def rsa_private_key(*args, **kwargs):
r"""Return the RSA *private key* pair, `(n, d)`
Parameters
==========
args : naturals
The keyword is identical to the ``args`` in
:meth:`rsa_public_key`.
    totient : 'Euler' or 'Carmichael', optional
If ``'Euler'``, it uses Euler's totient convention `\phi(n)`
which is :meth:`sympy.ntheory.factor_.totient` in SymPy.
If ``'Carmichael'``, it uses Carmichael's totient convention
`\lambda(n)` which is
:meth:`sympy.ntheory.factor_.reduced_totient` in SymPy.
There can be some output differences for private key generation
as examples below.
Example using Euler's totient:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Euler')
(3233, 2753)
Example using Carmichael's totient:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Carmichael')
(3233, 413)
index : nonnegative integer, optional
        Returns an arbitrary solution of an RSA private key at the index
        specified as `0, 1, 2, \dots`. This parameter needs to be
specified along with ``totient='Carmichael'``.
        The RSA private exponent is a non-unique solution of
        `e d \mod \lambda(n) = 1` and can be written in the form
        `d + k \lambda(n)`, where `d` is another
        already-computed private exponent, `\lambda` is the
        Carmichael totient function, and `k` is any integer.
However, considering only the positive cases, there can be
a principal solution of a RSA private exponent `d_0` in
`0 < d_0 < \lambda(n)`, and all the other solutions
        can be canonicalized in the form `d_0 + k \lambda(n)`.
``index`` specifies the `k` notation to yield any possible value
an RSA private key can have.
An example of computing any arbitrary RSA private key:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=0)
(3233, 413)
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=1)
(3233, 1193)
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=2)
(3233, 1973)
multipower : bool, optional
The keyword is identical to the ``multipower`` in
:meth:`rsa_public_key`.
Returns
=======
(n, d) : int, int
`n` is a product of any arbitrary number of primes given as
the argument.
`d` is the inverse of `e` (mod `\phi(n)`) where `e` is the
exponent given, and `\phi` is a Euler totient.
False
Returned if less than two arguments are given, or `e` is
not relatively prime to the totient of the modulus.
Examples
========
>>> from sympy.crypto.crypto import rsa_private_key
A private key of a two-prime RSA:
>>> p, q, e = 3, 5, 7
>>> rsa_private_key(p, q, e)
(15, 7)
>>> rsa_private_key(p, q, 30)
False
A private key of a multiprime RSA:
>>> primes = [2, 3, 5, 7, 11, 13]
>>> e = 7
>>> args = primes + [e]
>>> rsa_private_key(*args)
(30030, 823)
See Also
========
rsa_public_key
encipher_rsa
decipher_rsa
References
==========
.. [1] https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29
.. [2] http://cacr.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
.. [3] https://link.springer.com/content/pdf/10.1007%2FBFb0055738.pdf
.. [4] http://www.itiis.org/digital-library/manuscript/1381
"""
return _rsa_key(*args, public=False, private=True, **kwargs)
def _encipher_decipher_rsa(i, key, factors=None):
n, d = key
if not factors:
return pow(i, d, n)
def _is_coprime_set(l):
is_coprime_set = True
for i in range(len(l)):
for j in range(i+1, len(l)):
if igcd(l[i], l[j]) != 1:
is_coprime_set = False
break
return is_coprime_set
prod = reduce(lambda i, j: i*j, factors)
if prod == n and _is_coprime_set(factors):
return _decipher_rsa_crt(i, d, factors)
return _encipher_decipher_rsa(i, key, factors=None)
def encipher_rsa(i, key, factors=None):
r"""Encrypt the plaintext with RSA.
Parameters
==========
i : integer
        The plaintext to be encrypted.
key : (n, e) where n, e are integers
`n` is the modulus of the key and `e` is the exponent of the
key. The encryption is computed by `i^e \bmod n`.
The key can either be a public key or a private key, however,
the message encrypted by a public key can only be decrypted by
a private key, and vice versa, as RSA is an asymmetric
cryptography system.
factors : list of coprime integers
This is identical to the keyword ``factors`` in
:meth:`decipher_rsa`.
Notes
=====
Some specifications may make the RSA not cryptographically
meaningful.
    For example, `0` and `1` always remain the same after any
    number of exponentiations, and thus should be avoided.
    Furthermore, if `i^e < n`, `i` may easily be recovered by taking
    the `e`-th root.
    Also, if the exponent is specified as `1`, or more generally
    as `1 + k \lambda(n)` where `k` is a nonnegative integer and
    `\lambda` is the Carmichael totient, the RSA becomes an identity
    mapping.
Examples
========
>>> from sympy.crypto.crypto import encipher_rsa
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
Public Key Encryption:
>>> p, q, e = 3, 5, 7
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, puk)
3
Private Key Encryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, prk)
3
Encryption using chinese remainder theorem:
>>> encipher_rsa(msg, prk, factors=[p, q])
3
"""
return _encipher_decipher_rsa(i, key, factors=factors)
def decipher_rsa(i, key, factors=None):
r"""Decrypt the ciphertext with RSA.
Parameters
==========
i : integer
        The ciphertext to be decrypted.
key : (n, d) where n, d are integers
`n` is the modulus of the key and `d` is the exponent of the
key. The decryption is computed by `i^d \bmod n`.
The key can either be a public key or a private key, however,
the message encrypted by a public key can only be decrypted by
a private key, and vice versa, as RSA is an asymmetric
cryptography system.
factors : list of coprime integers
As the modulus `n` created from RSA key generation is composed
of arbitrary prime factors
`n = {p_1}^{k_1}{p_2}^{k_2}\dots{p_n}^{k_n}` where
`p_1, p_2, \dots, p_n` are distinct primes and
        `k_1, k_2, \dots, k_n` are positive integers, the Chinese remainder
theorem can be used to compute `i^d \bmod n` from the
fragmented modulo operations like
.. math::
i^d \bmod {p_1}^{k_1}, i^d \bmod {p_2}^{k_2}, \dots,
i^d \bmod {p_n}^{k_n}
or like
.. math::
i^d \bmod {p_1}^{k_1}{p_2}^{k_2},
i^d \bmod {p_3}^{k_3}, \dots ,
i^d \bmod {p_n}^{k_n}
        as long as the moduli do not share any common divisor with
        each other.
The raw primes used in generating the RSA key pair can be a good
option.
        Note that the speed advantage of using this is only viable for
        very large cases (like 2048-bit RSA keys) since the
        overhead of using the pure Python implementation of
        :meth:`sympy.ntheory.modular.crt` may outweigh the
        theoretical speed advantage.
Notes
=====
See the ``Notes`` section in the documentation of
:meth:`encipher_rsa`
Examples
========
>>> from sympy.crypto.crypto import decipher_rsa, encipher_rsa
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
Public Key Encryption and Decryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> new_msg = encipher_rsa(msg, prk)
>>> new_msg
3
>>> decipher_rsa(new_msg, puk)
12
Private Key Encryption and Decryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> new_msg = encipher_rsa(msg, puk)
>>> new_msg
3
>>> decipher_rsa(new_msg, prk)
12
Decryption using chinese remainder theorem:
>>> decipher_rsa(new_msg, prk, factors=[p, q])
12
See Also
========
encipher_rsa
"""
return _encipher_decipher_rsa(i, key, factors=factors)
#################### kid krypto (kid RSA) #############################
def kid_rsa_public_key(a, b, A, B):
r"""
Kid RSA is a version of RSA useful to teach grade school children
since it does not involve exponentiation.
Explanation
===========
Alice wants to talk to Bob. Bob generates keys as follows.
Key generation:
* Select positive integers `a, b, A, B` at random.
* Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1)//M`.
* The *public key* is `(n, e)`. Bob sends these to Alice.
* The *private key* is `(n, d)`, which Bob keeps secret.
Encryption: If `p` is the plaintext message then the
ciphertext is `c = p e \pmod n`.
Decryption: If `c` is the ciphertext message then the
plaintext is `p = c d \pmod n`.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_public_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_public_key(a, b, A, B)
(369, 58)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, e
def kid_rsa_private_key(a, b, A, B):
"""
    Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
    `n = (e d - 1)//M`. The *private key* is `(n, d)`, which Bob
    keeps secret.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_private_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_private_key(a, b, A, B)
(369, 70)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, d
def encipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the public key.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_kid_rsa, kid_rsa_public_key)
>>> msg = 200
>>> a, b, A, B = 3, 4, 5, 6
>>> key = kid_rsa_public_key(a, b, A, B)
>>> encipher_kid_rsa(msg, key)
161
"""
n, e = key
return (msg*e) % n
def decipher_kid_rsa(msg, key):
"""
    Here ``msg`` is the ciphertext and ``key`` is the private key.
Examples
========
>>> from sympy.crypto.crypto import (
... kid_rsa_public_key, kid_rsa_private_key,
... decipher_kid_rsa, encipher_kid_rsa)
>>> a, b, A, B = 3, 4, 5, 6
>>> d = kid_rsa_private_key(a, b, A, B)
>>> msg = 200
>>> pub = kid_rsa_public_key(a, b, A, B)
>>> pri = kid_rsa_private_key(a, b, A, B)
>>> ct = encipher_kid_rsa(msg, pub)
>>> decipher_kid_rsa(ct, pri)
200
"""
n, d = key
return (msg*d) % n
#################### Morse Code ######################################
morse_char = {
".-": "A", "-...": "B",
"-.-.": "C", "-..": "D",
".": "E", "..-.": "F",
"--.": "G", "....": "H",
"..": "I", ".---": "J",
"-.-": "K", ".-..": "L",
"--": "M", "-.": "N",
"---": "O", ".--.": "P",
"--.-": "Q", ".-.": "R",
"...": "S", "-": "T",
"..-": "U", "...-": "V",
".--": "W", "-..-": "X",
"-.--": "Y", "--..": "Z",
"-----": "0", ".----": "1",
"..---": "2", "...--": "3",
"....-": "4", ".....": "5",
"-....": "6", "--...": "7",
"---..": "8", "----.": "9",
".-.-.-": ".", "--..--": ",",
"---...": ":", "-.-.-.": ";",
"..--..": "?", "-....-": "-",
"..--.-": "_", "-.--.": "(",
"-.--.-": ")", ".----.": "'",
"-...-": "=", ".-.-.": "+",
"-..-.": "/", ".--.-.": "@",
"...-..-": "$", "-.-.--": "!"}
char_morse = {v: k for k, v in morse_char.items()}
def encode_morse(msg, sep='|', mapping=None):
"""
Encodes a plaintext into popular Morse Code with letters
separated by ``sep`` and words by a double ``sep``.
Examples
========
>>> from sympy.crypto.crypto import encode_morse
>>> msg = 'ATTACK RIGHT FLANK'
>>> encode_morse(msg)
'.-|-|-|.-|-.-.|-.-||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-'
References
==========
.. [1] https://en.wikipedia.org/wiki/Morse_code
"""
mapping = mapping or char_morse
assert sep not in mapping
word_sep = 2*sep
mapping[" "] = word_sep
suffix = msg and msg[-1] in whitespace
# normalize whitespace
msg = (' ' if word_sep else '').join(msg.split())
# omit unmapped chars
chars = set(''.join(msg.split()))
ok = set(mapping.keys())
msg = translate(msg, None, ''.join(chars - ok))
morsestring = []
words = msg.split()
for word in words:
morseword = []
for letter in word:
morseletter = mapping[letter]
morseword.append(morseletter)
word = sep.join(morseword)
morsestring.append(word)
return word_sep.join(morsestring) + (word_sep if suffix else '')
def decode_morse(msg, sep='|', mapping=None):
"""
    Decodes a Morse Code message with letters separated by ``sep``
    (default is '|') and words by ``word_sep`` (default is '||')
    into plaintext.
Examples
========
>>> from sympy.crypto.crypto import decode_morse
>>> mc = '--|---|...-|.||.|.-|...|-'
>>> decode_morse(mc)
'MOVE EAST'
References
==========
.. [1] https://en.wikipedia.org/wiki/Morse_code
"""
mapping = mapping or morse_char
word_sep = 2*sep
characterstring = []
words = msg.strip(word_sep).split(word_sep)
for word in words:
letters = word.split(sep)
chars = [mapping[c] for c in letters]
word = ''.join(chars)
characterstring.append(word)
rv = " ".join(characterstring)
return rv
#################### LFSRs ##########################################
def lfsr_sequence(key, fill, n):
r"""
This function creates an LFSR sequence.
Parameters
==========
key : list
A list of finite field elements, `[c_0, c_1, \ldots, c_k].`
fill : list
The list of the initial terms of the LFSR sequence,
`[x_0, x_1, \ldots, x_k].`
n
Number of terms of the sequence that the function returns.
Returns
=======
L
The LFSR sequence defined by
        `x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for
        `n \geq k`.
Notes
=====
S. Golomb [G]_ gives a list of three statistical properties a
sequence of numbers `a = \{a_n\}_{n=1}^\infty`,
`a_n \in \{0,1\}`, should display to be considered
"random". Define the autocorrelation of `a` to be
.. math::
C(k) = C(k,a) = \lim_{N\rightarrow \infty} {1\over N}\sum_{n=1}^N (-1)^{a_n + a_{n+k}}.
In the case where `a` is periodic with period
`P` then this reduces to
.. math::
C(k) = {1\over P}\sum_{n=1}^P (-1)^{a_n + a_{n+k}}.
Assume `a` is periodic with period `P`.
- balance:
.. math::
\left|\sum_{n=1}^P(-1)^{a_n}\right| \leq 1.
- low autocorrelation:
.. math::
C(k) = \left\{ \begin{array}{cc} 1,& k = 0,\\ \epsilon, & k \ne 0. \end{array} \right.
(For sequences satisfying these first two properties, it is known
that `\epsilon = -1/P` must hold.)
- proportional runs property: In each period, half the runs have
length `1`, one-fourth have length `2`, etc.
Moreover, there are as many runs of `1`'s as there are of
`0`'s.
Examples
========
>>> from sympy.crypto.crypto import lfsr_sequence
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> lfsr_sequence(key, fill, 10)
[1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2,
1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2]
References
==========
.. [G] Solomon Golomb, Shift register sequences, Aegean Park Press,
Laguna Hills, Ca, 1967
"""
if not isinstance(key, list):
raise TypeError("key must be a list")
if not isinstance(fill, list):
raise TypeError("fill must be a list")
p = key[0].mod
F = FF(p)
s = fill
k = len(fill)
L = []
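    # Each iteration outputs the oldest register entry, shifts the register,
    # and appends the feedback term computed from the previous state via `key`.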
for i in range(n):
s0 = s[:]
L.append(s[0])
s = s[1:k]
x = sum([int(key[i]*s0[i]) for i in range(k)])
s.append(F(x))
return L # use [x.to_int() for x in L] for int version
def lfsr_autocorrelation(L, P, k):
"""
This function computes the LFSR autocorrelation function.
Parameters
==========
L
A periodic sequence of elements of `GF(2)`.
L must have length larger than P.
P
The period of L.
k : int
An integer `k` (`0 < k < P`).
Returns
=======
autocorrelation
The k-th value of the autocorrelation of the LFSR L.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_autocorrelation)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_autocorrelation(s, 15, 7)
-1/15
>>> lfsr_autocorrelation(s, 15, 0)
1
"""
if not isinstance(L, list):
raise TypeError("L (=%s) must be a list" % L)
P = int(P)
k = int(k)
L0 = L[:P] # slices makes a copy
L1 = L0 + L0[:k]
L2 = [(-1)**(L1[i].to_int() + L1[i + k].to_int()) for i in range(P)]
tot = sum(L2)
return Rational(tot, P)
def lfsr_connection_polynomial(s):
"""
This function computes the LFSR connection polynomial.
Parameters
==========
s
        A sequence of even length, with entries in a finite
        field.
Returns
=======
C(x)
The connection polynomial of a minimal LFSR yielding s.
This implements the algorithm in section 3 of J. L. Massey's
article [M]_.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_connection_polynomial)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**4 + x + 1
>>> fill = [F(1), F(0), F(0), F(1)]
>>> key = [F(1), F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(1), F(0)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x**2 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x + 1
References
==========
.. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
Jan 1969.
"""
# Initialization:
p = s[0].mod
x = Symbol("x")
C = 1*x**0
B = 1*x**0
m = 1
b = 1*x**0
L = 0
N = 0
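    # Berlekamp-Massey iteration: N counts the processed terms, L is the
    # current register length, and C is corrected whenever the discrepancy
    # d for the next term is nonzero.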
while N < len(s):
if L > 0:
dC = Poly(C).degree()
r = min(L + 1, dC + 1)
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i)
for i in range(1, dC + 1)]
d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int()
for i in range(1, r)])) % p
if L == 0:
d = s[N].to_int()*x**0
if d == 0:
m += 1
N += 1
if d > 0:
if 2*L > N:
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
m += 1
N += 1
else:
T = C
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
L = N + 1 - L
m = 1
b = d
B = T
N += 1
dC = Poly(C).degree()
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
return sum([coeffsC[i] % p*x**i for i in range(dC + 1)
if coeffsC[i] is not None])
#################### ElGamal #############################
def elgamal_private_key(digit=10, seed=None):
r"""
Return three number tuple as private key.
Explanation
===========
    Elgamal encryption is based on the mathematical problem
called the Discrete Logarithm Problem (DLP). For example,
`a^{b} \equiv c \pmod p`
    In general, if ``a`` and ``b`` are known, ``c`` is easily
    calculated. If ``b`` is unknown, it is hard to use
    ``a`` and ``c`` to get ``b``.
Parameters
==========
digit : int
Minimum number of binary digits for key.
Returns
=======
tuple : (p, r, d)
p = prime number.
r = primitive root.
d = random number.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.testing.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.ntheory import is_primitive_root, isprime
>>> a, b, _ = elgamal_private_key()
>>> isprime(a)
True
>>> is_primitive_root(b, a)
True
"""
randrange = _randrange(seed)
p = nextprime(2**digit)
return p, primitive_root(p), randrange(2, p)
def elgamal_public_key(key):
r"""
Return three number tuple as public key.
Parameters
==========
    key : (p, r, d)
Tuple generated by ``elgamal_private_key``.
Returns
=======
tuple : (p, r, e)
`e = r**d \bmod p`
`d` is a random number in private key.
Examples
========
>>> from sympy.crypto.crypto import elgamal_public_key
>>> elgamal_public_key((1031, 14, 636))
(1031, 14, 212)
"""
p, r, e = key
return p, r, pow(r, e, p)
def encipher_elgamal(i, key, seed=None):
r"""
Encrypt message with public key.
Explanation
===========
``i`` is a plaintext message expressed as an integer.
``key`` is public key (p, r, e). In order to encrypt
a message, a random number ``a`` in ``range(2, p)``
    is generated and the encrypted message is returned as
`c_{1}` and `c_{2}` where:
`c_{1} \equiv r^{a} \pmod p`
`c_{2} \equiv m e^{a} \pmod p`
Parameters
==========
    i
        The plaintext message expressed as an integer.
key
Public key.
Returns
=======
tuple : (c1, c2)
        The ciphertext as a pair of integers.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.testing.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import encipher_elgamal, elgamal_private_key, elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3]); pri
(37, 2, 3)
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 36
>>> encipher_elgamal(msg, pub, seed=[3])
(8, 6)
"""
p, r, e = key
if i < 0 or i >= p:
raise ValueError(
'Message (%s) should be in range(%s)' % (i, p))
randrange = _randrange(seed)
a = randrange(2, p)
return pow(r, a, p), i*pow(e, a, p) % p
def decipher_elgamal(msg, key):
r"""
Decrypt message with private key.
`msg = (c_{1}, c_{2})`
`key = (p, r, d)`
    According to the extended Euclidean algorithm,
`u c_{1}^{d} + p n = 1`
`u \equiv 1/{{c_{1}}^d} \pmod p`
`u c_{2} \equiv \frac{1}{c_{1}^d} c_{2} \equiv \frac{1}{r^{ad}} c_{2} \pmod p`
`\frac{1}{r^{ad}} m e^a \equiv \frac{1}{r^{ad}} m {r^{d a}} \equiv m \pmod p`
Examples
========
>>> from sympy.crypto.crypto import decipher_elgamal
>>> from sympy.crypto.crypto import encipher_elgamal
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.crypto.crypto import elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3])
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 17
>>> decipher_elgamal(encipher_elgamal(msg, pub), pri) == msg
True
"""
p, _, d = key
c1, c2 = msg
u = igcdex(c1**d, p)[0]
return u * c2 % p
################ Diffie-Hellman Key Exchange #########################
def dh_private_key(digit=10, seed=None):
r"""
Return three integer tuple as private key.
Explanation
===========
Diffie-Hellman key exchange is based on the mathematical problem
called the Discrete Logarithm Problem (see ElGamal).
Diffie-Hellman key exchange is divided into the following steps:
    * Alice and Bob agree on a base that consists of a prime ``p``
      and a primitive root of ``p`` called ``g``
    * Alice chooses a number ``a`` and Bob chooses a number ``b`` where
``a`` and ``b`` are random numbers in range `[2, p)`. These are
their private keys.
* Alice then publicly sends Bob `g^{a} \pmod p` while Bob sends
Alice `g^{b} \pmod p`
* They both raise the received value to their secretly chosen
number (``a`` or ``b``) and now have both as their shared key
`g^{ab} \pmod p`
Parameters
==========
digit
Minimum number of binary digits required in key.
Returns
=======
tuple : (p, g, a)
p = prime number.
g = primitive root of p.
a = random number from 2 through p - 1.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.testing.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key
>>> from sympy.ntheory import isprime, is_primitive_root
>>> p, g, _ = dh_private_key()
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
>>> p, g, _ = dh_private_key(5)
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
"""
p = nextprime(2**digit)
g = primitive_root(p)
randrange = _randrange(seed)
a = randrange(2, p)
return p, g, a
def dh_public_key(key):
r"""
Return three number tuple as public key.
This is the tuple that Alice sends to Bob.
Parameters
==========
key : (p, g, a)
A tuple generated by ``dh_private_key``.
Returns
=======
tuple : int, int, int
A tuple of `(p, g, g^a \mod p)` with `p`, `g` and `a` given as
        parameters.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key, dh_public_key
>>> p, g, a = dh_private_key();
>>> _p, _g, x = dh_public_key((p, g, a))
>>> p == _p and g == _g
True
>>> x == pow(g, a, p)
True
"""
p, g, a = key
return p, g, pow(g, a, p)
def dh_shared_key(key, b):
"""
Return an integer that is the shared key.
This is what Bob and Alice can both calculate using the public
keys they received from each other and their private keys.
Parameters
==========
key : (p, g, x)
Tuple `(p, g, x)` generated by ``dh_public_key``.
b
Random number in the range of `2` to `p - 1`
(Chosen by second key exchange member (Bob)).
Returns
=======
int
A shared key.
Examples
========
>>> from sympy.crypto.crypto import (
... dh_private_key, dh_public_key, dh_shared_key)
>>> prk = dh_private_key();
>>> p, g, x = dh_public_key(prk);
>>> sk = dh_shared_key((p, g, x), 1000)
>>> sk == pow(x, 1000, p)
True
"""
p, _, x = key
if 1 >= b or b >= p:
raise ValueError(filldedent('''
            Value of b should be greater than 1 and less
than prime %s.''' % p))
return pow(x, b, p)
################ Goldwasser-Micali Encryption #########################
def _legendre(a, p):
"""
    Returns the Legendre symbol of a and p,
    assuming that p is an odd prime:
    1 if a is a quadratic residue mod p,
    -1 if a is not a quadratic residue mod p,
    0 if a is divisible by p.
Parameters
==========
a : int
The number to test.
p : prime
The prime to test ``a`` against.
Returns
=======
int
Legendre symbol (a / p).
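    Examples
    ========
    A small illustrative check (the quadratic residues modulo the prime 7
    are 1, 2 and 4):
    >>> from sympy.crypto.crypto import _legendre
    >>> _legendre(4, 7)
    1
    >>> _legendre(3, 7)
    -1
    >>> _legendre(14, 7)
    0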
"""
sig = pow(a, (p - 1)//2, p)
if sig == 1:
return 1
elif sig == 0:
return 0
else:
return -1
def _random_coprime_stream(n, seed=None):
randrange = _randrange(seed)
while True:
y = randrange(n)
if gcd(y, n) == 1:
yield y
def gm_private_key(p, q, a=None):
r"""
Check if ``p`` and ``q`` can be used as private keys for
the Goldwasser-Micali encryption. The method works
roughly as follows.
Explanation
===========
#. Pick two large primes $p$ and $q$.
#. Call their product $N$.
#. Given a message as an integer $i$, write $i$ in its bit representation $b_0, \dots, b_n$.
#. For each $k$,
if $b_k = 0$:
let $a_k$ be a random square
(quadratic residue) modulo $p q$
such that ``jacobi_symbol(a, p*q) = 1``
if $b_k = 1$:
let $a_k$ be a random non-square
(non-quadratic residue) modulo $p q$
such that ``jacobi_symbol(a, p*q) = 1``
returns $\left[a_1, a_2, \dots\right]$
$b_k$ can be recovered by checking whether or not
$a_k$ is a residue. And from the $b_k$'s, the message
can be reconstructed.
The idea is that, while ``jacobi_symbol(a, p*q)``
can be easily computed (and when it is equal to $-1$ will
tell you that $a$ is not a square mod $p q$), quadratic
residuosity modulo a composite number is hard to compute
without knowing its factorization.
Moreover, approximately half the numbers coprime to $p q$ have
:func:`~.jacobi_symbol` equal to $1$ . And among those, approximately half
are residues and approximately half are not. This maximizes the
entropy of the code.
Parameters
==========
p, q, a
Initialization variables.
Returns
=======
tuple : (p, q)
The input value ``p`` and ``q``.
Raises
======
ValueError
If ``p`` and ``q`` are not distinct odd primes.
"""
if p == q:
raise ValueError("expected distinct primes, "
"got two copies of %i" % p)
elif not isprime(p) or not isprime(q):
        raise ValueError("first two arguments must be prime, "
                         "got %i and %i" % (p, q))
elif p == 2 or q == 2:
        raise ValueError("first two arguments must not be even, "
                         "got %i and %i" % (p, q))
return p, q
def gm_public_key(p, q, a=None, seed=None):
"""
Compute public keys for ``p`` and ``q``.
Note that in Goldwasser-Micali Encryption,
public keys are randomly selected.
Parameters
==========
p, q, a : int, int, int
Initialization variables.
Returns
=======
tuple : (a, N)
``a`` is the input ``a`` if it is not ``None`` otherwise
some random integer coprime to ``p`` and ``q``.
``N`` is the product of ``p`` and ``q``.
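    Examples
    ========
    A small sketch with an explicitly chosen ``a`` (5 is a quadratic
    non-residue modulo both 3 and 7, so it is accepted):
    >>> from sympy.crypto.crypto import gm_public_key
    >>> gm_public_key(3, 7, 5)
    (5, 21)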
"""
p, q = gm_private_key(p, q)
N = p * q
if a is None:
randrange = _randrange(seed)
while True:
a = randrange(N)
if _legendre(a, p) == _legendre(a, q) == -1:
break
else:
if _legendre(a, p) != -1 or _legendre(a, q) != -1:
return False
return (a, N)
def encipher_gm(i, key, seed=None):
"""
    Encrypt integer 'i' using public_key 'key'.
    Note that Goldwasser-Micali encryption is randomized.
Parameters
==========
i : int
The message to encrypt.
key : (a, N)
The public key.
Returns
=======
list : list of int
The randomized encrypted message.
"""
if i < 0:
raise ValueError(
"message must be a non-negative "
"integer: got %d instead" % i)
a, N = key
bits = []
while i > 0:
bits.append(i % 2)
i //= 2
gen = _random_coprime_stream(N, seed)
rev = reversed(bits)
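    # Each bit b is encoded as r**2 * a**b mod N for a fresh coprime r:
    # a quadratic residue when b = 0 and a non-residue when b = 1.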
encode = lambda b: next(gen)**2*pow(a, b) % N
return [ encode(b) for b in rev ]
def decipher_gm(message, key):
"""
    Decrypt message 'message' using private_key 'key'.
Parameters
==========
message : list of int
The randomized encrypted message.
key : (p, q)
The private key.
Returns
=======
int
        The decrypted message.
"""
p, q = key
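    # A ciphertext element is a quadratic residue modulo both p and q exactly
    # when the encoded bit was 0, so each bit is recovered below as `not b`.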
res = lambda m, p: _legendre(m, p) > 0
bits = [res(m, p) * res(m, q) for m in message]
m = 0
for b in bits:
m <<= 1
m += not b
return m
########### RailFence Cipher #############
def encipher_railfence(message, rails):
"""
Performs Railfence Encryption on plaintext and returns ciphertext
Examples
========
>>> from sympy.crypto.crypto import encipher_railfence
>>> message = "hello world"
>>> encipher_railfence(message,3)
'horel ollwd'
Parameters
==========
message : string, the message to encrypt.
rails : int, the number of rails.
Returns
=======
The Encrypted string message.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rail_fence_cipher
"""
r = list(range(rails))
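    # The rail index of successive characters follows a zigzag, e.g.
    # 0, 1, 2, 1, 0, 1, 2, ... for three rails; a stable sort by this key
    # groups the characters rail by rail.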
p = cycle(r + r[-2:0:-1])
return ''.join(sorted(message, key=lambda i: next(p)))
def decipher_railfence(ciphertext, rails):
"""
Decrypt the message using the given rails
Examples
========
>>> from sympy.crypto.crypto import decipher_railfence
>>> decipher_railfence("horel ollwd",3)
'hello world'
Parameters
==========
    ciphertext : string, the message to decrypt.
rails : int, the number of rails.
Returns
=======
The Decrypted string message.
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
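    # Replaying the same zigzag keys over the positions recovers, for each
    # ciphertext character, the plaintext position it came from.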
idx = sorted(range(len(ciphertext)), key=lambda i: next(p))
res = [''] * len(ciphertext)
for i, c in zip(idx, ciphertext):
res[i] = c
return ''.join(res)
################ Blum-Goldwasser cryptosystem #########################
def bg_private_key(p, q):
"""
Check if p and q can be used as private keys for
the Blum-Goldwasser cryptosystem.
Explanation
===========
The three necessary checks for p and q to pass
so that they can be used as private keys:
1. p and q must both be prime
2. p and q must be distinct
3. p and q must be congruent to 3 mod 4
Parameters
==========
p, q
The keys to be checked.
Returns
=======
p, q
Input values.
Raises
======
ValueError
If p and q do not pass the above conditions.
"""
if not isprime(p) or not isprime(q):
raise ValueError("the two arguments must be prime, "
"got %i and %i" %(p, q))
elif p == q:
raise ValueError("the two arguments must be distinct, "
"got two copies of %i. " %p)
elif (p - 3) % 4 != 0 or (q - 3) % 4 != 0:
raise ValueError("the two arguments must be congruent to 3 mod 4, "
"got %i and %i" %(p, q))
return p, q
def bg_public_key(p, q):
"""
Calculates public keys from private keys.
Explanation
===========
The function first checks the validity of
private keys passed as arguments and
then returns their product.
Parameters
==========
p, q
The private keys.
Returns
=======
N
The public key.
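    Examples
    ========
    For example, 19 and 23 are distinct primes congruent to 3 mod 4:
    >>> from sympy.crypto.crypto import bg_public_key
    >>> bg_public_key(19, 23)
    437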
"""
p, q = bg_private_key(p, q)
N = p * q
return N
def encipher_bg(i, key, seed=None):
"""
Encrypts the message using public key and seed.
Explanation
===========
ALGORITHM:
1. Encodes i as a string of L bits, m.
        2. Select a random element r, where 1 < r < key, and compute
           x = r^2 mod key.
3. Use BBS pseudo-random number generator to generate L random bits, b,
using the initial seed as x.
4. Encrypted message, c_i = m_i XOR b_i, 1 <= i <= L.
5. x_L = x^(2^L) mod key.
6. Return (c, x_L)
Parameters
==========
i
Message, a non-negative integer
key
The public key
Returns
=======
Tuple
(encrypted_message, x_L)
Raises
======
ValueError
If i is negative.
"""
if i < 0:
raise ValueError(
"message must be a non-negative "
"integer: got %d instead" % i)
enc_msg = []
while i > 0:
enc_msg.append(i % 2)
i //= 2
enc_msg.reverse()
L = len(enc_msg)
r = _randint(seed)(2, key - 1)
x = r**2 % key
x_L = pow(int(x), int(2**L), int(key))
rand_bits = []
for _ in range(L):
rand_bits.append(x % 2)
x = x**2 % key
encrypt_msg = [m ^ b for (m, b) in zip(enc_msg, rand_bits)]
return (encrypt_msg, x_L)
def decipher_bg(message, key):
"""
Decrypts the message using private keys.
Explanation
===========
ALGORITHM:
        1. Let c be the encrypted message, y the second number received,
           and p and q be the private keys.
        2. Compute r_p = y^(((p+1)/4)^L) mod p and
           r_q = y^(((q+1)/4)^L) mod q.
        3. Compute x_0 = (q(q^-1 mod p)r_p + p(p^-1 mod q)r_q) mod N.
        4. From x_0, recompute the bits using the BBS generator, as in the
           encryption algorithm.
        5. Compute the original message by XORing c and b.
Parameters
==========
message
Tuple of encrypted message and a non-negative integer.
key
Tuple of private keys.
Returns
=======
orig_msg
The original message
"""
p, q = key
encrypt_msg, y = message
public_key = p * q
L = len(encrypt_msg)
p_t = ((p + 1)/4)**L
q_t = ((q + 1)/4)**L
r_p = pow(int(y), int(p_t), int(p))
r_q = pow(int(y), int(q_t), int(q))
x = (q * mod_inverse(q, p) * r_p + p * mod_inverse(p, q) * r_q) % public_key
orig_bits = []
for _ in range(L):
orig_bits.append(x % 2)
x = x**2 % public_key
orig_msg = 0
for (m, b) in zip(encrypt_msg, orig_bits):
orig_msg = orig_msg * 2
orig_msg += (m ^ b)
return orig_msg
|
4e6081d2bc61dfc01334199b518ff674dead581ec25dc7ed8e1cbd35c1fc0aa2 | from typing import Dict as tDict, Callable
from sympy.core import S, Add, Expr, Basic, Mul, Pow, Rational
from sympy.core.logic import fuzzy_not
from sympy.logic.boolalg import Boolean
from sympy.assumptions import ask, Q # type: ignore
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Explanation
===========
Unlike :func:`~.simplify()` which performs structural simplification
without any assumption, this function transforms the expression into
the form which is only valid under certain assumptions. Note that
    ``simplify()`` is generally not applied during the refining process.
    Refining a boolean expression involves reducing it to ``S.true`` or
``S.false``. Unlike :func:`~.ask()`, the expression will not be reduced
if the truth value cannot be determined.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
>>> refine(Q.real(x), Q.positive(x))
True
>>> refine(Q.positive(x), Q.real(x))
Q.positive(x)
See Also
========
sympy.simplify.simplify.simplify : Structural simplification without assumptions.
sympy.assumptions.ask.ask : Query for boolean expressions using assumptions.
"""
if not isinstance(expr, Basic):
return expr
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
if hasattr(expr, '_eval_refine'):
ref_expr = expr._eval_refine(assumptions)
if ref_expr is not None:
return ref_expr
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
from sympy.functions.elementary.complexes import Abs
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
# arg is Mul
if isinstance(arg, Mul):
r = [refine(abs(a), assumptions) for a in arg.args]
non_abs = []
in_abs = []
for i in r:
if isinstance(i, Abs):
in_abs.append(i.args[0])
else:
non_abs.append(i)
return Mul(*non_abs) * Abs(Mul(*in_abs))
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if isinstance(expr.base, Pow):
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set()
odd_terms = set()
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
def refine_atan2(expr, assumptions):
"""
Handler for the atan2 function.
Examples
========
>>> from sympy import Q, atan2
>>> from sympy.assumptions.refine import refine_atan2
>>> from sympy.abc import x, y
>>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x))
atan(y/x)
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x))
atan(y/x) - pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x))
atan(y/x) + pi
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x))
pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x))
pi/2
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x))
-pi/2
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x))
nan
"""
from sympy.functions.elementary.trigonometric import atan
y, x = expr.args
if ask(Q.real(y) & Q.positive(x), assumptions):
return atan(y / x)
elif ask(Q.negative(y) & Q.negative(x), assumptions):
return atan(y / x) - S.Pi
elif ask(Q.positive(y) & Q.negative(x), assumptions):
return atan(y / x) + S.Pi
elif ask(Q.zero(y) & Q.negative(x), assumptions):
return S.Pi
elif ask(Q.positive(y) & Q.zero(x), assumptions):
return S.Pi/2
elif ask(Q.negative(y) & Q.zero(x), assumptions):
return -S.Pi/2
elif ask(Q.zero(y) & Q.zero(x), assumptions):
return S.NaN
else:
return expr
def refine_re(expr, assumptions):
"""
Handler for real part.
Examples
========
>>> from sympy.assumptions.refine import refine_re
>>> from sympy import Q, re
>>> from sympy.abc import x
>>> refine_re(re(x), Q.real(x))
x
>>> refine_re(re(x), Q.imaginary(x))
0
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions):
return arg
if ask(Q.imaginary(arg), assumptions):
return S.Zero
return _refine_reim(expr, assumptions)
def refine_im(expr, assumptions):
"""
Handler for imaginary part.
    Examples
    ========
>>> from sympy.assumptions.refine import refine_im
>>> from sympy import Q, im
>>> from sympy.abc import x
>>> refine_im(im(x), Q.real(x))
0
>>> refine_im(im(x), Q.imaginary(x))
-I*x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions):
return S.Zero
if ask(Q.imaginary(arg), assumptions):
return - S.ImaginaryUnit * arg
return _refine_reim(expr, assumptions)
def refine_arg(expr, assumptions):
"""
    Handler for complex argument.
    Examples
    ========
>>> from sympy.assumptions.refine import refine_arg
>>> from sympy import Q, arg
>>> from sympy.abc import x
>>> refine_arg(arg(x), Q.positive(x))
0
>>> refine_arg(arg(x), Q.negative(x))
pi
"""
rg = expr.args[0]
if ask(Q.positive(rg), assumptions):
return S.Zero
if ask(Q.negative(rg), assumptions):
return S.Pi
return None
def _refine_reim(expr, assumptions):
# Helper function for refine_re & refine_im
expanded = expr.expand(complex = True)
if expanded != expr:
refined = refine(expanded, assumptions)
if refined != expanded:
return refined
# Best to leave the expression as is
return None
def refine_sign(expr, assumptions):
"""
Handler for sign.
Examples
========
>>> from sympy.assumptions.refine import refine_sign
>>> from sympy import Symbol, Q, sign, im
>>> x = Symbol('x', real = True)
>>> expr = sign(x)
>>> refine_sign(expr, Q.positive(x) & Q.nonzero(x))
1
>>> refine_sign(expr, Q.negative(x) & Q.nonzero(x))
-1
>>> refine_sign(expr, Q.zero(x))
0
>>> y = Symbol('y', imaginary = True)
>>> expr = sign(y)
>>> refine_sign(expr, Q.positive(im(y)))
I
>>> refine_sign(expr, Q.negative(im(y)))
-I
"""
arg = expr.args[0]
if ask(Q.zero(arg), assumptions):
return S.Zero
if ask(Q.real(arg)):
if ask(Q.positive(arg), assumptions):
return S.One
if ask(Q.negative(arg), assumptions):
return S.NegativeOne
if ask(Q.imaginary(arg)):
arg_re, arg_im = arg.as_real_imag()
if ask(Q.positive(arg_im), assumptions):
return S.ImaginaryUnit
if ask(Q.negative(arg_im), assumptions):
return -S.ImaginaryUnit
return expr
def refine_matrixelement(expr, assumptions):
"""
Handler for matrix elements of symmetric matrices.
Examples
========
>>> from sympy.assumptions.refine import refine_matrixelement
>>> from sympy import MatrixSymbol, Q
>>> X = MatrixSymbol('X', 3, 3)
>>> refine_matrixelement(X[0, 1], Q.symmetric(X))
X[0, 1]
>>> refine_matrixelement(X[1, 0], Q.symmetric(X))
X[0, 1]
"""
from sympy.matrices.expressions.matexpr import MatrixElement
matrix, i, j = expr.args
if ask(Q.symmetric(matrix), assumptions):
if (i - j).could_extract_minus_sign():
return expr
return MatrixElement(matrix, j, i)
handlers_dict = {
'Abs': refine_abs,
'Pow': refine_Pow,
'atan2': refine_atan2,
're': refine_re,
'im': refine_im,
'arg': refine_arg,
'sign': refine_sign,
'MatrixElement': refine_matrixelement
} # type: tDict[str, Callable[[Expr, Boolean], Expr]]
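# Illustrative note (assumption, not stated in this file): the keys above are
# class names, so ``refine`` presumably dispatches by looking up
# ``expr.__class__.__name__`` in this dictionary and calling the matching
# handler with ``(expr, assumptions)``, as the type annotation suggests.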
|
4d70d722cae2d6d771b118bf5fae0da40def89db78ecf4a8f758b5ec06190f7e | """
Module to evaluate a proposition with assumptions using a SAT algorithm.
"""
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.assumptions.ask_generated import get_all_known_facts
from sympy.assumptions.assume import global_assumptions, AppliedPredicate
from sympy.assumptions.sathandlers import class_fact_registry
from sympy.core import oo
from sympy.logic.inference import satisfiable
from sympy.assumptions.cnf import CNF, EncodedCNF
def satask(proposition, assumptions=True, context=global_assumptions,
use_known_facts=True, iterations=oo):
"""
Function to evaluate a proposition with assumptions using a SAT algorithm.
This function extracts every fact relevant to the expressions composing the
proposition and the assumptions. For example, if a predicate containing
``Abs(x)`` is proposed, then ``Q.zero(Abs(x)) | Q.positive(Abs(x))``
will be found and passed to the SAT solver because ``Q.nonnegative`` is
registered as a fact for ``Abs``.
Proposition is evaluated to ``True`` or ``False`` if the truth value can be
determined. If not, ``None`` is returned.
Parameters
==========
proposition : Any boolean expression.
Proposition which will be evaluated to boolean value.
assumptions : Any boolean expression, optional.
Local assumptions to evaluate the *proposition*.
context : AssumptionsContext, optional.
Default assumptions to evaluate the *proposition*. By default,
this is ``sympy.assumptions.global_assumptions`` variable.
use_known_facts : bool, optional.
If ``True``, facts from ``sympy.assumptions.ask_generated``
module are passed to SAT solver as well.
iterations : int, optional.
Number of times that relevant facts are recursively extracted.
Default is infinite times until no new fact is found.
Returns
=======
``True``, ``False``, or ``None``
Examples
========
>>> from sympy import Abs, Q
>>> from sympy.assumptions.satask import satask
>>> from sympy.abc import x
>>> satask(Q.zero(Abs(x)), Q.zero(x))
True
"""
props = CNF.from_prop(proposition)
_props = CNF.from_prop(~proposition)
assumptions = CNF.from_prop(assumptions)
context_cnf = CNF()
if context:
context_cnf = context_cnf.extend(context)
sat = get_all_relevant_facts(props, assumptions, context_cnf,
use_known_facts=use_known_facts, iterations=iterations)
sat.add_from_cnf(assumptions)
if context:
sat.add_from_cnf(context_cnf)
return check_satisfiability(props, _props, sat)
def check_satisfiability(prop, _prop, factbase):
sat_true = factbase.copy()
sat_false = factbase.copy()
sat_true.add_from_cnf(prop)
sat_false.add_from_cnf(_prop)
can_be_true = satisfiable(sat_true)
can_be_false = satisfiable(sat_false)
if can_be_true and can_be_false:
return None
if can_be_true and not can_be_false:
return True
if not can_be_true and can_be_false:
return False
if not can_be_true and not can_be_false:
# TODO: Run additional checks to see which combination of the
# assumptions, global_assumptions, and relevant_facts are
# inconsistent.
raise ValueError("Inconsistent assumptions")
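# Illustrative summary (comment only, derived from the branches above): with
# ``prop``/``_prop`` the CNF of the proposition and of its negation,
#   both satisfiable          -> None  (truth value cannot be determined)
#   only prop satisfiable     -> True
#   only ~prop satisfiable    -> False
#   neither satisfiable       -> ValueError (inconsistent assumptions)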
def extract_predargs(proposition, assumptions=None, context=None):
"""
Extract every expression in the argument of predicates from *proposition*,
*assumptions* and *context*.
Parameters
==========
proposition : sympy.assumptions.cnf.CNF
assumptions : sympy.assumptions.cnf.CNF, optional.
context : sympy.assumptions.cnf.CNF, optional.
CNF generated from assumptions context.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.assumptions.cnf import CNF
>>> from sympy.assumptions.satask import extract_predargs
>>> from sympy.abc import x, y
>>> props = CNF.from_prop(Q.zero(Abs(x*y)))
>>> assump = CNF.from_prop(Q.zero(x) & Q.zero(y))
>>> extract_predargs(props, assump)
{x, y, Abs(x*y)}
"""
req_keys = find_symbols(proposition)
keys = proposition.all_predicates()
# XXX: We need this since True/False are not Basic
lkeys = set()
if assumptions:
lkeys |= assumptions.all_predicates()
if context:
lkeys |= context.all_predicates()
lkeys = lkeys - {S.true, S.false}
tmp_keys = None
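# Fixed-point loop (descriptive comment): keep pulling in predicates from the
# assumptions/context whose free symbols overlap the symbols collected so far,
# until no new symbols are found.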
while tmp_keys != set():
tmp = set()
for l in lkeys:
syms = find_symbols(l)
if (syms & req_keys) != set():
tmp |= syms
tmp_keys = tmp - req_keys
req_keys |= tmp_keys
keys |= {l for l in lkeys if find_symbols(l) & req_keys != set()}
exprs = set()
for key in keys:
if isinstance(key, AppliedPredicate):
exprs |= set(key.arguments)
else:
exprs.add(key)
return exprs
def find_symbols(pred):
"""
Find every :obj:`~.Symbol` in *pred*.
Parameters
==========
pred : sympy.assumptions.cnf.CNF, or any Expr.
"""
if isinstance(pred, CNF):
symbols = set()
for a in pred.all_predicates():
symbols |= find_symbols(a)
return symbols
return pred.atoms(Symbol)
def get_relevant_clsfacts(exprs, relevant_facts=None):
"""
Extract relevant facts from the items in *exprs*. Facts are defined in
``assumptions.sathandlers`` module.
This function is recursively called by ``get_all_relevant_facts()``.
Parameters
==========
exprs : set
Expressions whose relevant facts are searched.
relevant_facts : sympy.assumptions.cnf.CNF, optional.
Pre-discovered relevant facts.
Returns
=======
exprs : set
Candidates for next relevant fact searching.
relevant_facts : sympy.assumptions.cnf.CNF
Updated relevant facts.
Examples
========
Here, we will see how facts relevant to ``Abs(x*y)`` are recursively
extracted. On the first run, a set containing the expression is passed
without pre-discovered relevant facts. The result is a set containing
candidates for the next run, and a ``CNF()`` instance containing facts
which are relevant to ``Abs`` and its argument.
>>> from sympy import Abs
>>> from sympy.assumptions.satask import get_relevant_clsfacts
>>> from sympy.abc import x, y
>>> exprs = {Abs(x*y)}
>>> exprs, facts = get_relevant_clsfacts(exprs)
>>> exprs
{x*y}
>>> facts.clauses #doctest: +SKIP
{frozenset({Literal(Q.odd(Abs(x*y)), False), Literal(Q.odd(x*y), True)}),
frozenset({Literal(Q.zero(Abs(x*y)), False), Literal(Q.zero(x*y), True)}),
frozenset({Literal(Q.even(Abs(x*y)), False), Literal(Q.even(x*y), True)}),
frozenset({Literal(Q.zero(Abs(x*y)), True), Literal(Q.zero(x*y), False)}),
frozenset({Literal(Q.even(Abs(x*y)), False),
Literal(Q.odd(Abs(x*y)), False),
Literal(Q.odd(x*y), True)}),
frozenset({Literal(Q.even(Abs(x*y)), False),
Literal(Q.even(x*y), True),
Literal(Q.odd(Abs(x*y)), False)}),
frozenset({Literal(Q.positive(Abs(x*y)), False),
Literal(Q.zero(Abs(x*y)), False)})}
We pass the first run's results to the second run, and get the expressions
for the next run and the updated facts.
>>> exprs, facts = get_relevant_clsfacts(exprs, relevant_facts=facts)
>>> exprs
{x, y}
On the final run, no more candidates are returned, thus we know that all
relevant facts have been successfully retrieved.
>>> exprs, facts = get_relevant_clsfacts(exprs, relevant_facts=facts)
>>> exprs
set()
"""
if not relevant_facts:
relevant_facts = CNF()
newexprs = set()
for expr in exprs:
for fact in class_fact_registry(expr):
newfact = CNF.to_CNF(fact)
relevant_facts = relevant_facts._and(newfact)
for key in newfact.all_predicates():
if isinstance(key, AppliedPredicate):
newexprs |= set(key.arguments)
return newexprs - exprs, relevant_facts
def get_all_relevant_facts(proposition, assumptions, context,
use_known_facts=True, iterations=oo):
"""
Extract all relevant facts from *proposition* and *assumptions*.
This function extracts the facts by recursively calling
``get_relevant_clsfacts()``. Extracted facts are converted to
``EncodedCNF`` and returned.
Parameters
==========
proposition : sympy.assumptions.cnf.CNF
CNF generated from proposition expression.
assumptions : sympy.assumptions.cnf.CNF
CNF generated from assumption expression.
context : sympy.assumptions.cnf.CNF
CNF generated from assumptions context.
use_known_facts : bool, optional.
If ``True``, facts from ``sympy.assumptions.ask_generated``
module are encoded as well.
iterations : int, optional.
Number of times that relevant facts are recursively extracted.
Default is infinite times until no new fact is found.
Returns
=======
sympy.assumptions.cnf.EncodedCNF
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.cnf import CNF
>>> from sympy.assumptions.satask import get_all_relevant_facts
>>> from sympy.abc import x, y
>>> props = CNF.from_prop(Q.nonzero(x*y))
>>> assump = CNF.from_prop(Q.nonzero(x))
>>> context = CNF.from_prop(Q.nonzero(y))
>>> get_all_relevant_facts(props, assump, context) #doctest: +SKIP
<sympy.assumptions.cnf.EncodedCNF at 0x7f09faa6ccd0>
"""
# The relevant facts might introduce new keys, e.g., Q.zero(x*y) will
# introduce the keys Q.zero(x) and Q.zero(y), so we need to run it until
# we stop getting new things. Hopefully this strategy won't lead to an
# infinite loop in the future.
i = 0
relevant_facts = CNF()
all_exprs = set()
while True:
if i == 0:
exprs = extract_predargs(proposition, assumptions, context)
all_exprs |= exprs
exprs, relevant_facts = get_relevant_clsfacts(exprs, relevant_facts)
i += 1
if i >= iterations:
break
if not exprs:
break
if use_known_facts:
known_facts_CNF = CNF()
known_facts_CNF.add_clauses(get_all_known_facts())
kf_encoded = EncodedCNF()
kf_encoded.from_cnf(known_facts_CNF)
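# Descriptive note: the known-fact clauses are instantiated once per relevant
# expression. Each copy is shifted by a per-expression offset (``delta``) so
# that the integer literals used for different expressions never collide.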
def translate_literal(lit, delta):
if lit > 0:
return lit + delta
else:
return lit - delta
def translate_data(data, delta):
return [{translate_literal(i, delta) for i in clause} for clause in data]
data = []
symbols = []
n_lit = len(kf_encoded.symbols)
for i, expr in enumerate(all_exprs):
symbols += [pred(expr) for pred in kf_encoded.symbols]
data += translate_data(kf_encoded.data, i * n_lit)
encoding = dict(list(zip(symbols, range(1, len(symbols)+1))))
ctx = EncodedCNF(data, encoding)
else:
ctx = EncodedCNF()
ctx.add_from_cnf(relevant_facts)
return ctx
|
9204f439a60170898d25147ef1e2311db515e334e3e3f255eb39867e74050d32 | """
Known facts in assumptions module.
This module defines the facts between unary predicates in ``get_known_facts()``,
and provides functions to generate the contents of the
``sympy.assumptions.ask_generated`` file.
"""
from sympy.assumptions import Q
from sympy.assumptions.assume import AppliedPredicate
from sympy.core.cache import cacheit
from sympy.core.symbol import Symbol
from sympy.logic.boolalg import (to_cnf, And, Not, Implies, Equivalent,
Exclusive,)
from sympy.logic.inference import satisfiable
@cacheit
def get_composite_predicates():
# To reduce the complexity of sat solver, these predicates are
# transformed into the combination of primitive predicates.
return {
Q.real : Q.negative | Q.zero | Q.positive,
Q.integer : Q.even | Q.odd,
Q.nonpositive : Q.negative | Q.zero,
Q.nonzero : Q.negative | Q.positive,
Q.nonnegative : Q.zero | Q.positive,
Q.extended_real : Q.negative_infinite | Q.negative | Q.zero | Q.positive | Q.positive_infinite,
Q.extended_positive: Q.positive | Q.positive_infinite,
Q.extended_negative: Q.negative | Q.negative_infinite,
Q.extended_nonzero: Q.negative_infinite | Q.negative | Q.positive | Q.positive_infinite,
Q.extended_nonpositive: Q.negative_infinite | Q.negative | Q.zero,
Q.extended_nonnegative: Q.zero | Q.positive | Q.positive_infinite,
Q.complex : Q.algebraic | Q.transcendental
}
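# Illustrative example (comment only): under this map a fact such as
# ``Q.nonzero(x)`` is rewritten as ``Q.negative(x) | Q.positive(x)`` when the
# facts are converted to CNF (see ``to_NNF`` in ``sympy.assumptions.cnf``).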
@cacheit
def get_known_facts(x=None):
"""
Facts between unary predicates.
Parameters
==========
x : Symbol, optional
Placeholder symbol for unary facts. Default is ``Symbol('x')``.
Returns
=======
fact : Known facts in conjunctive normal form.
"""
if x is None:
x = Symbol('x')
fact = And(
# primitive predicates for extended real exclude each other.
Exclusive(Q.negative_infinite(x), Q.negative(x), Q.zero(x),
Q.positive(x), Q.positive_infinite(x)),
# build complex plane
Exclusive(Q.real(x), Q.imaginary(x)),
Implies(Q.real(x) | Q.imaginary(x), Q.complex(x)),
# other subsets of complex
Exclusive(Q.transcendental(x), Q.algebraic(x)),
Equivalent(Q.real(x), Q.rational(x) | Q.irrational(x)),
Exclusive(Q.irrational(x), Q.rational(x)),
Implies(Q.rational(x), Q.algebraic(x)),
# integers
Exclusive(Q.even(x), Q.odd(x)),
Implies(Q.integer(x), Q.rational(x)),
Implies(Q.zero(x), Q.even(x)),
Exclusive(Q.composite(x), Q.prime(x)),
Implies(Q.composite(x) | Q.prime(x), Q.integer(x) & Q.positive(x)),
Implies(Q.even(x) & Q.positive(x) & ~Q.prime(x), Q.composite(x)),
# hermitian and antihermitian
Implies(Q.real(x), Q.hermitian(x)),
Implies(Q.imaginary(x), Q.antihermitian(x)),
Implies(Q.zero(x), Q.hermitian(x) | Q.antihermitian(x)),
# define finity and infinity, and build extended real line
Exclusive(Q.infinite(x), Q.finite(x)),
Implies(Q.complex(x), Q.finite(x)),
Implies(Q.negative_infinite(x) | Q.positive_infinite(x), Q.infinite(x)),
# commutativity
Implies(Q.finite(x) | Q.infinite(x), Q.commutative(x)),
# matrices
Implies(Q.orthogonal(x), Q.positive_definite(x)),
Implies(Q.orthogonal(x), Q.unitary(x)),
Implies(Q.unitary(x) & Q.real_elements(x), Q.orthogonal(x)),
Implies(Q.unitary(x), Q.normal(x)),
Implies(Q.unitary(x), Q.invertible(x)),
Implies(Q.normal(x), Q.square(x)),
Implies(Q.diagonal(x), Q.normal(x)),
Implies(Q.positive_definite(x), Q.invertible(x)),
Implies(Q.diagonal(x), Q.upper_triangular(x)),
Implies(Q.diagonal(x), Q.lower_triangular(x)),
Implies(Q.lower_triangular(x), Q.triangular(x)),
Implies(Q.upper_triangular(x), Q.triangular(x)),
Implies(Q.triangular(x), Q.upper_triangular(x) | Q.lower_triangular(x)),
Implies(Q.upper_triangular(x) & Q.lower_triangular(x), Q.diagonal(x)),
Implies(Q.diagonal(x), Q.symmetric(x)),
Implies(Q.unit_triangular(x), Q.triangular(x)),
Implies(Q.invertible(x), Q.fullrank(x)),
Implies(Q.invertible(x), Q.square(x)),
Implies(Q.symmetric(x), Q.square(x)),
Implies(Q.fullrank(x) & Q.square(x), Q.invertible(x)),
Equivalent(Q.invertible(x), ~Q.singular(x)),
Implies(Q.integer_elements(x), Q.real_elements(x)),
Implies(Q.real_elements(x), Q.complex_elements(x)),
)
return fact
def generate_known_facts_dict(keys, fact):
"""
Computes and returns a dictionary which contains the relations between
unary predicates.
Each key is a predicate, and its item is two groups of predicates.
The first group contains the predicates which are implied by the key, and
the second group contains the predicates which are rejected by the key.
All predicates in *keys* and *fact* must be unary and have same placeholder
symbol.
Parameters
==========
keys : list of AppliedPredicate instances.
fact : Fact between predicates in conjunctive normal form.
Examples
========
>>> from sympy import Q, And, Implies
>>> from sympy.assumptions.facts import generate_known_facts_dict
>>> from sympy.abc import x
>>> keys = [Q.even(x), Q.odd(x), Q.zero(x)]
>>> fact = And(Implies(Q.even(x), ~Q.odd(x)),
... Implies(Q.zero(x), Q.even(x)))
>>> generate_known_facts_dict(keys, fact)
{Q.even: ({Q.even}, {Q.odd}),
Q.odd: ({Q.odd}, {Q.even, Q.zero}),
Q.zero: ({Q.even, Q.zero}, {Q.odd})}
"""
fact_cnf = to_cnf(fact)
mapping = single_fact_lookup(keys, fact_cnf)
ret = {}
for key, value in mapping.items():
implied = set()
rejected = set()
for expr in value:
if isinstance(expr, AppliedPredicate):
implied.add(expr.function)
elif isinstance(expr, Not):
pred = expr.args[0]
rejected.add(pred.function)
ret[key.function] = (implied, rejected)
return ret
@cacheit
def get_known_facts_keys():
"""
Return every unary predicate registered to ``Q``.
This function is used to generate the keys for
``generate_known_facts_dict``.
"""
exclude = set()
for pred in [Q.eq, Q.ne, Q.gt, Q.lt, Q.ge, Q.le]:
# exclude polyadic predicates
exclude.add(pred)
result = []
for attr in Q.__class__.__dict__:
if attr.startswith('__'):
continue
pred = getattr(Q, attr)
if pred in exclude:
continue
result.append(pred)
return result
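# Illustrative note (comment only): the returned list contains unary
# predicates such as ``Q.even``, ``Q.positive`` or ``Q.hermitian``, while the
# binary relational predicates (``Q.eq``, ``Q.lt``, ...) are excluded above.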
def single_fact_lookup(known_facts_keys, known_facts_cnf):
# Return the dictionary for quick lookup of single fact
mapping = {}
for key in known_facts_keys:
mapping[key] = {key}
for other_key in known_facts_keys:
if other_key != key:
if ask_full_inference(other_key, key, known_facts_cnf):
mapping[key].add(other_key)
if ask_full_inference(~other_key, key, known_facts_cnf):
mapping[key].add(~other_key)
return mapping
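# Illustrative sketch (comment only, using the small fact set from the
# ``generate_known_facts_dict`` docstring above): with keys ``Q.even(x)``,
# ``Q.odd(x)``, ``Q.zero(x)`` the entry for ``Q.zero(x)`` would be
# ``{Q.zero(x), Q.even(x), ~Q.odd(x)}`` -- the key itself, every other key it
# implies, and the negation of every key it rejects.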
def ask_full_inference(proposition, assumptions, known_facts_cnf):
"""
Method for inferring properties about objects.
"""
if not satisfiable(And(known_facts_cnf, assumptions, proposition)):
return False
if not satisfiable(And(known_facts_cnf, assumptions, Not(proposition))):
return True
return None
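# Illustrative behaviour (comment only, using the facts defined above): given
# the clause ``Implies(Q.zero(x), Q.even(x))`` the call
# ``ask_full_inference(Q.even(x), Q.zero(x), cnf)`` returns True, while
# ``ask_full_inference(Q.odd(x), Q.even(x), cnf)`` returns False because
# ``Exclusive(Q.even(x), Q.odd(x))`` makes the conjunction unsatisfiable, and
# an undecided query such as ``Q.positive`` from ``Q.even`` returns None.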
|
0a8aad131f959234b0cfe3a26b87032390169825808e0aca9d172633f098d672 | """
The classes used here are for the internal use of assumptions system
only and should not be used anywhere else as these do not possess the
signatures common to SymPy objects. For general use of logic constructs
please refer to sympy.logic classes And, Or, Not, etc.
"""
from itertools import combinations, product
from sympy.core.singleton import S
from sympy.logic.boolalg import (Equivalent, ITE, Implies, Nand, Nor, Xor)
from sympy.core.relational import Eq, Ne, Gt, Lt, Ge, Le
from sympy.logic.boolalg import Or, And, Not, Xnor
from itertools import zip_longest
class Literal:
"""
The smallest element of a CNF object.
Parameters
==========
lit : Boolean expression
is_Not : bool
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.cnf import Literal
>>> from sympy.abc import x
>>> Literal(Q.even(x))
Literal(Q.even(x), False)
>>> Literal(~Q.even(x))
Literal(Q.even(x), True)
"""
def __new__(cls, lit, is_Not=False):
if isinstance(lit, Not):
lit = lit.args[0]
is_Not = True
elif isinstance(lit, (AND, OR, Literal)):
return ~lit if is_Not else lit
obj = super().__new__(cls)
obj.lit = lit
obj.is_Not = is_Not
return obj
@property
def arg(self):
return self.lit
def rcall(self, expr):
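# Descriptive comment: apply ``expr`` to the wrapped predicate -- plain
# callables (e.g. an unapplied ``Predicate``) are called directly, otherwise
# ``apply`` is tried and ``rcall`` is used as a fallback; the Not flag is kept.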
if callable(self.lit):
lit = self.lit(expr)
else:
try:
lit = self.lit.apply(expr)
except AttributeError:
lit = self.lit.rcall(expr)
return type(self)(lit, self.is_Not)
def __invert__(self):
is_Not = not self.is_Not
return Literal(self.lit, is_Not)
def __str__(self):
return '{}({}, {})'.format(type(self).__name__, self.lit, self.is_Not)
__repr__ = __str__
def __eq__(self, other):
return self.arg == other.arg and self.is_Not == other.is_Not
def __hash__(self):
h = hash((type(self).__name__, self.arg, self.is_Not))
return h
class OR:
"""
A low-level implementation for Or
"""
def __init__(self, *args):
self._args = args
@property
def args(self):
return sorted(self._args, key=str)
def rcall(self, expr):
return type(self)(*[arg.rcall(expr)
for arg in self._args
])
def __invert__(self):
return AND(*[~arg for arg in self._args])
def __hash__(self):
return hash((type(self).__name__,) + tuple(self.args))
def __eq__(self, other):
return self.args == other.args
def __str__(self):
s = '(' + ' | '.join([str(arg) for arg in self.args]) + ')'
return s
__repr__ = __str__
class AND:
"""
A low-level implementation for And
"""
def __init__(self, *args):
self._args = args
def __invert__(self):
return OR(*[~arg for arg in self._args])
@property
def args(self):
return sorted(self._args, key=str)
def rcall(self, expr):
return type(self)(*[arg.rcall(expr)
for arg in self._args
])
def __hash__(self):
return hash((type(self).__name__,) + tuple(self.args))
def __eq__(self, other):
return self.args == other.args
def __str__(self):
s = '('+' & '.join([str(arg) for arg in self.args])+')'
return s
__repr__ = __str__
def to_NNF(expr, composite_map=None):
"""
Generates the Negation Normal Form of any boolean expression in terms
of AND, OR, and Literal objects.
Examples
========
>>> from sympy import Q, Eq
>>> from sympy.assumptions.cnf import to_NNF
>>> from sympy.abc import x, y
>>> expr = Q.even(x) & ~Q.positive(x)
>>> to_NNF(expr)
(Literal(Q.even(x), False) & Literal(Q.positive(x), True))
Supported boolean objects are converted to corresponding predicates.
>>> to_NNF(Eq(x, y))
Literal(Q.eq(x, y), False)
If ``composite_map`` argument is given, ``to_NNF`` decomposes the
specified predicate into a combination of primitive predicates.
>>> cmap = {Q.nonpositive: Q.negative | Q.zero}
>>> to_NNF(Q.nonpositive, cmap)
(Literal(Q.negative, False) | Literal(Q.zero, False))
>>> to_NNF(Q.nonpositive(x), cmap)
(Literal(Q.negative(x), False) | Literal(Q.zero(x), False))
"""
from sympy.assumptions.ask import Q
from sympy.assumptions.assume import AppliedPredicate, Predicate
if composite_map is None:
composite_map = dict()
binrelpreds = {Eq: Q.eq, Ne: Q.ne, Gt: Q.gt, Lt: Q.lt, Ge: Q.ge, Le: Q.le}
if type(expr) in binrelpreds:
pred = binrelpreds[type(expr)]
expr = pred(*expr.args)
if isinstance(expr, Not):
arg = expr.args[0]
tmp = to_NNF(arg, composite_map) # Strategy: negate the NNF of expr
return ~tmp
if isinstance(expr, Or):
return OR(*[to_NNF(x, composite_map) for x in Or.make_args(expr)])
if isinstance(expr, And):
return AND(*[to_NNF(x, composite_map) for x in And.make_args(expr)])
if isinstance(expr, Nand):
tmp = AND(*[to_NNF(x, composite_map) for x in expr.args])
return ~tmp
if isinstance(expr, Nor):
tmp = OR(*[to_NNF(x, composite_map) for x in expr.args])
return ~tmp
if isinstance(expr, Xor):
cnfs = []
for i in range(0, len(expr.args) + 1, 2):
for neg in combinations(expr.args, i):
clause = [~to_NNF(s, composite_map) if s in neg else to_NNF(s, composite_map)
for s in expr.args]
cnfs.append(OR(*clause))
return AND(*cnfs)
if isinstance(expr, Xnor):
cnfs = []
for i in range(0, len(expr.args) + 1, 2):
for neg in combinations(expr.args, i):
clause = [~to_NNF(s, composite_map) if s in neg else to_NNF(s, composite_map)
for s in expr.args]
cnfs.append(OR(*clause))
return ~AND(*cnfs)
if isinstance(expr, Implies):
L, R = to_NNF(expr.args[0], composite_map), to_NNF(expr.args[1], composite_map)
return OR(~L, R)
if isinstance(expr, Equivalent):
cnfs = []
for a, b in zip_longest(expr.args, expr.args[1:], fillvalue=expr.args[0]):
a = to_NNF(a, composite_map)
b = to_NNF(b, composite_map)
cnfs.append(OR(~a, b))
return AND(*cnfs)
if isinstance(expr, ITE):
L = to_NNF(expr.args[0], composite_map)
M = to_NNF(expr.args[1], composite_map)
R = to_NNF(expr.args[2], composite_map)
return AND(OR(~L, M), OR(L, R))
if isinstance(expr, AppliedPredicate):
pred, args = expr.function, expr.arguments
newpred = composite_map.get(pred, None)
if newpred is not None:
return to_NNF(newpred.rcall(*args), composite_map)
if isinstance(expr, Predicate):
newpred = composite_map.get(expr, None)
if newpred is not None:
return to_NNF(newpred, composite_map)
return Literal(expr)
def distribute_AND_over_OR(expr):
"""
Distributes AND over OR in the NNF expression.
Returns the result (conjunctive normal form of the expression)
as a CNF object.
"""
if not isinstance(expr, (AND, OR)):
tmp = set()
tmp.add(frozenset((expr,)))
return CNF(tmp)
if isinstance(expr, OR):
return CNF.all_or(*[distribute_AND_over_OR(arg)
for arg in expr._args])
if isinstance(expr, AND):
return CNF.all_and(*[distribute_AND_over_OR(arg)
for arg in expr._args])
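# Illustrative example (comment only): for an NNF such as A | (B & C),
# distribution yields the CNF clauses {A, B} and {A, C}; for A & (B | C) it
# yields the clauses {A} and {B, C}.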
class CNF:
"""
Class to represent CNF of a Boolean expression.
Consists of set of clauses, which themselves are stored as
frozenset of Literal objects.
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.cnf import CNF
>>> from sympy.abc import x
>>> cnf = CNF.from_prop(Q.real(x) & ~Q.zero(x))
>>> cnf.clauses
{frozenset({Literal(Q.zero(x), True)}),
frozenset({Literal(Q.negative(x), False),
Literal(Q.positive(x), False), Literal(Q.zero(x), False)})}
"""
def __init__(self, clauses=None):
if not clauses:
clauses = set()
self.clauses = clauses
def add(self, prop):
clauses = CNF.to_CNF(prop).clauses
self.add_clauses(clauses)
def __str__(self):
s = ' & '.join(
['(' + ' | '.join([str(lit) for lit in clause]) +')'
for clause in self.clauses]
)
return s
def extend(self, props):
for p in props:
self.add(p)
return self
def copy(self):
return CNF(set(self.clauses))
def add_clauses(self, clauses):
self.clauses |= clauses
@classmethod
def from_prop(cls, prop):
res = cls()
res.add(prop)
return res
def __iand__(self, other):
self.add_clauses(other.clauses)
return self
def all_predicates(self):
predicates = set()
for c in self.clauses:
predicates |= {arg.lit for arg in c}
return predicates
def _or(self, cnf):
clauses = set()
for a, b in product(self.clauses, cnf.clauses):
tmp = set(a)
for t in b:
tmp.add(t)
clauses.add(frozenset(tmp))
return CNF(clauses)
def _and(self, cnf):
clauses = self.clauses.union(cnf.clauses)
return CNF(clauses)
def _not(self):
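# Descriptive comment: negation by De Morgan. ~(C1 & ... & Cn) is
# ~C1 | ... | ~Cn, where each ~Ci is the conjunction of the negated literals
# of clause Ci (a CNF of singleton clauses); these per-clause CNFs are then
# OR-ed together with ``_or``, which redistributes into CNF.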
clss = list(self.clauses)
ll = set()
for x in clss[-1]:
ll.add(frozenset((~x,)))
ll = CNF(ll)
for rest in clss[:-1]:
p = set()
for x in rest:
p.add(frozenset((~x,)))
ll = ll._or(CNF(p))
return ll
def rcall(self, expr):
clause_list = list()
for clause in self.clauses:
lits = [arg.rcall(expr) for arg in clause]
clause_list.append(OR(*lits))
expr = AND(*clause_list)
return distribute_AND_over_OR(expr)
@classmethod
def all_or(cls, *cnfs):
b = cnfs[0].copy()
for rest in cnfs[1:]:
b = b._or(rest)
return b
@classmethod
def all_and(cls, *cnfs):
b = cnfs[0].copy()
for rest in cnfs[1:]:
b = b._and(rest)
return b
@classmethod
def to_CNF(cls, expr):
from sympy.assumptions.facts import get_composite_predicates
expr = to_NNF(expr, get_composite_predicates())
expr = distribute_AND_over_OR(expr)
return expr
@classmethod
def CNF_to_cnf(cls, cnf):
"""
Converts a CNF object to a SymPy boolean expression,
retaining the form of the expression.
"""
def remove_literal(arg):
return Not(arg.lit) if arg.is_Not else arg.lit
return And(*(Or(*(remove_literal(arg) for arg in clause)) for clause in cnf.clauses))
class EncodedCNF:
"""
Class for encoding the CNF expression.
"""
def __init__(self, data=None, encoding=None):
if not data and not encoding:
data = list()
encoding = dict()
self.data = data
self.encoding = encoding
self._symbols = list(encoding.keys())
def from_cnf(self, cnf):
self._symbols = list(cnf.all_predicates())
n = len(self._symbols)
self.encoding = dict(list(zip(self._symbols, list(range(1, n + 1)))))
self.data = [self.encode(clause) for clause in cnf.clauses]
@property
def symbols(self):
return self._symbols
@property
def variables(self):
return range(1, len(self._symbols) + 1)
def copy(self):
new_data = [set(clause) for clause in self.data]
return EncodedCNF(new_data, dict(self.encoding))
def add_prop(self, prop):
cnf = CNF.from_prop(prop)
self.add_from_cnf(cnf)
def add_from_cnf(self, cnf):
clauses = [self.encode(clause) for clause in cnf.clauses]
self.data += clauses
def encode_arg(self, arg):
literal = arg.lit
value = self.encoding.get(literal, None)
if value is None:
n = len(self._symbols)
self._symbols.append(literal)
value = self.encoding[literal] = n + 1
if arg.is_Not:
return -value
else:
return value
def encode(self, clause):
return {self.encode_arg(arg) if not arg.lit == S.false else 0 for arg in clause}
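# Minimal usage sketch (comment only; the exact integers depend on iteration
# order): encoding assigns each distinct predicate a positive integer and a
# negated literal is stored as the negative of that integer, e.g.
#   cnf = CNF.from_prop(Q.zero(x) | ~Q.positive(x))
#   enc = EncodedCNF(); enc.from_cnf(cnf)
#   enc.data  ->  [{1, -2}]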
|
f57acd2763aea0efaff855f86714e4395587de6ae94d889817c98d4c52a4f229 | from collections import defaultdict
from sympy.assumptions.ask import Q
from sympy.core import (Add, Mul, Pow, Number, NumberSymbol, Symbol)
from sympy.core.numbers import ImaginaryUnit
from sympy.functions.elementary.complexes import Abs
from sympy.logic.boolalg import (Equivalent, And, Or, Implies)
from sympy.matrices.expressions import MatMul
# APIs here may be subject to change
### Helper functions ###
def allargs(symbol, fact, expr):
"""
Apply all arguments of the expression to the fact structure.
Parameters
==========
symbol : Symbol
A placeholder symbol.
fact : Boolean
Resulting ``Boolean`` expression.
expr : Expr
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.sathandlers import allargs
>>> from sympy.abc import x, y
>>> allargs(x, Q.negative(x) | Q.positive(x), x*y)
(Q.negative(x) | Q.positive(x)) & (Q.negative(y) | Q.positive(y))
"""
return And(*[fact.subs(symbol, arg) for arg in expr.args])
def anyarg(symbol, fact, expr):
"""
Apply any argument of the expression to the fact structure.
Parameters
==========
symbol : Symbol
A placeholder symbol.
fact : Boolean
Resulting ``Boolean`` expression.
expr : Expr
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.sathandlers import anyarg
>>> from sympy.abc import x, y
>>> anyarg(x, Q.negative(x) & Q.positive(x), x*y)
(Q.negative(x) & Q.positive(x)) | (Q.negative(y) & Q.positive(y))
"""
return Or(*[fact.subs(symbol, arg) for arg in expr.args])
def exactlyonearg(symbol, fact, expr):
"""
Apply exactly one argument of the expression to the fact structure.
Parameters
==========
symbol : Symbol
A placeholder symbol.
fact : Boolean
Resulting ``Boolean`` expression.
expr : Expr
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.sathandlers import exactlyonearg
>>> from sympy.abc import x, y
>>> exactlyonearg(x, Q.positive(x), x*y)
(Q.positive(x) & ~Q.positive(y)) | (Q.positive(y) & ~Q.positive(x))
"""
pred_args = [fact.subs(symbol, arg) for arg in expr.args]
res = Or(*[And(pred_args[i], *[~lit for lit in pred_args[:i] +
pred_args[i+1:]]) for i in range(len(pred_args))])
return res
### Fact registry ###
class ClassFactRegistry:
"""
Register handlers against classes.
Explanation
===========
The ``register`` method registers a handler function for a class. Here, the
handler function should return a single fact. The ``multiregister`` method
registers a handler function for multiple classes. Here, the handler function
should return a container of multiple facts.
``registry(expr)`` returns a set of facts for *expr*.
Examples
========
Here, we register the facts for ``Abs``.
>>> from sympy import Abs, Equivalent, Q
>>> from sympy.assumptions.sathandlers import ClassFactRegistry
>>> reg = ClassFactRegistry()
>>> @reg.register(Abs)
... def f1(expr):
... return Q.nonnegative(expr)
>>> @reg.register(Abs)
... def f2(expr):
... arg = expr.args[0]
... return Equivalent(~Q.zero(arg), ~Q.zero(expr))
Calling the registry with an expression returns the defined facts for that
expression.
>>> from sympy.abc import x
>>> reg(Abs(x))
{Q.nonnegative(Abs(x)), Equivalent(~Q.zero(x), ~Q.zero(Abs(x)))}
Multiple facts can be registered at once by ``multiregister`` method.
>>> reg2 = ClassFactRegistry()
>>> @reg2.multiregister(Abs)
... def _(expr):
... arg = expr.args[0]
... return [Q.even(arg) >> Q.even(expr), Q.odd(arg) >> Q.odd(expr)]
>>> reg2(Abs(x))
{Implies(Q.even(x), Q.even(Abs(x))), Implies(Q.odd(x), Q.odd(Abs(x)))}
"""
def __init__(self):
self.singlefacts = defaultdict(frozenset)
self.multifacts = defaultdict(frozenset)
def register(self, cls):
def _(func):
self.singlefacts[cls] |= {func}
return func
return _
def multiregister(self, *classes):
def _(func):
for cls in classes:
self.multifacts[cls] |= {func}
return func
return _
def __getitem__(self, key):
ret1 = self.singlefacts[key]
for k in self.singlefacts:
if issubclass(key, k):
ret1 |= self.singlefacts[k]
ret2 = self.multifacts[key]
for k in self.multifacts:
if issubclass(key, k):
ret2 |= self.multifacts[k]
return ret1, ret2
def __call__(self, expr):
ret = set()
handlers1, handlers2 = self[expr.func]
for h in handlers1:
ret.add(h(expr))
for h in handlers2:
ret.update(h(expr))
return ret
class_fact_registry = ClassFactRegistry()
### Class fact registration ###
x = Symbol('x')
## Abs ##
@class_fact_registry.multiregister(Abs) # type: ignore
def _(expr):
arg = expr.args[0]
return [Q.nonnegative(expr),
Equivalent(~Q.zero(arg), ~Q.zero(expr)),
Q.even(arg) >> Q.even(expr),
Q.odd(arg) >> Q.odd(expr),
Q.integer(arg) >> Q.integer(expr),
]
### Add ##
@class_fact_registry.multiregister(Add) # type: ignore
def _(expr):
return [allargs(x, Q.positive(x), expr) >> Q.positive(expr),
allargs(x, Q.negative(x), expr) >> Q.negative(expr),
allargs(x, Q.real(x), expr) >> Q.real(expr),
allargs(x, Q.rational(x), expr) >> Q.rational(expr),
allargs(x, Q.integer(x), expr) >> Q.integer(expr),
exactlyonearg(x, ~Q.integer(x), expr) >> ~Q.integer(expr),
]
@class_fact_registry.register(Add) # type: ignore
def _(expr):
allargs_real = allargs(x, Q.real(x), expr)
onearg_irrational = exactlyonearg(x, Q.irrational(x), expr)
return Implies(allargs_real, Implies(onearg_irrational, Q.irrational(expr)))
### Mul ###
@class_fact_registry.multiregister(Mul) # type: ignore
def _(expr):
return [Equivalent(Q.zero(expr), anyarg(x, Q.zero(x), expr)),
allargs(x, Q.positive(x), expr) >> Q.positive(expr),
allargs(x, Q.real(x), expr) >> Q.real(expr),
allargs(x, Q.rational(x), expr) >> Q.rational(expr),
allargs(x, Q.integer(x), expr) >> Q.integer(expr),
exactlyonearg(x, ~Q.rational(x), expr) >> ~Q.integer(expr),
allargs(x, Q.commutative(x), expr) >> Q.commutative(expr),
]
@class_fact_registry.register(Mul) # type: ignore
def _(expr):
# Implicitly assumes Mul has more than one arg
# Would be allargs(x, Q.prime(x) | Q.composite(x)) except 1 is neither prime nor composite
# More advanced prime assumptions will require inequalities, as 1 provides
# a corner case.
allargs_prime = allargs(x, Q.prime(x), expr)
return Implies(allargs_prime, ~Q.prime(expr))
@class_fact_registry.register(Mul) # type: ignore
def _(expr):
# General Case: Odd number of imaginary args implies mul is imaginary (To be implemented)
allargs_imag_or_real = allargs(x, Q.imaginary(x) | Q.real(x), expr)
onearg_imaginary = exactlyonearg(x, Q.imaginary(x), expr)
return Implies(allargs_imag_or_real, Implies(onearg_imaginary, Q.imaginary(expr)))
@class_fact_registry.register(Mul) # type: ignore
def _(expr):
allargs_real = allargs(x, Q.real(x), expr)
onearg_irrational = exactlyonearg(x, Q.irrational(x), expr)
return Implies(allargs_real, Implies(onearg_irrational, Q.irrational(expr)))
@class_fact_registry.register(Mul) # type: ignore
def _(expr):
# Including the integer qualification means we don't need to add any facts
# for odd, since the assumptions already know that every integer is
# exactly one of even or odd.
allargs_integer = allargs(x, Q.integer(x), expr)
anyarg_even = anyarg(x, Q.even(x), expr)
return Implies(allargs_integer, Equivalent(anyarg_even, Q.even(expr)))
### MatMul ###
@class_fact_registry.register(MatMul) # type: ignore
def _(expr):
allargs_square = allargs(x, Q.square(x), expr)
allargs_invertible = allargs(x, Q.invertible(x), expr)
return Implies(allargs_square, Equivalent(Q.invertible(expr), allargs_invertible))
### Pow ###
@class_fact_registry.multiregister(Pow) # type: ignore
def _(expr):
base, exp = expr.base, expr.exp
return [
(Q.real(base) & Q.even(exp) & Q.nonnegative(exp)) >> Q.nonnegative(expr),
(Q.nonnegative(base) & Q.odd(exp) & Q.nonnegative(exp)) >> Q.nonnegative(expr),
(Q.nonpositive(base) & Q.odd(exp) & Q.nonnegative(exp)) >> Q.nonpositive(expr),
Equivalent(Q.zero(expr), Q.zero(base) & Q.positive(exp))
]
### Numbers ###
_old_assump_getters = {
Q.positive: lambda o: o.is_positive,
Q.zero: lambda o: o.is_zero,
Q.negative: lambda o: o.is_negative,
Q.rational: lambda o: o.is_rational,
Q.irrational: lambda o: o.is_irrational,
Q.even: lambda o: o.is_even,
Q.odd: lambda o: o.is_odd,
Q.imaginary: lambda o: o.is_imaginary,
Q.prime: lambda o: o.is_prime,
Q.composite: lambda o: o.is_composite,
}
@class_fact_registry.multiregister(Number, NumberSymbol, ImaginaryUnit) # type: ignore
def _(expr):
ret = []
for p, getter in _old_assump_getters.items():
pred = p(expr)
prop = getter(expr)
if prop is not None:
ret.append(Equivalent(pred, prop))
return ret
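# Illustrative note (comment only): for a concrete number such as ``2`` every
# old-style assumption that is not None (``is_positive=True``, ``is_zero=False``,
# ...) contributes an ``Equivalent`` fact tying the corresponding predicate,
# e.g. ``Q.positive(2)``, to that truth value.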
|
234854f46260a8df2e287fbc645b220404a1a4c0adf14b2706ee43d7b0c9ed50 | """A functions module, includes all the standard functions.
Combinatorial - factorial, fibonacci, harmonic, bernoulli...
Elementary - hyperbolic, trigonometric, exponential, floor and ceiling, sqrt...
Special - gamma, zeta,spherical harmonics...
"""
from sympy.functions.combinatorial.factorials import (factorial, factorial2,
rf, ff, binomial, RisingFactorial, FallingFactorial, subfactorial)
from sympy.functions.combinatorial.numbers import (carmichael, fibonacci, lucas, tribonacci,
harmonic, bernoulli, bell, euler, catalan, genocchi, partition, motzkin)
from sympy.functions.elementary.miscellaneous import (sqrt, root, Min, Max,
Id, real_root, cbrt, Rem)
from sympy.functions.elementary.complexes import (re, im, sign, Abs,
conjugate, arg, polar_lift, periodic_argument, unbranched_argument,
principal_branch, transpose, adjoint, polarify, unpolarify)
from sympy.functions.elementary.trigonometric import (sin, cos, tan,
sec, csc, cot, sinc, asin, acos, atan, asec, acsc, acot, atan2)
from sympy.functions.elementary.exponential import (exp_polar, exp, log,
LambertW)
from sympy.functions.elementary.hyperbolic import (sinh, cosh, tanh, coth,
sech, csch, asinh, acosh, atanh, acoth, asech, acsch)
from sympy.functions.elementary.integers import floor, ceiling, frac
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.special.error_functions import (erf, erfc, erfi, erf2,
erfinv, erfcinv, erf2inv, Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc)
from sympy.functions.special.gamma_functions import (gamma, lowergamma,
uppergamma, polygamma, loggamma, digamma, trigamma, multigamma)
from sympy.functions.special.zeta_functions import (dirichlet_eta, zeta,
lerchphi, polylog, stieltjes, riemann_xi)
from sympy.functions.special.tensor_functions import (Eijk, LeviCivita,
KroneckerDelta)
from sympy.functions.special.singularity_functions import SingularityFunction
from sympy.functions.special.delta_functions import DiracDelta, Heaviside
from sympy.functions.special.bsplines import bspline_basis, bspline_basis_set, interpolating_spline
from sympy.functions.special.bessel import (besselj, bessely, besseli, besselk,
hankel1, hankel2, jn, yn, jn_zeros, hn1, hn2, airyai, airybi, airyaiprime, airybiprime, marcumq)
from sympy.functions.special.hyper import hyper, meijerg, appellf1
from sympy.functions.special.polynomials import (legendre, assoc_legendre,
hermite, chebyshevt, chebyshevu, chebyshevu_root, chebyshevt_root,
laguerre, assoc_laguerre, gegenbauer, jacobi, jacobi_normalized)
from sympy.functions.special.spherical_harmonics import Ynm, Ynm_c, Znm
from sympy.functions.special.elliptic_integrals import (elliptic_k,
elliptic_f, elliptic_e, elliptic_pi)
from sympy.functions.special.beta_functions import beta, betainc, betainc_regularized
from sympy.functions.special.mathieu_functions import (mathieus, mathieuc,
mathieusprime, mathieucprime)
ln = log
__all__ = [
'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial',
'FallingFactorial', 'subfactorial',
'carmichael', 'fibonacci', 'lucas', 'motzkin', 'tribonacci', 'harmonic',
'bernoulli', 'bell', 'euler', 'catalan', 'genocchi', 'partition',
'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'cbrt', 'Rem',
're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
'transpose', 'adjoint', 'polarify', 'unpolarify',
'sin', 'cos', 'tan', 'sec', 'csc', 'cot', 'sinc', 'asin', 'acos', 'atan',
'asec', 'acsc', 'acot', 'atan2',
'exp_polar', 'exp', 'ln', 'log', 'LambertW',
'sinh', 'cosh', 'tanh', 'coth', 'sech', 'csch', 'asinh', 'acosh', 'atanh',
'acoth', 'asech', 'acsch',
'floor', 'ceiling', 'frac',
'Piecewise', 'piecewise_fold',
'erf', 'erfc', 'erfi', 'erf2', 'erfinv', 'erfcinv', 'erf2inv', 'Ei',
'expint', 'E1', 'li', 'Li', 'Si', 'Ci', 'Shi', 'Chi', 'fresnels',
'fresnelc',
'gamma', 'lowergamma', 'uppergamma', 'polygamma', 'loggamma', 'digamma',
'trigamma', 'multigamma',
'dirichlet_eta', 'zeta', 'lerchphi', 'polylog', 'stieltjes', 'riemann_xi',
'Eijk', 'LeviCivita', 'KroneckerDelta',
'SingularityFunction',
'DiracDelta', 'Heaviside',
'bspline_basis', 'bspline_basis_set', 'interpolating_spline',
'besselj', 'bessely', 'besseli', 'besselk', 'hankel1', 'hankel2', 'jn',
'yn', 'jn_zeros', 'hn1', 'hn2', 'airyai', 'airybi', 'airyaiprime',
'airybiprime', 'marcumq',
'hyper', 'meijerg', 'appellf1',
'legendre', 'assoc_legendre', 'hermite', 'chebyshevt', 'chebyshevu',
'chebyshevu_root', 'chebyshevt_root', 'laguerre', 'assoc_laguerre',
'gegenbauer', 'jacobi', 'jacobi_normalized',
'Ynm', 'Ynm_c', 'Znm',
'elliptic_k', 'elliptic_f', 'elliptic_e', 'elliptic_pi',
'beta', 'betainc', 'betainc_regularized',
'mathieus', 'mathieuc', 'mathieusprime', 'mathieucprime',
]
|
4ceb2e900c9bce4ffa659f89ded25dbe8751b7bc349453ccb60572fb78fd30da | from sympy.core.add import Add
from sympy.core.exprtools import factor_terms
from sympy.core.function import expand_log, _mexpand
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly, factor
from sympy.simplify.simplify import separatevars
from sympy.simplify.radsimp import collect
from sympy.simplify.simplify import powsimp
from sympy.solvers.solvers import solve, _invert
from sympy.utilities.iterables import uniq
def _filtered_gens(poly, symbol):
"""process the generators of ``poly``, returning the set of generators that
have ``symbol``. If there are two generators that are inverses of each other,
prefer the one that has no denominator.
Examples
========
>>> from sympy.solvers.bivariate import _filtered_gens
>>> from sympy import Poly, exp
>>> from sympy.abc import x
>>> _filtered_gens(Poly(x + 1/x + exp(x)), x)
{x, exp(x)}
"""
gens = {g for g in poly.gens if symbol in g.free_symbols}
for g in list(gens):
ag = 1/g
if g in gens and ag in gens:
if ag.as_numer_denom()[1] is not S.One:
g = ag
gens.remove(g)
return gens
def _mostfunc(lhs, func, X=None):
"""Returns the term in lhs which contains the most of the
func-type things e.g. log(log(x)) wins over log(x) if both terms appear.
``func`` can be a function (exp, log, etc...) or any other SymPy object,
like Pow.
If ``X`` is not ``None``, then the function returns the term with the most
nested ``func`` applications that contains the specified variable.
Examples
========
>>> from sympy.solvers.bivariate import _mostfunc
>>> from sympy.functions.elementary.exponential import exp
>>> from sympy.abc import x, y
>>> _mostfunc(exp(x) + exp(exp(x) + 2), exp)
exp(exp(x) + 2)
>>> _mostfunc(exp(x) + exp(exp(y) + 2), exp)
exp(exp(y) + 2)
>>> _mostfunc(exp(x) + exp(exp(y) + 2), exp, x)
exp(x)
>>> _mostfunc(x, exp, x) is None
True
>>> _mostfunc(exp(x) + exp(x*y), exp, x)
exp(x)
"""
fterms = [tmp for tmp in lhs.atoms(func) if (not X or
X.is_Symbol and X in tmp.free_symbols or
not X.is_Symbol and tmp.has(X))]
if len(fterms) == 1:
return fterms[0]
elif fterms:
return max(list(ordered(fterms)), key=lambda x: x.count(func))
return None
def _linab(arg, symbol):
"""Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``
where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are
independent of ``symbol``.
Examples
========
>>> from sympy.functions.elementary.exponential import exp
>>> from sympy.solvers.bivariate import _linab
>>> from sympy.abc import x, y
>>> from sympy import S
>>> _linab(S(2), x)
(2, 0, 1)
>>> _linab(2*x, x)
(2, 0, x)
>>> _linab(y + y*x + 2*x, x)
(y + 2, y, x)
>>> _linab(3 + 2*exp(x), x)
(2, 3, exp(x))
"""
arg = factor_terms(arg.expand())
ind, dep = arg.as_independent(symbol)
if arg.is_Mul and dep.is_Add:
a, b, x = _linab(dep, symbol)
return ind*a, ind*b, x
if not arg.is_Add:
b = 0
a, x = ind, dep
else:
b = ind
a, x = separatevars(dep).as_independent(symbol, as_Add=False)
if x.could_extract_minus_sign():
a = -a
x = -x
return a, b, x
def _lambert(eq, x):
"""
Given an expression assumed to be in the form
``F(X, a..f) = a*log(b*X + c) + d*X + f = 0``
where X = g(x) and x = g^-1(X), return the Lambert solution,
``x = g^-1(-c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(-f/a)))``.
"""
eq = _mexpand(expand_log(eq))
mainlog = _mostfunc(eq, log, x)
if not mainlog:
return [] # violated assumptions
other = eq.subs(mainlog, 0)
if isinstance(-other, log):
eq = (eq - other).subs(mainlog, mainlog.args[0])
mainlog = mainlog.args[0]
if not isinstance(mainlog, log):
return [] # violated assumptions
other = -(-other).args[0]
eq += other
if x not in other.free_symbols:
return [] # violated assumptions
d, f, X2 = _linab(other, x)
logterm = collect(eq - other, mainlog)
a = logterm.as_coefficient(mainlog)
if a is None or x in a.free_symbols:
return [] # violated assumptions
logarg = mainlog.args[0]
b, c, X1 = _linab(logarg, x)
if X1 != X2:
return [] # violated assumptions
# invert the generator X1 so we have x(u)
u = Dummy('rhs')
xusolns = solve(X1 - u, x)
# There are infinitely many branches for LambertW
# but only branches for k = -1 and 0 might be real. The k = 0
# branch is real and the k = -1 branch is real if the LambertW argument
# is in the range [-1/e, 0]. Since `solve` does not return infinite
# solutions we will only include the -1 branch if it tests as real.
# Otherwise, inclusion of any LambertW in the solution indicates to
# the user that there are imaginary solutions corresponding to
# different k values.
lambert_real_branches = [-1, 0]
sol = []
# solution of the given Lambert equation is like
# sol = -c/b + (a/d)*LambertW(arg, k),
# where arg = d/(a*b)*exp((c*d-b*f)/a/b) and k in lambert_real_branches.
# Instead of considering the single arg, `d/(a*b)*exp((c*d-b*f)/a/b)`,
# the individual `p` roots obtained when writing `exp((c*d-b*f)/a/b)`
# as `exp(A/p) = exp(A)**(1/p)`, where `p` is an Integer, are used.
# calculating args for LambertW
num, den = ((c*d-b*f)/a/b).as_numer_denom()
p, den = den.as_coeff_Mul()
e = exp(num/den)
t = Dummy('t')
args = [d/(a*b)*t for t in roots(t**p - e, t).keys()]
# calculating solutions from args
for arg in args:
for k in lambert_real_branches:
w = LambertW(arg, k)
if k and not w.is_real:
continue
rhs = -c/b + (a/d)*w
for xu in xusolns:
sol.append(xu.subs(u, rhs))
return sol
def _solve_lambert(f, symbol, gens):
"""Return solution to ``f`` if it is a Lambert-type expression
else raise NotImplementedError.
For ``f(X, a..f) = a*log(b*X + c) + d*X - f = 0`` the solution
for ``X`` is ``X = -c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(f/a))``.
There are a variety of forms for `f(X, a..f)` as enumerated below:
1a1)
if B**B = R for R not in [0, 1] (since those cases would already
be solved before getting here) then log of both sides gives
log(B) + log(log(B)) = log(log(R)) and
X = log(B), a = 1, b = 1, c = 0, d = 1, f = log(log(R))
1a2)
if B*(b*log(B) + c)**a = R then log of both sides gives
log(B) + a*log(b*log(B) + c) = log(R) and
X = log(B), d=1, f=log(R)
1b)
if a*log(b*B + c) + d*B = R and
X = B, f = R
2a)
if (b*B + c)*exp(d*B + g) = R then log of both sides gives
log(b*B + c) + d*B + g = log(R) and
X = B, a = 1, f = log(R) - g
2b)
if g*exp(d*B + h) - b*B = c then the log form is
log(g) + d*B + h - log(b*B + c) = 0 and
X = B, a = -1, f = -h - log(g)
3)
if d*p**(a*B + g) - b*B = c then the log form is
log(d) + (a*B + g)*log(p) - log(b*B + c) = 0 and
X = B, a = -1, d = a*log(p), f = -log(d) - g*log(p)
"""
def _solve_even_degree_expr(expr, t, symbol):
"""Return the unique solutions of equations derived from
``expr`` by replacing ``t`` with ``+/- symbol``.
Parameters
==========
expr : Expr
The expression which includes a dummy variable t to be
replaced with +symbol and -symbol.
symbol : Symbol
The symbol for which a solution is being sought.
Returns
=======
List of unique solutions of the two equations generated by
replacing ``t`` with positive and negative ``symbol``.
Notes
=====
If ``expr = 2*log(t) + x/2`` then solutions for
``2*log(x) + x/2 = 0`` and ``2*log(-x) + x/2 = 0`` are
returned by this function. Though this may seem
counter-intuitive, one must note that the ``expr`` being
solved here has been derived from a different expression. For
an expression like ``eq = x**2*g(x) = 1``, if we take the
log of both sides we obtain ``log(x**2) + log(g(x)) = 0``. If
x is positive then this simplifies to
``2*log(x) + log(g(x)) = 0``; the Lambert-solving routines will
return solutions for this, but we must also consider the
solutions for ``2*log(-x) + log(g(x))`` since those must also
be a solution of ``eq`` which has the same value when the ``x``
in ``x**2`` is negated. If `g(x)` does not have even powers of
symbol then we do not want to replace the ``x`` there with
``-x``. So the role of the ``t`` in the expression received by
this function is to mark where ``+/-x`` should be inserted
before obtaining the Lambert solutions.
"""
nlhs, plhs = [
expr.xreplace({t: sgn*symbol}) for sgn in (-1, 1)]
sols = _solve_lambert(nlhs, symbol, gens)
if plhs != nlhs:
sols.extend(_solve_lambert(plhs, symbol, gens))
# uniq is needed for a case like
# 2*log(t) - log(-z**2) + log(z + log(x) + log(z))
# where substituting t with +/-x gives all the same solution;
# uniq, rather than list(set()), is used to maintain canonical
# order
return list(uniq(sols))
nrhs, lhs = f.as_independent(symbol, as_Add=True)
rhs = -nrhs
lamcheck = [tmp for tmp in gens
if (tmp.func in [exp, log] or
(tmp.is_Pow and symbol in tmp.exp.free_symbols))]
if not lamcheck:
raise NotImplementedError()
if lhs.is_Add or lhs.is_Mul:
# replacing all even_degrees of symbol with dummy variable t
# since these will need special handling; non-Add/Mul do not
# need this handling
t = Dummy('t', **symbol.assumptions0)
lhs = lhs.replace(
lambda i: # find symbol**even
i.is_Pow and i.base == symbol and i.exp.is_even,
lambda i: # replace t**even
t**i.exp)
if lhs.is_Add and lhs.has(t):
t_indep = lhs.subs(t, 0)
t_term = lhs - t_indep
_rhs = rhs - t_indep
if not t_term.is_Add and _rhs and not (
t_term.has(S.ComplexInfinity, S.NaN)):
eq = expand_log(log(t_term) - log(_rhs))
return _solve_even_degree_expr(eq, t, symbol)
elif lhs.is_Mul and rhs:
# this needs to happen whether t is present or not
lhs = expand_log(log(lhs), force=True)
rhs = log(rhs)
if lhs.has(t) and lhs.is_Add:
# it expanded from Mul to Add
eq = lhs - rhs
return _solve_even_degree_expr(eq, t, symbol)
# restore symbol in lhs
lhs = lhs.xreplace({t: symbol})
lhs = powsimp(factor(lhs, deep=True))
# make sure we have inverted as completely as possible
r = Dummy()
i, lhs = _invert(lhs - r, symbol)
rhs = i.xreplace({r: rhs})
# For the first forms:
#
# 1a1) B**B = R will arrive here as B*log(B) = log(R)
# lhs is Mul so take log of both sides:
# log(B) + log(log(B)) = log(log(R))
# 1a2) B*(b*log(B) + c)**a = R will arrive unchanged so
# lhs is Mul, so take log of both sides:
# log(B) + a*log(b*log(B) + c) = log(R)
# 1b) d*log(a*B + b) + c*B = R will arrive unchanged so
# lhs is Add, so isolate c*B and expand log of both sides:
# log(c) + log(B) = log(R - d*log(a*B + b))
soln = []
if not soln:
mainlog = _mostfunc(lhs, log, symbol)
if mainlog:
if lhs.is_Mul and rhs != 0:
soln = _lambert(log(lhs) - log(rhs), symbol)
elif lhs.is_Add:
other = lhs.subs(mainlog, 0)
if other and not other.is_Add and [
tmp for tmp in other.atoms(Pow)
if symbol in tmp.free_symbols]:
if not rhs:
diff = log(other) - log(other - lhs)
else:
diff = log(lhs - other) - log(rhs - other)
soln = _lambert(expand_log(diff), symbol)
else:
#it's ready to go
soln = _lambert(lhs - rhs, symbol)
# For the next forms,
#
# collect on main exp
# 2a) (b*B + c)*exp(d*B + g) = R
# lhs is mul, so take log of both sides:
# log(b*B + c) + d*B = log(R) - g
# 2b) g*exp(d*B + h) - b*B = R
# lhs is add, so add b*B to both sides,
# take the log of both sides and rearrange to give
# log(R + b*B) - d*B = log(g) + h
if not soln:
mainexp = _mostfunc(lhs, exp, symbol)
if mainexp:
lhs = collect(lhs, mainexp)
if lhs.is_Mul and rhs != 0:
soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
elif lhs.is_Add:
# move all but mainexp-containing term to rhs
other = lhs.subs(mainexp, 0)
mainterm = lhs - other
rhs = rhs - other
if (mainterm.could_extract_minus_sign() and
rhs.could_extract_minus_sign()):
mainterm *= -1
rhs *= -1
diff = log(mainterm) - log(rhs)
soln = _lambert(expand_log(diff), symbol)
# For the last form:
#
# 3) d*p**(a*B + g) - b*B = c
# collect on main pow, add b*B to both sides,
# take log of both sides and rearrange to give
# a*B*log(p) - log(b*B + c) = -log(d) - g*log(p)
if not soln:
mainpow = _mostfunc(lhs, Pow, symbol)
if mainpow and symbol in mainpow.exp.free_symbols:
lhs = collect(lhs, mainpow)
if lhs.is_Mul and rhs != 0:
# b*B = 0
soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
elif lhs.is_Add:
# move all but mainpow-containing term to rhs
other = lhs.subs(mainpow, 0)
mainterm = lhs - other
rhs = rhs - other
diff = log(mainterm) - log(rhs)
soln = _lambert(expand_log(diff), symbol)
if not soln:
raise NotImplementedError('%s does not appear to have a solution in '
'terms of LambertW' % f)
return list(ordered(soln))
def bivariate_type(f, x, y, *, first=True):
"""Given an expression, f, 3 tests will be done to see what type
of composite bivariate it might be, options for u(x, y) are::
x*y
x+y
x*y+x
x*y+y
If it matches one of these types, ``u(x, y)``, ``P(u)`` and dummy
variable ``u`` will be returned. Solving ``P(u)`` for ``u`` and
equating the solutions to ``u(x, y)`` and then solving for ``x`` or
``y`` is equivalent to solving the original expression for ``x`` or
``y``. If ``x`` and ``y`` represent two functions in the same
variable, e.g. ``x = g(t)`` and ``y = h(t)``, then if ``u(x, y) - p``
can be solved for ``t`` then these represent the solutions to
``P(u) = 0`` when ``p`` are the solutions of ``P(u) = 0``.
Only positive values of ``u`` are considered.
Examples
========
>>> from sympy.solvers.solvers import solve
>>> from sympy.solvers.bivariate import bivariate_type
>>> from sympy.abc import x, y
>>> eq = (x**2 - 3).subs(x, x + y)
>>> bivariate_type(eq, x, y)
(x + y, _u**2 - 3, _u)
>>> uxy, pu, u = _
>>> usol = solve(pu, u); usol
[sqrt(3)]
>>> [solve(uxy - s) for s in solve(pu, u)]
[[{x: -y + sqrt(3)}]]
>>> all(eq.subs(s).equals(0) for sol in _ for s in sol)
True
"""
u = Dummy('u', positive=True)
if first:
p = Poly(f, x, y)
f = p.as_expr()
_x = Dummy()
_y = Dummy()
rv = bivariate_type(Poly(f.subs({x: _x, y: _y}), _x, _y), _x, _y, first=False)
if rv:
reps = {_x: x, _y: y}
return rv[0].xreplace(reps), rv[1].xreplace(reps), rv[2]
return
p = f
f = p.as_expr()
# f(x*y)
args = Add.make_args(p.as_expr())
new = []
for a in args:
a = _mexpand(a.subs(x, u/y))
free = a.free_symbols
if x in free or y in free:
break
new.append(a)
else:
return x*y, Add(*new), u
def ok(f, v, c):
new = _mexpand(f.subs(v, c))
free = new.free_symbols
return None if (x in free or y in free) else new
# f(a*x + b*y)
new = []
d = p.degree(x)
if p.degree(y) == d:
a = root(p.coeff_monomial(x**d), d)
b = root(p.coeff_monomial(y**d), d)
new = ok(f, x, (u - b*y)/a)
if new is not None:
return a*x + b*y, new, u
# f(a*x*y + b*y)
new = []
d = p.degree(x)
if p.degree(y) == d:
for itry in range(2):
a = root(p.coeff_monomial(x**d*y**d), d)
b = root(p.coeff_monomial(y**d), d)
new = ok(f, x, (u - b*y)/a/y)
if new is not None:
return a*x*y + b*y, new, u
x, y = y, x
|
0f0903912cc12129487e8f1d591dcbb41bbb090771be370c53140bf1a846b9fd | r"""
This module is intended for solving recurrences or, in other words,
difference equations. Currently supported are linear, inhomogeneous
equations with polynomial or rational coefficients.
The solutions are obtained among polynomials, rational functions,
hypergeometric terms, or combinations of hypergeometric term which
are pairwise dissimilar.
``rsolve_X`` functions were meant as a low level interface
for ``rsolve`` which would use Mathematica's syntax.
Given a recurrence relation:
.. math:: a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) +
... + a_{0}(n) y(n) = f(n)
where `k > 0` and `a_{i}(n)` are polynomials in `n`. To use
``rsolve_X`` we need to put all coefficients into a list ``L`` of
`k+1` elements in the following way:
``L = [a_{0}(n), ..., a_{k-1}(n), a_{k}(n)]``
where ``L[i]``, for `i=0, \ldots, k`, maps to
`a_{i}(n) y(n+i)` (`y(n+i)` is implicit).
For example if we would like to compute `m`-th Bernoulli polynomial
up to a constant (example was taken from rsolve_poly docstring),
then we would use `b(n+1) - b(n) = m n^{m-1}` recurrence, which
has solution `b(n) = B_m + C`.
Then ``L = [-1, 1]`` and `f(n) = m n^{m-1}` and finally for `m=4`:
>>> from sympy import Symbol, bernoulli, rsolve_poly
>>> n = Symbol('n', integer=True)
>>> rsolve_poly([-1, 1], 4*n**3, n)
C0 + n**4 - 2*n**3 + n**2
>>> bernoulli(4, n)
n**4 - 2*n**3 + n**2 - 1/30
For the sake of completeness, `f(n)` can be:
[1] a polynomial -> rsolve_poly
[2] a rational function -> rsolve_ratio
[3] a hypergeometric function -> rsolve_hyper
"""
from collections import defaultdict
from sympy.core.singleton import S
from sympy.core.numbers import Rational, I
from sympy.core.symbol import Symbol, Wild, Dummy
from sympy.core.relational import Equality
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import sympify
from sympy.simplify import simplify, hypersimp, hypersimilar # type: ignore
from sympy.solvers import solve, solve_undetermined_coeffs
from sympy.polys import Poly, quo, gcd, lcm, roots, resultant
from sympy.functions import binomial, factorial, FallingFactorial, RisingFactorial
from sympy.matrices import Matrix, casoratian
from sympy.utilities.iterables import numbered_symbols
def rsolve_poly(coeffs, f, n, shift=0, **hints):
r"""
Given linear recurrence operator `\operatorname{L}` of order
`k` with polynomial coefficients and inhomogeneous equation
    `\operatorname{L} y = f`, where `f` is a polynomial, we seek
    all polynomial solutions over field `K` of characteristic zero.
The algorithm performs two basic steps:
(1) Compute degree `N` of the general polynomial solution.
        (2) Find all polynomial solutions of degree `N` or less
of `\operatorname{L} y = f`.
There are two methods for computing the polynomial solutions.
    If the degree bound is relatively small, i.e. it's smaller than
    or equal to the order of the recurrence, then the naive method of
    undetermined coefficients is used. This gives a system of
    algebraic equations with `N+1` unknowns.
    In the other case, the algorithm transforms the initial equation
    into an equivalent one for which the system of algebraic equations
    has only `r` indeterminates. This method is quite sophisticated
    (in comparison with the naive one) and was invented jointly by
    Abramov, Bronstein and Petkovsek.
It is possible to generalize the algorithm implemented here to
the case of linear q-difference and differential equations.
    Let's say that we would like to compute the `m`-th Bernoulli polynomial
up to a constant. For this we can use `b(n+1) - b(n) = m n^{m-1}`
recurrence, which has solution `b(n) = B_m + C`. For example:
>>> from sympy import Symbol, rsolve_poly
>>> n = Symbol('n', integer=True)
>>> rsolve_poly([-1, 1], 4*n**3, n)
C0 + n**4 - 2*n**3 + n**2
References
==========
.. [1] S. A. Abramov, M. Bronstein and M. Petkovsek, On polynomial
solutions of linear operator equations, in: T. Levelt, ed.,
Proc. ISSAC '95, ACM Press, New York, 1995, 290-296.
.. [2] M. Petkovsek, Hypergeometric solutions of linear recurrences
with polynomial coefficients, J. Symbolic Computation,
14 (1992), 243-264.
.. [3] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.
"""
f = sympify(f)
if not f.is_polynomial(n):
return None
homogeneous = f.is_zero
r = len(coeffs) - 1
coeffs = [Poly(coeff, n) for coeff in coeffs]
polys = [Poly(0, n)]*(r + 1)
terms = [(S.Zero, S.NegativeInfinity)]*(r + 1)
for i in range(r + 1):
for j in range(i, r + 1):
polys[i] += coeffs[j]*(binomial(j, i).as_poly(n))
if not polys[i].is_zero:
(exp,), coeff = polys[i].LT()
terms[i] = (coeff, exp)
d = b = terms[0][1]
for i in range(1, r + 1):
if terms[i][1] > d:
d = terms[i][1]
if terms[i][1] - i > b:
b = terms[i][1] - i
d, b = int(d), int(b)
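    # Build the "degree polynomial": its nonnegative integer roots (together
    # with deg(f) - b and -b - 1) determine the bound N on the degree of any
    # polynomial solution (step (1) of the algorithm).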
x = Dummy('x')
degree_poly = S.Zero
for i in range(r + 1):
if terms[i][1] - i == b:
degree_poly += terms[i][0]*FallingFactorial(x, i)
nni_roots = list(roots(degree_poly, x, filter='Z',
predicate=lambda r: r >= 0).keys())
if nni_roots:
N = [max(nni_roots)]
else:
N = []
if homogeneous:
N += [-b - 1]
else:
N += [f.as_poly(n).degree() - b, -b - 1]
N = int(max(N))
if N < 0:
if homogeneous:
if hints.get('symbols', False):
return (S.Zero, [])
else:
return S.Zero
else:
return None
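    # For a small degree bound (N <= r, the order of the recurrence) use the
    # naive method of undetermined coefficients directly.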
if N <= r:
C = []
y = E = S.Zero
for i in range(N + 1):
C.append(Symbol('C' + str(i + shift)))
y += C[i] * n**i
for i in range(r + 1):
E += coeffs[i].as_expr()*y.subs(n, n + i)
solutions = solve_undetermined_coeffs(E - f, C, n)
if solutions is not None:
C = [c for c in C if (c not in solutions)]
result = y.subs(solutions)
else:
return None # TBD
else:
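        # Large degree bound: use the Abramov-Bronstein-Petkovsek
        # transformation, which reduces the number of unknowns to the order r
        # of the recurrence.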
A = r
U = N + A + b + 1
nni_roots = list(roots(polys[r], filter='Z',
predicate=lambda r: r >= 0).keys())
if nni_roots != []:
a = max(nni_roots) + 1
else:
a = S.Zero
def _zero_vector(k):
return [S.Zero] * k
def _one_vector(k):
return [S.One] * k
def _delta(p, k):
B = S.One
D = p.subs(n, a + k)
for i in range(1, k + 1):
B *= Rational(i - k - 1, i)
D += B * p.subs(n, a + k - i)
return D
alpha = {}
for i in range(-A, d + 1):
I = _one_vector(d + 1)
for k in range(1, d + 1):
I[k] = I[k - 1] * (x + i - k + 1)/k
alpha[i] = S.Zero
for j in range(A + 1):
for k in range(d + 1):
B = binomial(k, i + j)
D = _delta(polys[j].as_expr(), k)
alpha[i] += I[k]*B*D
V = Matrix(U, A, lambda i, j: int(i == j))
if homogeneous:
for i in range(A, U):
v = _zero_vector(A)
for k in range(1, A + b + 1):
if i - k < 0:
break
B = alpha[k - A].subs(x, i - k)
for j in range(A):
v[j] += B * V[i - k, j]
denom = alpha[-A].subs(x, i)
for j in range(A):
V[i, j] = -v[j] / denom
else:
G = _zero_vector(U)
for i in range(A, U):
v = _zero_vector(A)
g = S.Zero
for k in range(1, A + b + 1):
if i - k < 0:
break
B = alpha[k - A].subs(x, i - k)
for j in range(A):
v[j] += B * V[i - k, j]
g += B * G[i - k]
denom = alpha[-A].subs(x, i)
for j in range(A):
V[i, j] = -v[j] / denom
G[i] = (_delta(f, i - A) - g) / denom
P, Q = _one_vector(U), _zero_vector(A)
for i in range(1, U):
P[i] = (P[i - 1] * (n - a - i + 1)/i).expand()
for i in range(A):
Q[i] = Add(*[(v*p).expand() for v, p in zip(V[:, i], P)])
if not homogeneous:
h = Add(*[(g*p).expand() for g, p in zip(G, P)])
C = [Symbol('C' + str(i + shift)) for i in range(A)]
g = lambda i: Add(*[c*_delta(q, i) for c, q in zip(C, Q)])
if homogeneous:
E = [g(i) for i in range(N + 1, U)]
else:
E = [g(i) + _delta(h, i) for i in range(N + 1, U)]
if E != []:
solutions = solve(E, *C)
if not solutions:
if homogeneous:
if hints.get('symbols', False):
return (S.Zero, [])
else:
return S.Zero
else:
return None
else:
solutions = {}
if homogeneous:
result = S.Zero
else:
result = h
for c, q in list(zip(C, Q)):
if c in solutions:
s = solutions[c]*q
C.remove(c)
else:
s = c*q
result += s.expand()
if hints.get('symbols', False):
return (result, C)
else:
return result
def rsolve_ratio(coeffs, f, n, **hints):
r"""
Given linear recurrence operator `\operatorname{L}` of order `k`
with polynomial coefficients and inhomogeneous equation
    `\operatorname{L} y = f`, where `f` is a polynomial, we seek
    all rational solutions over field `K` of characteristic zero.
    This procedure accepts only polynomials; however, if you are
    interested in solving a recurrence with rational coefficients,
    then use ``rsolve``, which will pre-process the given equation
    and run this procedure with polynomial arguments.
The algorithm performs two basic steps:
(1) Compute polynomial `v(n)` which can be used as universal
denominator of any rational solution of equation
`\operatorname{L} y = f`.
(2) Construct new linear difference equation by substitution
`y(n) = u(n)/v(n)` and solve it for `u(n)` finding all its
polynomial solutions. Return ``None`` if none were found.
    The algorithm implemented here is a revised version of Abramov's
    original algorithm, developed in 1989. The new approach is much
    simpler to implement and has better overall efficiency. This
    method can be easily adapted to the q-difference equations case.
    Besides finding rational solutions alone, this function is
    an important part of the Hyper algorithm, where it is used to find
    a particular solution for the inhomogeneous part of a recurrence.
Examples
========
>>> from sympy.abc import x
>>> from sympy.solvers.recurr import rsolve_ratio
>>> rsolve_ratio([-2*x**3 + x**2 + 2*x - 1, 2*x**3 + x**2 - 6*x,
... - 2*x**3 - 11*x**2 - 18*x - 9, 2*x**3 + 13*x**2 + 22*x + 8], 0, x)
C2*(2*x - 3)/(2*(x**2 - 1))
References
==========
.. [1] S. A. Abramov, Rational solutions of linear difference
and q-difference equations with polynomial coefficients,
in: T. Levelt, ed., Proc. ISSAC '95, ACM Press, New York,
1995, 285-289
See Also
========
rsolve_hyper
"""
f = sympify(f)
if not f.is_polynomial(n):
return None
coeffs = list(map(sympify, coeffs))
r = len(coeffs) - 1
A, B = coeffs[r], coeffs[0]
A = A.subs(n, n - r).expand()
h = Dummy('h')
res = resultant(A, B.subs(n, n + h), n)
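    # Nonnegative integer roots of this resultant are the shifts i for which
    # gcd(A(n), B(n + i)) is nontrivial; they drive the construction of the
    # universal denominator C below (Abramov's algorithm).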
if not res.is_polynomial(h):
p, q = res.as_numer_denom()
res = quo(p, q, h)
nni_roots = list(roots(res, h, filter='Z',
predicate=lambda r: r >= 0).keys())
if not nni_roots:
return rsolve_poly(coeffs, f, n, **hints)
else:
C, numers = S.One, [S.Zero]*(r + 1)
for i in range(int(max(nni_roots)), -1, -1):
d = gcd(A, B.subs(n, n + i), n)
A = quo(A, d, n)
B = quo(B, d.subs(n, n - i), n)
C *= Mul(*[d.subs(n, n - j) for j in range(i + 1)])
denoms = [C.subs(n, n + i) for i in range(r + 1)]
for i in range(r + 1):
g = gcd(coeffs[i], denoms[i], n)
numers[i] = quo(coeffs[i], g, n)
denoms[i] = quo(denoms[i], g, n)
for i in range(r + 1):
numers[i] *= Mul(*(denoms[:i] + denoms[i + 1:]))
result = rsolve_poly(numers, f * Mul(*denoms), n, **hints)
if result is not None:
if hints.get('symbols', False):
return (simplify(result[0] / C), result[1])
else:
return simplify(result / C)
else:
return None
def rsolve_hyper(coeffs, f, n, **hints):
r"""
Given linear recurrence operator `\operatorname{L}` of order `k`
with polynomial coefficients and inhomogeneous equation
    `\operatorname{L} y = f` we seek all hypergeometric solutions
over field `K` of characteristic zero.
The inhomogeneous part can be either hypergeometric or a sum
of a fixed number of pairwise dissimilar hypergeometric terms.
The algorithm performs three basic steps:
(1) Group together similar hypergeometric terms in the
inhomogeneous part of `\operatorname{L} y = f`, and find
particular solution using Abramov's algorithm.
(2) Compute generating set of `\operatorname{L}` and find basis
in it, so that all solutions are linearly independent.
(3) Form final solution with the number of arbitrary
constants equal to dimension of basis of `\operatorname{L}`.
    A term `a(n)` is hypergeometric if it is annihilated by a first order
    linear difference equation with polynomial coefficients or, in
    simpler words, if the consecutive term ratio is a rational function.
    The output of this procedure is a linear combination of a fixed
    number of hypergeometric terms. However, the underlying method
    can generate a larger class of solutions - D'Alembertian terms.
    Note also that this method not only computes the kernel of the
    inhomogeneous equation, but also reduces it to a basis so that the
    solutions generated by this procedure are linearly independent.
Examples
========
>>> from sympy.solvers import rsolve_hyper
>>> from sympy.abc import x
>>> rsolve_hyper([-1, -1, 1], 0, x)
C0*(1/2 - sqrt(5)/2)**x + C1*(1/2 + sqrt(5)/2)**x
>>> rsolve_hyper([-1, 1], 1 + x, x)
C0 + x*(x + 1)/2
References
==========
.. [1] M. Petkovsek, Hypergeometric solutions of linear recurrences
with polynomial coefficients, J. Symbolic Computation,
14 (1992), 243-264.
.. [2] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.
"""
from sympy.concrete import product
coeffs = list(map(sympify, coeffs))
f = sympify(f)
r, kernel, symbols = len(coeffs) - 1, [], set()
if not f.is_zero:
if f.is_Add:
similar = {}
for g in f.expand().args:
if not g.is_hypergeometric(n):
return None
for h in similar.keys():
if hypersimilar(g, h, n):
similar[h] += g
break
else:
similar[g] = S.Zero
inhomogeneous = []
for g, h in similar.items():
inhomogeneous.append(g + h)
elif f.is_hypergeometric(n):
inhomogeneous = [f]
else:
return None
for i, g in enumerate(inhomogeneous):
coeff, polys = S.One, coeffs[:]
denoms = [S.One]*(r + 1)
s = hypersimp(g, n)
for j in range(1, r + 1):
coeff *= s.subs(n, n + j - 1)
p, q = coeff.as_numer_denom()
polys[j] *= p
denoms[j] = q
for j in range(r + 1):
polys[j] *= Mul(*(denoms[:j] + denoms[j + 1:]))
R = rsolve_poly(polys, Mul(*denoms), n)
if not (R is None or R is S.Zero):
inhomogeneous[i] *= R
else:
return None
result = Add(*inhomogeneous)
else:
result = S.Zero
Z = Dummy('Z')
p, q = coeffs[0], coeffs[r].subs(n, n - r + 1)
p_factors = [z for z in roots(p, n).keys()]
q_factors = [z for z in roots(q, n).keys()]
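    # Candidate hypergeometric solutions are parametrized by pairs (A, B) of
    # monic linear factors taken from the trailing and leading coefficients;
    # each pair is tried in the loop below (Petkovsek's algorithm).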
factors = [(S.One, S.One)]
for p in p_factors:
for q in q_factors:
if p.is_integer and q.is_integer and p <= q:
continue
else:
factors += [(n - p, n - q)]
p = [(n - p, S.One) for p in p_factors]
q = [(S.One, n - q) for q in q_factors]
factors = p + factors + q
for A, B in factors:
polys, degrees = [], []
D = A*B.subs(n, n + r - 1)
for i in range(r + 1):
a = Mul(*[A.subs(n, n + j) for j in range(i)])
b = Mul(*[B.subs(n, n + j) for j in range(i, r)])
poly = quo(coeffs[i]*a*b, D, n)
polys.append(poly.as_poly(n))
if not poly.is_zero:
degrees.append(polys[i].degree())
if degrees:
d, poly = max(degrees), S.Zero
else:
return None
for i in range(r + 1):
coeff = polys[i].nth(d)
if coeff is not S.Zero:
poly += coeff * Z**i
for z in roots(poly, Z).keys():
if z.is_zero:
continue
recurr_coeffs = [polys[i].as_expr()*z**i for i in range(r + 1)]
if d == 0 and 0 != Add(*[recurr_coeffs[j]*j for j in range(1, r + 1)]):
# faster inline check (than calling rsolve_poly) for a
# constant solution to a constant coefficient recurrence.
C = Symbol("C" + str(len(symbols)))
s = [C]
else:
C, s = rsolve_poly(recurr_coeffs, 0, n, len(symbols), symbols=True)
if C is not None and C is not S.Zero:
symbols |= set(s)
ratio = z * A * C.subs(n, n + 1) / B / C
ratio = simplify(ratio)
# If there is a nonnegative root in the denominator of the ratio,
# this indicates that the term y(n_root) is zero, and one should
# start the product with the term y(n_root + 1).
n0 = 0
for n_root in roots(ratio.as_numer_denom()[1], n).keys():
if n_root.has(I):
return None
elif (n0 < (n_root + 1)) == True:
n0 = n_root + 1
K = product(ratio, (n, n0, n - 1))
if K.has(factorial, FallingFactorial, RisingFactorial):
K = simplify(K)
if casoratian(kernel + [K], n, zero=False) != 0:
kernel.append(K)
kernel.sort(key=default_sort_key)
sk = list(zip(numbered_symbols('C'), kernel))
if sk:
for C, ker in sk:
result += C * ker
else:
return None
if hints.get('symbols', False):
# XXX: This returns the symbols in a non-deterministic order
symbols |= {s for s, k in sk}
return (result, list(symbols))
else:
return result
def rsolve(f, y, init=None):
r"""
Solve univariate recurrence with rational coefficients.
Given `k`-th order linear recurrence `\operatorname{L} y = f`,
or equivalently:
.. math:: a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) +
\cdots + a_{0}(n) y(n) = f(n)
where `a_{i}(n)`, for `i=0, \ldots, k`, are polynomials or rational
functions in `n`, and `f` is a hypergeometric function or a sum
of a fixed number of pairwise dissimilar hypergeometric terms in
`n`, finds all solutions or returns ``None``, if none were found.
Initial conditions can be given as a dictionary in two forms:
(1) ``{ n_0 : v_0, n_1 : v_1, ..., n_m : v_m}``
(2) ``{y(n_0) : v_0, y(n_1) : v_1, ..., y(n_m) : v_m}``
or as a list ``L`` of values:
``L = [v_0, v_1, ..., v_m]``
where ``L[i] = v_i``, for `i=0, \ldots, m`, maps to `y(n_i)`.
Examples
========
    Let's consider the following recurrence:
.. math:: (n - 1) y(n + 2) - (n^2 + 3 n - 2) y(n + 1) +
2 n (n + 1) y(n) = 0
>>> from sympy import Function, rsolve
>>> from sympy.abc import n
>>> y = Function('y')
>>> f = (n - 1)*y(n + 2) - (n**2 + 3*n - 2)*y(n + 1) + 2*n*(n + 1)*y(n)
>>> rsolve(f, y(n))
2**n*C0 + C1*factorial(n)
>>> rsolve(f, y(n), {y(0):0, y(1):3})
3*2**n - 3*factorial(n)
See Also
========
rsolve_poly, rsolve_ratio, rsolve_hyper
"""
if isinstance(f, Equality):
f = f.lhs - f.rhs
n = y.args[0]
k = Wild('k', exclude=(n,))
# Preprocess user input to allow things like
# y(n) + a*(y(n + 1) + y(n - 1))/2
f = f.expand().collect(y.func(Wild('m', integer=True)))
h_part = defaultdict(list)
i_part = []
for g in Add.make_args(f):
coeff, dep = g.as_coeff_mul(y.func)
if not dep:
i_part.append(coeff)
continue
for h in dep:
if h.is_Function and h.func == y.func:
result = h.args[0].match(n + k)
if result is not None:
h_part[int(result[k])].append(coeff)
continue
raise ValueError(
"'%s(%s + k)' expected, got '%s'" % (y.func, n, h))
for k in h_part:
h_part[k] = Add(*h_part[k])
h_part.default_factory = lambda: 0
i_part = Add(*i_part)
for k, coeff in h_part.items():
h_part[k] = simplify(coeff)
common = S.One
if not i_part.is_zero and not i_part.is_hypergeometric(n) and \
not (i_part.is_Add and all(map(lambda x: x.is_hypergeometric(n), i_part.expand().args))):
raise ValueError("The independent term should be a sum of hypergeometric functions, got '%s'" % i_part)
for coeff in h_part.values():
if coeff.is_rational_function(n):
if not coeff.is_polynomial(n):
common = lcm(common, coeff.as_numer_denom()[1], n)
else:
raise ValueError(
"Polynomial or rational function expected, got '%s'" % coeff)
i_numer, i_denom = i_part.as_numer_denom()
if i_denom.is_polynomial(n):
common = lcm(common, i_denom, n)
if common is not S.One:
for k, coeff in h_part.items():
numer, denom = coeff.as_numer_denom()
h_part[k] = numer*quo(common, denom, n)
i_part = i_numer*quo(common, i_denom, n)
K_min = min(h_part.keys())
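    # If terms like y(n + k) with k < 0 are present, shift the whole
    # recurrence by |K_min| so that all terms become y(n + k) with k >= 0.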
if K_min < 0:
K = abs(K_min)
H_part = defaultdict(lambda: S.Zero)
i_part = i_part.subs(n, n + K).expand()
common = common.subs(n, n + K).expand()
for k, coeff in h_part.items():
H_part[k + K] = coeff.subs(n, n + K).expand()
else:
H_part = h_part
K_max = max(H_part.keys())
coeffs = [H_part[i] for i in range(K_max + 1)]
result = rsolve_hyper(coeffs, -i_part, n, symbols=True)
if result is None:
return None
solution, symbols = result
if init in ({}, []):
init = None
if symbols and init is not None:
if isinstance(init, list):
init = {i: init[i] for i in range(len(init))}
equations = []
for k, v in init.items():
try:
i = int(k)
except TypeError:
if k.is_Function and k.func == y.func:
i = int(k.args[0])
else:
raise ValueError("Integer or term expected, got '%s'" % k)
eq = solution.subs(n, i) - v
if eq.has(S.NaN):
eq = solution.limit(n, i) - v
equations.append(eq)
result = solve(equations, *symbols)
if not result:
return None
else:
solution = solution.subs(result)
return solution
from sympy.core import (Function, Pow, sympify, Expr)
from sympy.core.relational import Relational
from sympy.core.singleton import S
from sympy.polys import Poly, decompose
from sympy.utilities.misc import func_name
def decompogen(f, symbol):
"""
    Computes a general functional decomposition of ``f``.
Given an expression ``f``, returns a list ``[f_1, f_2, ..., f_n]``,
where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
Note: This is a General decomposition function. It also decomposes
Polynomials. For only Polynomial decomposition see ``decompose`` in polys.
Examples
========
>>> from sympy.solvers.decompogen import decompogen
>>> from sympy.abc import x
>>> from sympy import sqrt, sin, cos
>>> decompogen(sin(cos(x)), x)
[sin(x), cos(x)]
>>> decompogen(sin(x)**2 + sin(x) + 1, x)
[x**2 + x + 1, sin(x)]
>>> decompogen(sqrt(6*x**2 - 5), x)
[sqrt(x), 6*x**2 - 5]
>>> decompogen(sin(sqrt(cos(x**2 + 1))), x)
[sin(x), sqrt(x), cos(x), x**2 + 1]
>>> decompogen(x**4 + 2*x**3 - x - 1, x)
[x**2 - x - 1, x**2 + x]
"""
f = sympify(f)
if not isinstance(f, Expr) or isinstance(f, Relational):
raise TypeError('expecting Expr but got: `%s`' % func_name(f))
if symbol not in f.free_symbols:
return [f]
result = []
# ===== Simple Functions ===== #
if isinstance(f, (Function, Pow)):
if f.is_Pow and f.base == S.Exp1:
arg = f.exp
else:
arg = f.args[0]
if arg == symbol:
return [f]
result += [f.subs(arg, symbol)] + decompogen(arg, symbol)
return result
# ===== Convert to Polynomial ===== #
fp = Poly(f)
gens = list(filter(lambda x: symbol in x.free_symbols, fp.gens))
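    # If f is polynomial in a single symbol-dependent generator (e.g. sin(x)
    # in sin(x)**2 + sin(x) + 1), peel that generator off and recurse on it.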
if len(gens) == 1 and gens[0] != symbol:
f1 = f.subs(gens[0], symbol)
f2 = gens[0]
result += [f1] + decompogen(f2, symbol)
return result
# ===== Polynomial decompose() ====== #
try:
result += decompose(f)
return result
except ValueError:
return [f]
def compogen(g_s, symbol):
"""
Returns the composition of functions.
Given a list of functions ``g_s``, returns their composition ``f``,
where:
f = g_1 o g_2 o .. o g_n
Note: This is a General composition function. It also composes Polynomials.
For only Polynomial composition see ``compose`` in polys.
Examples
========
>>> from sympy.solvers.decompogen import compogen
>>> from sympy.abc import x
>>> from sympy import sqrt, sin, cos
>>> compogen([sin(x), cos(x)], x)
sin(cos(x))
>>> compogen([x**2 + x + 1, sin(x)], x)
sin(x)**2 + sin(x) + 1
>>> compogen([sqrt(x), 6*x**2 - 5], x)
sqrt(6*x**2 - 5)
>>> compogen([sin(x), sqrt(x), cos(x), x**2 + 1], x)
sin(sqrt(cos(x**2 + 1)))
>>> compogen([x**2 - x - 1, x**2 + x], x)
-x**2 - x + (x**2 + x)**2 - 1
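
    Composing a decomposition produced by ``decompogen`` recovers the
    original expression (reusing the ``decompogen`` example from its
    docstring):

    >>> from sympy.solvers.decompogen import decompogen
    >>> compogen(decompogen(sin(cos(x)), x), x)
    sin(cos(x))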
"""
if len(g_s) == 1:
return g_s[0]
foo = g_s[0].subs(symbol, g_s[1])
if len(g_s) == 2:
return foo
return compogen([foo] + g_s[2:], symbol)
"""
This module contains functions to:
- solve a single equation for a single variable, in any domain either real or complex.
- solve a single transcendental equation for a single variable in any domain either real or complex.
(currently supports solving in real domain only)
- solve a system of linear equations with N variables and M equations.
- solve a system of Non Linear Equations with N variables and M equations
"""
from sympy.core.sympify import sympify
from sympy.core import (S, Pow, Dummy, pi, Expr, Wild, Mul, Equality,
Add)
from sympy.core.containers import Tuple
from sympy.core.function import (Lambda, expand_complex, AppliedUndef,
expand_log, _mexpand, expand_trig)
from sympy.core.mod import Mod
from sympy.core.numbers import igcd, I, Number, Rational, oo, ilcm
from sympy.core.power import integer_log
from sympy.core.relational import Eq, Ne, Relational
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import Symbol, _uniquely_named_symbol
from sympy.core.sympify import _sympify
from sympy.simplify.simplify import simplify, fraction, trigsimp
from sympy.simplify import powdenest, logcombine
from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp,
acos, asin, acsc, asec, arg,
piecewise_fold, Piecewise)
from sympy.functions.elementary.complexes import re, im
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import real_root
from sympy.logic.boolalg import And, BooleanTrue
from sympy.sets import (FiniteSet, imageset, Interval, Intersection,
Union, ConditionSet, ImageSet, Complement, Contains)
from sympy.sets.sets import Set, ProductSet
from sympy.matrices import Matrix, MatrixBase
from sympy.ntheory import totient
from sympy.ntheory.factor_ import divisors
from sympy.ntheory.residue_ntheory import discrete_log, nthroot_mod
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf, factor, lcm, gcd)
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polytools import invert
from sympy.polys.solvers import (sympy_eqs_to_ring, solve_lin_sys,
PolyNonlinearError)
from sympy.polys.matrices.linsolve import _linsolve
from sympy.solvers.solvers import (checksol, denoms, unrad,
_simple_dens, recast_to_symbols)
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
from sympy.utilities.iterables import (numbered_symbols, has_dups,
is_sequence)
from sympy.calculus.util import periodicity, continuous_domain
from types import GeneratorType
from collections import defaultdict
class NonlinearError(ValueError):
"""Raised when unexpectedly encountering nonlinear equations"""
pass
_rc = Dummy("R", real=True), Dummy("C", complex=True)
def _masked(f, *atoms):
"""Return ``f``, with all objects given by ``atoms`` replaced with
Dummy symbols, ``d``, and the list of replacements, ``(d, e)``,
where ``e`` is an object of type given by ``atoms`` in which
any other instances of atoms have been recursively replaced with
Dummy symbols, too. The tuples are ordered so that if they are
    applied in sequence, the original ``f`` will be restored.
Examples
========
>>> from sympy import cos
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import _masked
>>> f = cos(cos(x) + 1)
>>> f, reps = _masked(cos(1 + cos(x)), cos)
>>> f
_a1
>>> reps
[(_a1, cos(_a0 + 1)), (_a0, cos(x))]
>>> for d, e in reps:
... f = f.xreplace({d: e})
>>> f
cos(cos(x) + 1)
"""
sym = numbered_symbols('a', cls=Dummy, real=True)
mask = []
for a in ordered(f.atoms(*atoms)):
for i in mask:
a = a.replace(*i)
mask.append((a, next(sym)))
for i, (o, n) in enumerate(mask):
f = f.replace(o, n)
mask[i] = (n, o)
mask = list(reversed(mask))
return f, mask
def _invert(f_x, y, x, domain=S.Complexes):
r"""
Reduce the complex valued equation $f(x) = y$ to a set of equations
$$\left\{g(x) = h_1(y),\ g(x) = h_2(y),\ \dots,\ g(x) = h_n(y) \right\}$$
where $g(x)$ is a simpler function than $f(x)$. The return value is a tuple
$(g(x), \mathrm{set}_h)$, where $g(x)$ is a function of $x$ and $\mathrm{set}_h$ is
    the set of functions $\left\{h_1(y), h_2(y), \dots, h_n(y)\right\}$.
Here, $y$ is not necessarily a symbol.
$\mathrm{set}_h$ contains the functions, along with the information
about the domain in which they are valid, through set
operations. For instance, if :math:`y = |x| - n` is inverted
in the real domain, then $\mathrm{set}_h$ is not simply
$\{-n, n\}$ as the nature of `n` is unknown; rather, it is:
$$ \left(\left[0, \infty\right) \cap \left\{n\right\}\right) \cup
\left(\left(-\infty, 0\right] \cap \left\{- n\right\}\right)$$
By default, the complex domain is used which means that inverting even
seemingly simple functions like $\exp(x)$ will give very different
results from those obtained in the real domain.
(In the case of $\exp(x)$, the inversion via $\log$ is multi-valued
in the complex domain, having infinitely many branches.)
If you are working with real values only (or you are not sure which
function to use) you should probably set the domain to
``S.Reals`` (or use ``invert_real`` which does that automatically).
Examples
========
>>> from sympy.solvers.solveset import invert_complex, invert_real
>>> from sympy.abc import x, y
>>> from sympy import exp
When does exp(x) == y?
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers))
>>> invert_real(exp(x), y, x)
(x, Intersection({log(y)}, Reals))
When does exp(x) == 1?
>>> invert_complex(exp(x), 1, x)
(x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers))
>>> invert_real(exp(x), 1, x)
(x, {0})
See Also
========
invert_real, invert_complex
"""
x = sympify(x)
if not x.is_Symbol:
raise ValueError("x must be a symbol")
f_x = sympify(f_x)
if x not in f_x.free_symbols:
raise ValueError("Inverse of constant function doesn't exist")
y = sympify(y)
if x in y.free_symbols:
raise ValueError("y should be independent of x ")
if domain.is_subset(S.Reals):
x1, s = _invert_real(f_x, FiniteSet(y), x)
else:
x1, s = _invert_complex(f_x, FiniteSet(y), x)
if not isinstance(s, FiniteSet) or x1 != x:
return x1, s
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled by the respective inverters.
if domain is S.Complexes:
return x1, s
else:
return x1, s.intersection(domain)
invert_complex = _invert
def invert_real(f_x, y, x):
"""
Inverts a real-valued function. Same as :func:`invert_complex`, but sets
the domain to ``S.Reals`` before inverting.
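
    Examples
    ========

    >>> from sympy.solvers.solveset import invert_real
    >>> from sympy.abc import x, y
    >>> from sympy import exp
    >>> invert_real(exp(x), y, x)
    (x, Intersection({log(y)}, Reals))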
"""
return _invert(f_x, y, x, S.Reals)
def _invert_real(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol or g_ys is S.EmptySet:
return (f, g_ys)
n = Dummy('n', real=True)
if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
return _invert_real(f.exp,
imageset(Lambda(n, log(n)), g_ys),
symbol)
if hasattr(f, 'inverse') and f.inverse() is not None and not isinstance(f, (
TrigonometricFunction,
HyperbolicFunction,
)):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys),
symbol)
if isinstance(f, Abs):
return _invert_abs(f.args[0], g_ys, symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
if expo.is_rational:
num, den = expo.as_numer_denom()
if den % 2 == 0 and num % 2 == 1 and den.is_zero is False:
root = Lambda(n, real_root(n, expo))
g_ys_pos = g_ys & Interval(0, oo)
res = imageset(root, g_ys_pos)
base_positive = solveset(base >= 0, symbol, S.Reals)
_inv, _set = _invert_real(base, res, symbol)
return (_inv, _set.intersect(base_positive))
if den % 2 == 1:
root = Lambda(n, real_root(n, expo))
res = imageset(root, g_ys)
if num % 2 == 0:
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
if num % 2 == 1:
return _invert_real(base, res, symbol)
elif expo.is_irrational:
root = Lambda(n, real_root(n, expo))
g_ys_pos = g_ys & Interval(0, oo)
res = imageset(root, g_ys_pos)
return _invert_real(base, res, symbol)
else:
# indeterminate exponent, e.g. Float or parity of
# num, den of rational could not be determined
pass # use default return
if not base_has_sym:
rhs = g_ys.args[0]
if base.is_positive:
return _invert_real(expo,
imageset(Lambda(n, log(n, base, evaluate=False)), g_ys), symbol)
elif base.is_negative:
s, b = integer_log(rhs, base)
if b:
return _invert_real(expo, FiniteSet(s), symbol)
else:
return (expo, S.EmptySet)
elif base.is_zero:
one = Eq(rhs, 1)
if one == S.true:
# special case: 0**x - 1
return _invert_real(expo, FiniteSet(0), symbol)
elif one == S.false:
return (expo, S.EmptySet)
if isinstance(f, TrigonometricFunction):
if isinstance(g_ys, FiniteSet):
def inv(trig):
if isinstance(trig, (sin, csc)):
F = asin if isinstance(trig, sin) else acsc
return (lambda a: n*pi + S.NegativeOne**n*F(a),)
if isinstance(trig, (cos, sec)):
F = acos if isinstance(trig, cos) else asec
return (
lambda a: 2*n*pi + F(a),
lambda a: 2*n*pi - F(a),)
if isinstance(trig, (tan, cot)):
return (lambda a: n*pi + trig.inverse()(a),)
n = Dummy('n', integer=True)
invs = S.EmptySet
for L in inv(f):
invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys])
return _invert_real(f.args[0], invs, symbol)
return (f, g_ys)
def _invert_complex(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol or g_ys is S.EmptySet:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
if g in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
return (h, S.EmptySet)
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
# special case: g**r = 0
# Could be improved like `_invert_real` to handle more general cases.
if expo.is_Rational and g_ys == FiniteSet(0):
if expo.is_positive:
return _invert_complex(base, g_ys, symbol)
if hasattr(f, 'inverse') and f.inverse() is not None and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, HyperbolicFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
if isinstance(g_ys, ImageSet):
            # can solve up to `(d*exp(exp(...(exp(a*x + b))...) + c)` format.
            # This can be further improved to `(d*exp(exp(...(exp(a*x**n + b*x**(n-1) + ... + f))...) + c)`.
g_ys_expr = g_ys.lamda.expr
g_ys_vars = g_ys.lamda.variables
k = Dummy('k{}'.format(len(g_ys_vars)))
g_ys_vars_1 = (k,) + g_ys_vars
exp_invs = Union(*[imageset(Lambda((g_ys_vars_1,), (I*(2*k*pi + arg(g_ys_expr))
+ log(Abs(g_ys_expr)))), S.Integers**(len(g_ys_vars_1)))])
return _invert_complex(f.exp, exp_invs, symbol)
elif isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.exp, exp_invs, symbol)
return (f, g_ys)
def _invert_abs(f, g_ys, symbol):
"""Helper function for inverting absolute value functions.
Returns the complete result of inverting an absolute value
function along with the conditions which must also be satisfied.
If it is certain that all these conditions are met, a :class:`~.FiniteSet`
of all possible solutions is returned. If any condition cannot be
satisfied, an :class:`~.EmptySet` is returned. Otherwise, a
:class:`~.ConditionSet` of the solutions, with all the required conditions
specified, is returned.
"""
if not g_ys.is_FiniteSet:
# this could be used for FiniteSet, but the
# results are more compact if they aren't, e.g.
# ConditionSet(x, Contains(n, Interval(0, oo)), {-n, n}) vs
# Union(Intersection(Interval(0, oo), {n}), Intersection(Interval(-oo, 0), {-n}))
# for the solution of abs(x) - n
pos = Intersection(g_ys, Interval(0, S.Infinity))
parg = _invert_real(f, pos, symbol)
narg = _invert_real(-f, pos, symbol)
if parg[0] != narg[0]:
raise NotImplementedError
return parg[0], Union(narg[1], parg[1])
# check conditions: all these must be true. If any are unknown
# then return them as conditions which must be satisfied
unknown = []
for a in g_ys.args:
ok = a.is_nonnegative if a.is_Number else a.is_positive
if ok is None:
unknown.append(a)
elif not ok:
return symbol, S.EmptySet
if unknown:
conditions = And(*[Contains(i, Interval(0, oo))
for i in unknown])
else:
conditions = True
n = Dummy('n', real=True)
# this is slightly different than above: instead of solving
# +/-f on positive values, here we solve for f on +/- g_ys
g_x, values = _invert_real(f, Union(
imageset(Lambda(n, n), g_ys),
imageset(Lambda(n, -n), g_ys)), symbol)
return g_x, ConditionSet(g_x, conditions, values)
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
elif isinstance(f, Piecewise):
# Check the cases of the Piecewise in turn. There might be invalid
# expressions in later cases that don't apply e.g.
# solveset(Piecewise((0, Eq(x, 0)), (1/x, True)), x)
for expr, cond in f.args:
condsubs = cond.subs(symbol, p)
if condsubs is S.false:
continue
elif condsubs is S.true:
return _domain_check(expr, symbol, p)
else:
# We don't know which case of the Piecewise holds. On this
# basis we cannot decide whether any solution is in or out of
# the domain. Ideally this function would allow returning a
# symbolic condition for the validity of the solution that
# could be handled in the calling code. In the mean time we'll
# give this particular solution the benefit of the doubt and
# let it pass.
return True
else:
# TODO : We should not blindly recurse through all args of arbitrary expressions like this
return all(_domain_check(g, symbol, p)
for g in f.args)
def _is_finite_with_finite_vars(f, domain=S.Complexes):
"""
Return True if the given expression is finite. For symbols that
do not assign a value for `complex` and/or `real`, the domain will
be used to assign a value; symbols that do not assign a value
for `finite` will be made finite. All other assumptions are
left unmodified.
"""
def assumptions(s):
A = s.assumptions0
A.setdefault('finite', A.get('finite', True))
if domain.is_subset(S.Reals):
# if this gets set it will make complex=True, too
A.setdefault('real', True)
else:
# don't change 'real' because being complex implies
# nothing about being real
A.setdefault('complex', True)
return A
reps = {s: Dummy(**assumptions(s)) for s in f.free_symbols}
return f.xreplace(reps).is_finite
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
... HyperbolicFunction)
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def _solve_as_rational(f, symbol, domain):
""" solve rational functions"""
f = together(_mexpand(f, recursive=True), deep=True)
g, h = fraction(f)
if not h.has(symbol):
try:
return _solve_as_poly(g, symbol, domain)
except NotImplementedError:
# The polynomial formed from g could end up having
# coefficients in a ring over which finding roots
# isn't implemented yet, e.g. ZZ[a] for some symbol a
return ConditionSet(symbol, Eq(f, 0), domain)
except CoercionFailed:
# contained oo, zoo or nan
return S.EmptySet
else:
valid_solns = _solveset(g, symbol, domain)
invalid_solns = _solveset(h, symbol, domain)
return valid_solns - invalid_solns
class _SolveTrig1Error(Exception):
"""Raised when _solve_trig1 heuristics do not apply"""
def _solve_trig(f, symbol, domain):
"""Function to call other helpers to solve trigonometric equations """
sol = None
try:
sol = _solve_trig1(f, symbol, domain)
except _SolveTrig1Error:
try:
sol = _solve_trig2(f, symbol, domain)
except ValueError:
raise NotImplementedError(filldedent('''
Solution to this kind of trigonometric equations
is yet to be implemented'''))
return sol
def _solve_trig1(f, symbol, domain):
"""Primary solver for trigonometric and hyperbolic equations
Returns either the solution set as a ConditionSet (auto-evaluated to a
union of ImageSets if no variables besides 'symbol' are involved) or
raises _SolveTrig1Error if f == 0 cannot be solved.
Notes
=====
Algorithm:
1. Do a change of variable x -> mu*x in arguments to trigonometric and
hyperbolic functions, in order to reduce them to small integers. (This
step is crucial to keep the degrees of the polynomials of step 4 low.)
2. Rewrite trigonometric/hyperbolic functions as exponentials.
3. Proceed to a 2nd change of variable, replacing exp(I*x) or exp(x) by y.
4. Solve the resulting rational equation.
5. Use invert_complex or invert_real to return to the original variable.
6. If the coefficients of 'symbol' were symbolic in nature, add the
necessary consistency conditions in a ConditionSet.
"""
# Prepare change of variable
x = Dummy('x')
if _is_function_class_equation(HyperbolicFunction, f, symbol):
cov = exp(x)
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
else:
cov = exp(I*x)
inverter = invert_complex
f = trigsimp(f)
f_original = f
trig_functions = f.atoms(TrigonometricFunction, HyperbolicFunction)
trig_arguments = [e.args[0] for e in trig_functions]
# trigsimp may have reduced the equation to an expression
# that is independent of 'symbol' (e.g. cos**2+sin**2)
if not any(a.has(symbol) for a in trig_arguments):
return solveset(f_original, symbol, domain)
denominators = []
numerators = []
for ar in trig_arguments:
try:
poly_ar = Poly(ar, symbol)
except PolynomialError:
raise _SolveTrig1Error("trig argument is not a polynomial")
if poly_ar.degree() > 1: # degree >1 still bad
raise _SolveTrig1Error("degree of variable must not exceed one")
if poly_ar.degree() == 0: # degree 0, don't care
continue
c = poly_ar.all_coeffs()[0] # got the coefficient of 'symbol'
numerators.append(fraction(c)[0])
denominators.append(fraction(c)[1])
mu = lcm(denominators)/gcd(numerators)
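    # mu rescales the variable so that, after the substitution below, all
    # trig/hyperbolic arguments become integer multiples of x (step 1 of the
    # algorithm described in the Notes).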
f = f.subs(symbol, mu*x)
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(cov, y), h.subs(cov, y)
if g.has(x) or h.has(x):
raise _SolveTrig1Error("change of variable not possible")
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, ConditionSet):
raise _SolveTrig1Error("polynomial has ConditionSet solution")
if isinstance(solns, FiniteSet):
if any(isinstance(s, RootOf) for s in solns):
raise _SolveTrig1Error("polynomial results in RootOf object")
# revert the change of variable
cov = cov.subs(x, symbol/mu)
result = Union(*[inverter(cov, s, symbol)[1] for s in solns])
# In case of symbolic coefficients, the solution set is only valid
# if numerator and denominator of mu are non-zero.
if mu.has(Symbol):
syms = (mu).atoms(Symbol)
munum, muden = fraction(mu)
condnum = munum.as_independent(*syms, as_Add=False)[1]
condden = muden.as_independent(*syms, as_Add=False)[1]
cond = And(Ne(condnum, 0), Ne(condden, 0))
else:
cond = True
# Actual conditions are returned as part of the ConditionSet. Adding an
# intersection with C would only complicate some solution sets due to
# current limitations of intersection code. (e.g. #19154)
if domain is S.Complexes:
# This is a slight abuse of ConditionSet. Ideally this should
# be some kind of "PiecewiseSet". (See #19507 discussion)
return ConditionSet(symbol, cond, result)
else:
return ConditionSet(symbol, cond, Intersection(result, domain))
elif solns is S.EmptySet:
return S.EmptySet
else:
raise _SolveTrig1Error("polynomial solutions must form FiniteSet")
def _solve_trig2(f, symbol, domain):
"""Secondary helper to solve trigonometric equations,
called when first helper fails """
f = trigsimp(f)
f_original = f
trig_functions = f.atoms(sin, cos, tan, sec, cot, csc)
trig_arguments = [e.args[0] for e in trig_functions]
denominators = []
numerators = []
# todo: This solver can be extended to hyperbolics if the
# analogous change of variable to tanh (instead of tan)
# is used.
if not trig_functions:
return ConditionSet(symbol, Eq(f_original, 0), domain)
# todo: The pre-processing below (extraction of numerators, denominators,
# gcd, lcm, mu, etc.) should be updated to the enhanced version in
# _solve_trig1. (See #19507)
for ar in trig_arguments:
try:
poly_ar = Poly(ar, symbol)
except PolynomialError:
raise ValueError("give up, we cannot solve if this is not a polynomial in x")
if poly_ar.degree() > 1: # degree >1 still bad
raise ValueError("degree of variable inside polynomial should not exceed one")
if poly_ar.degree() == 0: # degree 0, don't care
continue
c = poly_ar.all_coeffs()[0] # got the coefficient of 'symbol'
try:
numerators.append(Rational(c).p)
denominators.append(Rational(c).q)
except TypeError:
return ConditionSet(symbol, Eq(f_original, 0), domain)
x = Dummy('x')
# ilcm() and igcd() require more than one argument
if len(numerators) > 1:
mu = Rational(2)*ilcm(*denominators)/igcd(*numerators)
else:
assert len(numerators) == 1
mu = Rational(2)*denominators[0]/numerators[0]
f = f.subs(symbol, mu*x)
f = f.rewrite(tan)
f = expand_trig(f)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(tan(x), y), h.subs(tan(x), y)
if g.has(x) or h.has(x):
return ConditionSet(symbol, Eq(f_original, 0), domain)
solns = solveset(g, y, S.Reals) - solveset(h, y, S.Reals)
if isinstance(solns, FiniteSet):
result = Union(*[invert_real(tan(symbol/mu), s, symbol)[1]
for s in solns])
dsol = invert_real(tan(symbol/mu), oo, symbol)[1]
if degree(h) > degree(g): # If degree(denom)>degree(num) then there
result = Union(result, dsol) # would be another sol at Lim(denom-->oo)
return Intersection(result, domain)
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f_original, 0), S.Reals)
def _solve_as_poly(f, symbol, domain=S.Complexes):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), domain)
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), domain)
if gen != symbol:
y = Dummy('y')
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
lhs, rhs_s = inverter(gen, y, symbol)
if lhs == symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
if result is not None:
if isinstance(result, FiniteSet):
# this is to simplify solutions like -sqrt(-I) to sqrt(2)/2
# - sqrt(2)*I/2. We are not expanding for solution with symbols
# or undefined functions because that makes the solution more complicated.
# For example, expand_complex(a) returns re(a) + I*im(a)
if all(s.atoms(Symbol, AppliedUndef) == set() and not isinstance(s, RootOf)
for s in result):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
if isinstance(result, FiniteSet) and domain != S.Complexes:
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled elsewhere.
result = result.intersection(domain)
return result
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def _solve_radical(f, unradf, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
res = unradf
eq, cov = res if res else (f, [])
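    # unrad returns the radical-free equation and, if one was needed, the
    # change of variable used to remove the radicals; with no change of
    # variable we solve eq directly and discard zeros of the denominators.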
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, symbol)])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
if not isinstance(result, FiniteSet):
solution_set = result
else:
f_set = [] # solutions for FiniteSet
c_set = [] # solutions for ConditionSet
for s in result:
if checksol(f, symbol, s):
f_set.append(s)
else:
c_set.append(s)
solution_set = FiniteSet(*f_set) + ConditionSet(symbol, Eq(f, 0), FiniteSet(*c_set))
return solution_set
def _solve_abs(f, symbol, domain):
""" Helper function to solve equation involving absolute value function """
if not domain.is_subset(S.Reals):
raise ValueError(filldedent('''
Absolute values cannot be inverted in the
complex domain.'''))
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r) or {}
f_p, f_q, f_r = [pattern_match.get(i, S.Zero) for i in (p, q, r)]
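    # f has been matched as p*Abs(q) + r; solve separately on the regions
    # where q >= 0 and q < 0, replacing Abs(q) by q and -q respectively.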
if not (f_p.is_zero or f_q.is_zero):
domain = continuous_domain(f_q, symbol, domain)
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False, domain=domain, continuous=True)
q_neg_cond = q_pos_cond.complement(domain)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def solve_decomposition(f, symbol, domain):
"""
Function to solve equations via the principle of "Decomposition
and Rewriting".
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S
>>> from sympy.solvers.solveset import solve_decomposition as sd
>>> x = Symbol('x')
>>> f1 = exp(2*x) - 3*exp(x) + 2
>>> sd(f1, x, S.Reals)
{0, log(2)}
>>> f2 = sin(x)**2 + 2*sin(x) + 1
>>> pprint(sd(f2, x, S.Reals), use_unicode=False)
3*pi
{2*n*pi + ---- | n in Integers}
2
>>> f3 = sin(x + 2)
>>> pprint(sd(f3, x, S.Reals), use_unicode=False)
{2*n*pi - 2 | n in Integers} U {2*n*pi - 2 + pi | n in Integers}
"""
from sympy.solvers.decompogen import decompogen
from sympy.calculus.util import function_range
# decompose the given function
g_s = decompogen(f, symbol)
# `y_s` represents the set of values for which the function `g` is to be
# solved.
# `solutions` represent the solutions of the equations `g = y_s` or
# `g = 0` depending on the type of `y_s`.
# As we are interested in solving the equation: f = 0
y_s = FiniteSet(0)
for g in g_s:
frange = function_range(g, symbol, domain)
y_s = Intersection(frange, y_s)
result = S.EmptySet
if isinstance(y_s, FiniteSet):
for y in y_s:
solutions = solveset(Eq(g, y), symbol, domain)
if not isinstance(solutions, ConditionSet):
result += solutions
else:
if isinstance(y_s, ImageSet):
iter_iset = (y_s,)
elif isinstance(y_s, Union):
iter_iset = y_s.args
elif y_s is S.EmptySet:
# y_s is not in the range of g in g_s, so no solution exists
                # in the given domain
return S.EmptySet
for iset in iter_iset:
new_solutions = solveset(Eq(iset.lamda.expr, g), symbol, domain)
dummy_var = tuple(iset.lamda.expr.free_symbols)[0]
(base_set,) = iset.base_sets
if isinstance(new_solutions, FiniteSet):
new_exprs = new_solutions
elif isinstance(new_solutions, Intersection):
if isinstance(new_solutions.args[1], FiniteSet):
new_exprs = new_solutions.args[1]
for new_expr in new_exprs:
result += ImageSet(Lambda(dummy_var, new_expr), base_set)
if result is S.EmptySet:
return ConditionSet(symbol, Eq(f, 0), domain)
y_s = result
return y_s
def _solveset(f, symbol, domain, _check=False):
"""Helper for solveset to return a result from an expression
that has already been sympify'ed and is known to contain the
given symbol."""
# _check controls whether the answer is checked or not
from sympy.simplify.simplify import signsimp
if isinstance(f, BooleanTrue):
return domain
orig_f = f
if f.is_Mul:
coeff, f = f.as_independent(symbol, as_Add=False)
if coeff in {S.ComplexInfinity, S.NegativeInfinity, S.Infinity}:
f = together(orig_f)
elif f.is_Add:
a, h = f.as_independent(symbol)
m, h = h.as_independent(symbol, as_Add=False)
if m not in {S.ComplexInfinity, S.Zero, S.Infinity,
S.NegativeInfinity}:
f = a/m + h # XXX condition `m != 0` should be added to soln
# assign the solvers to use
solver = lambda f, x, domain=domain: _solveset(f, x, domain)
inverter = lambda f, rhs, symbol: _invert(f, rhs, symbol, domain)
result = S.EmptySet
if f.expand().is_zero:
return domain
elif not f.has(symbol):
return S.EmptySet
elif f.is_Mul and all(_is_finite_with_finite_vars(m, domain)
for m in f.args):
        # If f(x) and g(x) are both finite, the solution of f(x)*g(x) == 0 is
        # the same as Union(f(x) == 0, g(x) == 0). This is not true in
        # general: g(x) can grow infinitely large at the values where
        # f(x) == 0. To be sure that we are not silently allowing any wrong
        # solutions we use this technique only if both f and g are finite for
        # a finite input.
result = Union(*[solver(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_trig(f, symbol, domain)
elif isinstance(f, arg):
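        # arg(a) == 0 exactly when a is a positive real number, i.e. when
        # re(a) > 0 and im(a) == 0.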
a = f.args[0]
result = Intersection(_solveset(re(a) > 0, symbol, domain),
_solveset(im(a), symbol, domain))
elif f.is_Piecewise:
expr_set_pairs = f.as_expr_set_pairs(domain)
for (expr, in_set) in expr_set_pairs:
if in_set.is_Relational:
in_set = in_set.as_set()
solns = solver(expr, symbol, in_set)
result += solns
elif isinstance(f, Eq):
result = solver(Add(f.lhs, - f.rhs, evaluate=False), symbol, domain)
elif f.is_Relational:
try:
result = solve_univariate_inequality(
f, symbol, domain=domain, relational=False)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
elif _is_modular(f, symbol):
result = _solve_modular(f, symbol, domain)
else:
lhs, rhs_s = inverter(f, 0, symbol)
if lhs == symbol:
# do some very minimal simplification since
# repeated inversion may have left the result
# in a state that other solvers (e.g. poly)
# would have simplified; this is done here
# rather than in the inverter since here it
# is only done once whereas there it would
# be repeated for each step of the inversion
if isinstance(rhs_s, FiniteSet):
rhs_s = FiniteSet(*[Mul(*
signsimp(i).as_content_primitive())
for i in rhs_s])
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
for equation in [lhs - rhs for rhs in rhs_s]:
if equation == f:
u = unrad(f, symbol)
if u:
result += _solve_radical(equation, u,
symbol,
solver)
elif equation.has(Abs):
result += _solve_abs(f, symbol, domain)
else:
result_rational = _solve_as_rational(equation, symbol, domain)
if not isinstance(result_rational, ConditionSet):
result += result_rational
else:
# may be a transcendental type equation
t_result = _transolve(equation, symbol, domain)
if isinstance(t_result, ConditionSet):
# might need factoring; this is expensive so we
# have delayed until now. To avoid recursion
# errors look for a non-trivial factoring into
# a product of symbol dependent terms; I think
# that something that factors as a Pow would
# have already been recognized by now.
factored = equation.factor()
if factored.is_Mul and equation != factored:
_, dep = factored.as_independent(symbol)
if not dep.is_Add:
# non-trivial factoring of equation
# but use form with constants
# in case they need special handling
t_result = solver(factored, symbol)
result += t_result
else:
result += solver(equation, symbol)
elif rhs_s is not S.EmptySet:
result = ConditionSet(symbol, Eq(f, 0), domain)
if isinstance(result, ConditionSet):
if isinstance(f, Expr):
num, den = f.as_numer_denom()
if den.has(symbol):
_result = _solveset(num, symbol, domain)
if not isinstance(_result, ConditionSet):
singularities = _solveset(den, symbol, domain)
result = _result - singularities
if _check:
if isinstance(result, ConditionSet):
# it wasn't solved or has enumerated all conditions
# -- leave it alone
return result
# whittle away all but the symbol-containing core
# to use this for testing
if isinstance(orig_f, Expr):
fx = orig_f.as_independent(symbol, as_Add=True)[1]
fx = fx.as_independent(symbol, as_Add=False)[1]
else:
fx = orig_f
if isinstance(result, FiniteSet):
# check the result for invalid solutions
result = FiniteSet(*[s for s in result
if isinstance(s, RootOf)
or domain_check(fx, symbol, s)])
return result
def _is_modular(f, symbol):
"""
Helper function to check below mentioned types of modular equations.
``A - Mod(B, C) = 0``
    A -> This may or may not be a function of symbol.
B -> This is surely a function of symbol.
C -> It is an integer.
Parameters
==========
f : Expr
The equation to be checked.
symbol : Symbol
The concerned variable for which the equation is to be checked.
Examples
========
>>> from sympy import symbols, exp, Mod
>>> from sympy.solvers.solveset import _is_modular as check
>>> x, y = symbols('x y')
>>> check(Mod(x, 3) - 1, x)
True
>>> check(Mod(x, 3) - 1, y)
False
>>> check(Mod(x, 3)**2 - 5, x)
False
>>> check(Mod(x, 3)**2 - y, x)
False
>>> check(exp(Mod(x, 3)) - 1, x)
False
>>> check(Mod(3, y) - 1, y)
False
"""
if not f.has(Mod):
return False
# extract modterms from f.
modterms = list(f.atoms(Mod))
return (len(modterms) == 1 and # only one Mod should be present
modterms[0].args[0].has(symbol) and # B-> function of symbol
modterms[0].args[1].is_integer and # C-> to be an integer.
any(isinstance(term, Mod)
for term in list(_term_factors(f))) # free from other funcs
)
def _invert_modular(modterm, rhs, n, symbol):
"""
Helper function to invert modular equation.
``Mod(a, m) - rhs = 0``
Generally it is inverted as (a, ImageSet(Lambda(n, m*n + rhs), S.Integers)).
    A more simplified form will be returned if possible.
If it is not invertible then (modterm, rhs) is returned.
The following cases arise while inverting equation ``Mod(a, m) - rhs = 0``:
    1. If a is the symbol then m*n + rhs is the required solution.
    2. If a is an instance of ``Add`` then it is separated into its symbol
       dependent and symbol independent parts; the symbol independent part is
       transferred to the other side and ``_invert_modular`` is called again
       on the symbol dependent part.
    3. If a is an instance of ``Mul`` then, as in the ``Add`` case, the symbol
       dependent and symbol independent parts are separated; the symbol
       independent part is transferred to the rhs with the help of ``invert``
       and ``_invert_modular`` is called again on the symbol dependent part.
    4. If a is an instance of ``Pow`` then two cases arise:
       - If a is of the form (symbol_indep)**(symbol_dep) then the remainder is
         evaluated with the help of the discrete_log function and the least
         period is found with the help of the totient function.
         period*n + remainder is the required solution in this case.
         For reference: (https://en.wikipedia.org/wiki/Euler's_theorem)
       - If a is of the form (symbol_dep)**(symbol_indep) then we try to find
         the list of primitive solutions with the help of the nthroot_mod
         function. m*n + rem is the general solution, where rem belongs to the
         list of solutions from nthroot_mod.
Parameters
==========
modterm, rhs : Expr
The modular equation to be inverted, ``modterm - rhs = 0``
symbol : Symbol
The variable in the equation to be inverted.
n : Dummy
Dummy variable for output g_n.
Returns
=======
    A tuple ``(f_x, g_n)`` is returned, where ``f_x`` is the Mod-free part that
    depends on ``symbol`` and ``g_n`` is the set of values that ``f_x`` can take.
Examples
========
>>> from sympy import symbols, exp, Mod, Dummy, S
>>> from sympy.solvers.solveset import _invert_modular as invert_modular
>>> x, y = symbols('x y')
>>> n = Dummy('n')
>>> invert_modular(Mod(exp(x), 7), S(5), n, x)
(Mod(exp(x), 7), 5)
>>> invert_modular(Mod(x, 7), S(5), n, x)
(x, ImageSet(Lambda(_n, 7*_n + 5), Integers))
>>> invert_modular(Mod(3*x + 8, 7), S(5), n, x)
(x, ImageSet(Lambda(_n, 7*_n + 6), Integers))
>>> invert_modular(Mod(x**4, 7), S(5), n, x)
(x, EmptySet)
>>> invert_modular(Mod(2**(x**2 + x + 1), 7), S(2), n, x)
(x**2 + x + 1, ImageSet(Lambda(_n, 3*_n + 1), Naturals0))
"""
a, m = modterm.args
if rhs.is_real is False or any(term.is_real is False
for term in list(_term_factors(a))):
# Check for complex arguments
return modterm, rhs
if abs(rhs) >= abs(m):
        # Mod(a, m) can never equal rhs when abs(rhs) >= abs(m)
return symbol, S.EmptySet
if a == symbol:
return symbol, ImageSet(Lambda(n, m*n + rhs), S.Integers)
if a.is_Add:
# g + h = a
g, h = a.as_independent(symbol)
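        # e.g. for Mod(3*x + 8, 7) = 5: g = 8, h = 3*x, and we recurse on
        # Mod(3*x, 7) = Mod(5 - Mod(8, 7), 7) = 4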
if g is not S.Zero:
x_indep_term = rhs - Mod(g, m)
return _invert_modular(Mod(h, m), Mod(x_indep_term, m), n, symbol)
if a.is_Mul:
# g*h = a
g, h = a.as_independent(symbol)
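        # e.g. for Mod(3*x, 7) = 4: g = 3, h = x, and since invert(3, 7) = 5
        # we recurse on Mod(x, 7) = Mod(4*5, 7) = 6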
if g is not S.One:
x_indep_term = rhs*invert(g, m)
return _invert_modular(Mod(h, m), Mod(x_indep_term, m), n, symbol)
if a.is_Pow:
# base**expo = a
base, expo = a.args
if expo.has(symbol) and not base.has(symbol):
# remainder -> solution independent of n of equation.
# m, rhs are made coprime by dividing igcd(m, rhs)
try:
remainder = discrete_log(m / igcd(m, rhs), rhs, a.base)
except ValueError: # log does not exist
return modterm, rhs
            # period -> coefficient of n in the solution, i.e. the least
            # period after which powers of the base repeat modulo m.
            # By Euler's theorem, m divides a**(totient(m)) - 1 when
            # gcd(a, m) == 1: https://en.wikipedia.org/wiki/Euler's_theorem
period = totient(m)
for p in divisors(period):
                # a period smaller than totient(m) might exist
if pow(a.base, p, m / igcd(m, a.base)) == 1:
period = p
break
            # recursion is not applied here since _invert_modular is currently
            # not smart enough to handle an infinite rhs; here the rhs for expo
            # is the infinite set ImageSet(Lambda(n, period*n + remainder), S.Naturals0).
return expo, ImageSet(Lambda(n, period*n + remainder), S.Naturals0)
elif base.has(symbol) and not expo.has(symbol):
try:
remainder_list = nthroot_mod(rhs, expo, m, all_roots=True)
if remainder_list == []:
return symbol, S.EmptySet
except (ValueError, NotImplementedError):
return modterm, rhs
g_n = S.EmptySet
for rem in remainder_list:
g_n += ImageSet(Lambda(n, m*n + rem), S.Integers)
return base, g_n
return modterm, rhs
def _solve_modular(f, symbol, domain):
r"""
Helper function for solving modular equations of type ``A - Mod(B, C) = 0``,
where A can or cannot be a function of symbol, B is surely a function of
symbol and C is an integer.
Currently ``_solve_modular`` is only able to solve cases
where A is not a function of symbol.
Parameters
==========
f : Expr
The modular equation to be solved, ``f = 0``
symbol : Symbol
The variable in the equation to be solved.
domain : Set
A set over which the equation is solved. It has to be a subset of
Integers.
Returns
=======
A set of integer solutions satisfying the given modular equation.
A ``ConditionSet`` if the equation is unsolvable.
Examples
========
>>> from sympy.solvers.solveset import _solve_modular as solve_modulo
>>> from sympy import S, Symbol, sin, Intersection, Interval, Mod
>>> x = Symbol('x')
>>> solve_modulo(Mod(5*x - 8, 7) - 3, x, S.Integers)
ImageSet(Lambda(_n, 7*_n + 5), Integers)
>>> solve_modulo(Mod(5*x - 8, 7) - 3, x, S.Reals) # domain should be subset of integers.
ConditionSet(x, Eq(Mod(5*x + 6, 7) - 3, 0), Reals)
>>> solve_modulo(-7 + Mod(x, 5), x, S.Integers)
EmptySet
>>> solve_modulo(Mod(12**x, 21) - 18, x, S.Integers)
ImageSet(Lambda(_n, 6*_n + 2), Naturals0)
>>> solve_modulo(Mod(sin(x), 7) - 3, x, S.Integers) # not solvable
ConditionSet(x, Eq(Mod(sin(x), 7) - 3, 0), Integers)
>>> solve_modulo(3 - Mod(x, 5), x, Intersection(S.Integers, Interval(0, 100)))
Intersection(ImageSet(Lambda(_n, 5*_n + 3), Integers), Range(0, 101, 1))
"""
# extract modterm and g_y from f
unsolved_result = ConditionSet(symbol, Eq(f, 0), domain)
modterm = list(f.atoms(Mod))[0]
rhs = -S.One*(f.subs(modterm, S.Zero))
if f.as_coefficients_dict()[modterm].is_negative:
# checks if coefficient of modterm is negative in main equation.
rhs *= -S.One
if not domain.is_subset(S.Integers):
return unsolved_result
if rhs.has(symbol):
# TODO Case: A-> function of symbol, can be extended here
# in future.
return unsolved_result
n = Dummy('n', integer=True)
f_x, g_n = _invert_modular(modterm, rhs, n, symbol)
if f_x == modterm and g_n == rhs:
return unsolved_result
if f_x == symbol:
if domain is not S.Integers:
return domain.intersect(g_n)
return g_n
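    # g_n is typically ImageSet(Lambda(n, m*n + rem), Integers); solve
    # f_x == m*n + rem for symbol over the integers and re-wrap each
    # solution in the same ImageSet before intersecting with the domain.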
if isinstance(g_n, ImageSet):
lamda_expr = g_n.lamda.expr
lamda_vars = g_n.lamda.variables
base_sets = g_n.base_sets
sol_set = _solveset(f_x - lamda_expr, symbol, S.Integers)
if isinstance(sol_set, FiniteSet):
tmp_sol = S.EmptySet
for sol in sol_set:
tmp_sol += ImageSet(Lambda(lamda_vars, sol), *base_sets)
sol_set = tmp_sol
else:
sol_set = ImageSet(Lambda(lamda_vars, sol_set), *base_sets)
return domain.intersect(sol_set)
return unsolved_result
def _term_factors(f):
"""
Iterator to get the factors of all terms present
in the given equation.
Parameters
==========
f : Expr
Equation that needs to be addressed
Returns
=======
Factors of all terms present in the equation.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers.solveset import _term_factors
>>> x = symbols('x')
>>> list(_term_factors(-2 - x**2 + x*(x + 1)))
[-2, -1, x**2, x, x + 1]
"""
for add_arg in Add.make_args(f):
yield from Mul.make_args(add_arg)
def _solve_exponential(lhs, rhs, symbol, domain):
r"""
Helper function for solving (supported) exponential equations.
Exponential equations are the sum of (currently) at most
two terms with one or both of them having a power with a
symbol-dependent exponent.
For example
.. math:: 5^{2x + 3} - 5^{3x - 1}
.. math:: 4^{5 - 9x} - e^{2 - x}
Parameters
==========
lhs, rhs : Expr
The exponential equation to be solved, `lhs = rhs`
symbol : Symbol
The variable in which the equation is solved
domain : Set
A set over which the equation is solved.
Returns
=======
A set of solutions satisfying the given equation.
    A ``ConditionSet`` if the equation is unsolvable or if the required
    assumptions on the symbols are not satisfied; in the latter case a
    different style of ``ConditionSet`` is returned, containing the
    solution(s) of the equation together with the assumptions under which
    they hold.
Examples
========
>>> from sympy.solvers.solveset import _solve_exponential as solve_expo
>>> from sympy import symbols, S
>>> x = symbols('x', real=True)
>>> a, b = symbols('a b')
>>> solve_expo(2**x + 3**x - 5**x, 0, x, S.Reals) # not solvable
ConditionSet(x, Eq(2**x + 3**x - 5**x, 0), Reals)
>>> solve_expo(a**x - b**x, 0, x, S.Reals) # solvable but incorrect assumptions
ConditionSet(x, (a > 0) & (b > 0), {0})
>>> solve_expo(3**(2*x) - 2**(x + 3), 0, x, S.Reals)
{-3*log(2)/(-2*log(3) + log(2))}
>>> solve_expo(2**x - 4**x, 0, x, S.Reals)
{0}
* Proof of correctness of the method
The logarithm function is the inverse of the exponential function.
The defining relation between exponentiation and logarithm is:
.. math:: {\log_b x} = y \enspace if \enspace b^y = x
Therefore if we are given an equation with exponent terms, we can
convert every term to its corresponding logarithmic form. This is
achieved by taking logarithms and expanding the equation using
logarithmic identities so that it can easily be handled by ``solveset``.
For example:
.. math:: 3^{2x} = 2^{x + 3}
Taking log both sides will reduce the equation to
.. math:: (2x)\log(3) = (x + 3)\log(2)
    This form can easily be handled by ``solveset``.
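    As a rough illustration (a sketch of the idea, not the exact code path
    taken by this helper), the reduction can be reproduced with ``log``,
    ``expand_log`` and ``solveset`` directly:
    .. code-block:: python
        from sympy import symbols, log, expand_log, solveset, S
        x = symbols('x', real=True)
        L = expand_log(log(3**(2*x)), force=True)    # 2*x*log(3)
        R = expand_log(log(2**(x + 3)), force=True)  # (x + 3)*log(2)
        solveset(L - R, x, S.Reals)  # same solution set as in the example above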
"""
unsolved_result = ConditionSet(symbol, Eq(lhs - rhs, 0), domain)
newlhs = powdenest(lhs)
if lhs != newlhs:
# it may also be advantageous to factor the new expr
neweq = factor(newlhs - rhs)
if neweq != (lhs - rhs):
return _solveset(neweq, symbol, domain) # try again with _solveset
if not (isinstance(lhs, Add) and len(lhs.args) == 2):
# solving for the sum of more than two powers is possible
# but not yet implemented
return unsolved_result
if rhs != 0:
return unsolved_result
a, b = list(ordered(lhs.args))
a_term = a.as_independent(symbol)[1]
b_term = b.as_independent(symbol)[1]
a_base, a_exp = a_term.as_base_exp()
b_base, b_exp = b_term.as_base_exp()
if domain.is_subset(S.Reals):
conditions = And(
a_base > 0,
b_base > 0,
Eq(im(a_exp), 0),
Eq(im(b_exp), 0))
else:
conditions = And(
Ne(a_base, 0),
Ne(b_base, 0))
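    # take the log of both terms (with b moved to the other side as -b),
    # expand the logs, and let solveset handle the resulting equation,
    # which is polynomial in symbol when the exponents are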
L, R = map(lambda i: expand_log(log(i), force=True), (a, -b))
solutions = _solveset(L - R, symbol, domain)
return ConditionSet(symbol, conditions, solutions)
def _is_exponential(f, symbol):
r"""
Return ``True`` if one or more terms contain ``symbol`` only in
exponents, else ``False``.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Examples
========
>>> from sympy import symbols, cos, exp
>>> from sympy.solvers.solveset import _is_exponential as check
>>> x, y = symbols('x y')
>>> check(y, y)
False
>>> check(x**y - 1, y)
True
>>> check(x**y*2**y - 1, y)
True
>>> check(exp(x + 3) + 3**x, x)
True
>>> check(cos(2**x), x)
False
* Philosophy behind the helper
The function extracts each term of the equation and checks if it is
of exponential form w.r.t ``symbol``.
"""
rv = False
for expr_arg in _term_factors(f):
if symbol not in expr_arg.free_symbols:
continue
if (isinstance(expr_arg, Pow) and
symbol not in expr_arg.base.free_symbols or
isinstance(expr_arg, exp)):
rv = True # symbol in exponent
else:
return False # dependent on symbol in non-exponential way
return rv
def _solve_logarithm(lhs, rhs, symbol, domain):
r"""
Helper to solve logarithmic equations which are reducible
to a single instance of `\log`.
    Logarithmic equations are (currently) the equations that contain
`\log` terms which can be reduced to a single `\log` term or
a constant using various logarithmic identities.
For example:
.. math:: \log(x) + \log(x - 4)
can be reduced to:
.. math:: \log(x(x - 4))
Parameters
==========
lhs, rhs : Expr
The logarithmic equation to be solved, `lhs = rhs`
symbol : Symbol
The variable in which the equation is solved
domain : Set
A set over which the equation is solved.
Returns
=======
A set of solutions satisfying the given equation.
A ``ConditionSet`` if the equation is unsolvable.
Examples
========
>>> from sympy import symbols, log, S
>>> from sympy.solvers.solveset import _solve_logarithm as solve_log
>>> x = symbols('x')
>>> f = log(x - 3) + log(x + 3)
>>> solve_log(f, 0, x, S.Reals)
{-sqrt(10), sqrt(10)}
* Proof of correctness
    A logarithm is another way of writing an exponent and is defined by
.. math:: {\log_b x} = y \enspace if \enspace b^y = x
When one side of the equation contains a single logarithm, the
equation can be solved by rewriting the equation as an equivalent
exponential equation as defined above. But if one side contains
more than one logarithm, we need to use the properties of logarithm
to condense it into a single logarithm.
Take for example
.. math:: \log(2x) - 15 = 0
    which contains a single logarithm, so we can directly rewrite it in
exponential form as
.. math:: x = \frac{e^{15}}{2}
But if the equation has more than one logarithm as
.. math:: \log(x - 3) + \log(x + 3) = 0
we use logarithmic identities to convert it into a reduced form
Using,
.. math:: \log(a) + \log(b) = \log(ab)
the equation becomes,
.. math:: \log((x - 3)(x + 3))
This equation contains one logarithm and can be solved by rewriting
to exponents.
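    A rough sketch of that reduction (the helper essentially delegates to
    ``logcombine`` and then back to ``solveset``):
    .. code-block:: python
        from sympy import symbols, log, logcombine, solveset, S
        x = symbols('x')
        f = log(x - 3) + log(x + 3)
        g = logcombine(f, force=True)  # log((x - 3)*(x + 3))
        solveset(g, x, S.Reals)        # {-sqrt(10), sqrt(10)}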
"""
new_lhs = logcombine(lhs, force=True)
new_f = new_lhs - rhs
return _solveset(new_f, symbol, domain)
def _is_logarithmic(f, symbol):
r"""
Return ``True`` if the equation is in the form
`a\log(f(x)) + b\log(g(x)) + ... + c` else ``False``.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Returns
=======
``True`` if the equation is logarithmic otherwise ``False``.
Examples
========
>>> from sympy import symbols, tan, log
>>> from sympy.solvers.solveset import _is_logarithmic as check
>>> x, y = symbols('x y')
>>> check(log(x + 2) - log(x + 3), x)
True
>>> check(tan(log(2*x)), x)
False
>>> check(x*log(x), x)
False
>>> check(x + log(x), x)
False
>>> check(y + log(x), x)
True
* Philosophy behind the helper
The function extracts each term and checks whether it is
logarithmic w.r.t ``symbol``.
"""
rv = False
for term in Add.make_args(f):
saw_log = False
for term_arg in Mul.make_args(term):
if symbol not in term_arg.free_symbols:
continue
if isinstance(term_arg, log):
if saw_log:
return False # more than one log in term
saw_log = True
else:
return False # dependent on symbol in non-log way
if saw_log:
rv = True
return rv
def _is_lambert(f, symbol):
r"""
If this returns ``False`` then the Lambert solver (``_solve_lambert``) will not be called.
Explanation
===========
Quick check for cases that the Lambert solver might be able to handle.
    1. Equations containing more than two operands and `symbol`-dependent terms
       involving any of `Pow`, `exp`, `HyperbolicFunction`, `TrigonometricFunction`
       or `log`.
    2. For `Pow` and `exp` the exponent should contain `symbol`, whereas for
       `HyperbolicFunction`, `TrigonometricFunction` and `log` the argument should
       contain `symbol`.
    3. For `HyperbolicFunction` and `TrigonometricFunction` the number of
       trigonometric functions in the equation should be less than the number of
       symbol-dependent terms (since `A*cos(x) + B*sin(x) - c` is not of the
       Lambert type).
Some forms of lambert equations are:
1. X**X = C
2. X*(B*log(X) + D)**A = C
3. A*log(B*X + A) + d*X = C
4. (B*X + A)*exp(d*X + g) = C
5. g*exp(B*X + h) - B*X = C
6. A*D**(E*X + g) - B*X = C
7. A*cos(X) + B*sin(X) - D*X = C
8. A*cosh(X) + B*sinh(X) - D*X = C
Where X is any variable,
A, B, C, D, E are any constants,
g, h are linear functions or log terms.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Returns
=======
If this returns ``False`` then the Lambert solver (``_solve_lambert``) will not be called.
Examples
========
>>> from sympy.solvers.solveset import _is_lambert
>>> from sympy import symbols, cosh, sinh, log
>>> x = symbols('x')
>>> _is_lambert(3*log(x) - x*log(3), x)
True
>>> _is_lambert(log(log(x - 3)) + log(x-3), x)
True
>>> _is_lambert(cosh(x) - sinh(x), x)
False
>>> _is_lambert((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x)
True
See Also
========
_solve_lambert
"""
term_factors = list(_term_factors(f.expand()))
# total number of symbols in equation
no_of_symbols = len([arg for arg in term_factors if arg.has(symbol)])
# total number of trigonometric terms in equation
no_of_trig = len([arg for arg in term_factors \
if arg.has(HyperbolicFunction, TrigonometricFunction)])
if f.is_Add and no_of_symbols >= 2:
# `log`, `HyperbolicFunction`, `TrigonometricFunction` should have symbols
# and no_of_trig < no_of_symbols
lambert_funcs = (log, HyperbolicFunction, TrigonometricFunction)
if any(isinstance(arg, lambert_funcs)\
for arg in term_factors if arg.has(symbol)):
if no_of_trig < no_of_symbols:
return True
# here, `Pow`, `exp` exponent should have symbols
elif any(isinstance(arg, (Pow, exp)) \
for arg in term_factors if (arg.as_base_exp()[1]).has(symbol)):
return True
return False
def _transolve(f, symbol, domain):
r"""
Function to solve transcendental equations. It is a helper to
``solveset`` and should be used internally. ``_transolve``
currently supports the following class of equations:
- Exponential equations
- Logarithmic equations
Parameters
==========
f : Any transcendental equation that needs to be solved.
This needs to be an expression, which is assumed
to be equal to ``0``.
symbol : The variable for which the equation is solved.
This needs to be of class ``Symbol``.
domain : A set over which the equation is solved.
This needs to be of class ``Set``.
Returns
=======
Set
A set of values for ``symbol`` for which ``f`` is equal to
zero. An ``EmptySet`` is returned if ``f`` does not have solutions
in respective domain. A ``ConditionSet`` is returned as unsolved
object if algorithms to evaluate complete solution are not
yet implemented.
How to use ``_transolve``
=========================
``_transolve`` should not be used as an independent function, because
    it assumes that the equation (``f``) and the ``symbol`` come from
``solveset`` and might have undergone a few modification(s).
To use ``_transolve`` as an independent function the equation (``f``)
and the ``symbol`` should be passed as they would have been by
``solveset``.
Examples
========
>>> from sympy.solvers.solveset import _transolve as transolve
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy import symbols, S, pprint
>>> x = symbols('x', real=True) # assumption added
>>> transolve(5**(x - 3) - 3**(2*x + 1), x, S.Reals)
{-(log(3) + 3*log(5))/(-log(5) + 2*log(3))}
How ``_transolve`` works
========================
``_transolve`` uses two types of helper functions to solve equations
of a particular class:
Identifying helpers: To determine whether a given equation
belongs to a certain class of equation or not. Returns either
``True`` or ``False``.
Solving helpers: Once an equation is identified, a corresponding
helper either solves the equation or returns a form of the equation
that ``solveset`` might better be able to handle.
* Philosophy behind the module
The purpose of ``_transolve`` is to take equations which are not
already polynomial in their generator(s) and to either recast them
as such through a valid transformation or to solve them outright.
A pair of helper functions for each class of supported
transcendental functions are employed for this purpose. One
identifies the transcendental form of an equation and the other
either solves it or recasts it into a tractable form that can be
solved by ``solveset``.
For example, an equation in the form `ab^{f(x)} - cd^{g(x)} = 0`
can be transformed to
`\log(a) + f(x)\log(b) - \log(c) - g(x)\log(d) = 0`
(under certain assumptions) and this can be solved with ``solveset``
if `f(x)` and `g(x)` are in polynomial form.
How ``_transolve`` is better than ``_tsolve``
=============================================
1) Better output
``_transolve`` provides expressions in a more simplified form.
Consider a simple exponential equation
>>> f = 3**(2*x) - 2**(x + 3)
>>> pprint(transolve(f, x, S.Reals), use_unicode=False)
-3*log(2)
{------------------}
-2*log(3) + log(2)
>>> pprint(tsolve(f, x), use_unicode=False)
/ 3 \
| --------|
| log(2/9)|
[-log\2 /]
2) Extensible
The API of ``_transolve`` is designed such that it is easily
extensible, i.e. the code that solves a given class of
equations is encapsulated in a helper and not mixed in with
the code of ``_transolve`` itself.
3) Modular
    ``_transolve`` is designed to be modular, i.e., for every class of
    equation a separate helper for identification and solving is
    implemented. This makes it easy to change or modify any of the
    methods implemented directly in the helpers without interfering
    with the actual structure of the API.
4) Faster Computation
    Solving an equation via ``_transolve`` is much faster than via
    ``_tsolve``. In ``solve``, attempts are made to compute every possibility
    to get the solutions, and this series of attempts makes solving somewhat
    slow. In ``_transolve``, computation begins only after a particular
type of equation is identified.
How to add new class of equations
=================================
Adding a new class of equation solver is a three-step procedure:
- Identify the type of the equations
Determine the type of the class of equations to which they belong:
it could be of ``Add``, ``Pow``, etc. types. Separate internal functions
are used for each type. Write identification and solving helpers
and use them from within the routine for the given type of equation
(after adding it, if necessary). Something like:
.. code-block:: python
def add_type(lhs, rhs, x):
....
if _is_exponential(lhs, x):
new_eq = _solve_exponential(lhs, rhs, x)
....
rhs, lhs = eq.as_independent(x)
if lhs.is_Add:
result = add_type(lhs, rhs, x)
- Define the identification helper.
- Define the solving helper.
    Apart from this, a few other things need to be taken care of while
adding an equation solver:
- Naming conventions:
Name of the identification helper should be as
``_is_class`` where class will be the name or abbreviation
of the class of equation. The solving helper will be named as
``_solve_class``.
For example: for exponential equations it becomes
    ``_is_exponential`` and ``_solve_exponential``.
- The identifying helpers should take two input parameters,
the equation to be checked and the variable for which a solution
is being sought, while solving helpers would require an additional
domain parameter.
- Be sure to consider corner cases.
- Add tests for each helper.
- Add a docstring to your helper that describes the method
implemented.
The documentation of the helpers should identify:
- the purpose of the helper,
- the method used to identify and solve the equation,
- a proof of correctness
- the return values of the helpers
"""
def add_type(lhs, rhs, symbol, domain):
"""
Helper for ``_transolve`` to handle equations of
``Add`` type, i.e. equations taking the form as
``a*f(x) + b*g(x) + .... = c``.
For example: 4**x + 8**x = 0
"""
result = ConditionSet(symbol, Eq(lhs - rhs, 0), domain)
# check if it is exponential type equation
if _is_exponential(lhs, symbol):
result = _solve_exponential(lhs, rhs, symbol, domain)
# check if it is logarithmic type equation
elif _is_logarithmic(lhs, symbol):
result = _solve_logarithm(lhs, rhs, symbol, domain)
return result
result = ConditionSet(symbol, Eq(f, 0), domain)
# invert_complex handles the call to the desired inverter based
# on the domain specified.
lhs, rhs_s = invert_complex(f, 0, symbol, domain)
if isinstance(rhs_s, FiniteSet):
assert (len(rhs_s.args)) == 1
rhs = rhs_s.args[0]
if lhs.is_Add:
result = add_type(lhs, rhs, symbol, domain)
else:
result = rhs_s
return result
def solveset(f, symbol=None, domain=S.Complexes):
r"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An :class:`~.EmptySet` is returned if `f` is False or nonzero.
A :class:`~.ConditionSet` is returned as unsolved object if algorithms
to evaluate complete solution are not yet implemented.
``solveset`` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
Notes
=====
Python interprets 0 and 1 as False and True, respectively, but
in this function they refer to solutions of an expression. So 0 and 1
return the domain and EmptySet, respectively, while True and False
return the opposite (as they are assumed to be solutions of relational
expressions).
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S, Eq
>>> from sympy.solvers.solveset import solveset, solveset_real
* The default domain is complex. Not specifying a domain will lead
to the solving of the equation in the complex domain (and this
is not affected by the assumptions on the symbol):
>>> x = Symbol('x')
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers}
>>> x = Symbol('x', real=True)
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers}
* If you want to use ``solveset`` to solve the equation in the
real domain, provide a real domain. (Using ``solveset_real``
does this automatically.)
>>> R = S.Reals
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, R)
{0}
>>> solveset_real(exp(x) - 1, x)
{0}
The solution is unaffected by assumptions on the symbol:
>>> p = Symbol('p', positive=True)
>>> pprint(solveset(p**2 - 4))
{-2, 2}
When a :class:`~.ConditionSet` is returned, symbols with assumptions that
would alter the set are replaced with more generic symbols:
>>> i = Symbol('i', imaginary=True)
>>> solveset(Eq(i**2 + i*sin(i), 1), i, domain=S.Reals)
ConditionSet(_R, Eq(_R**2 + _R*sin(_R) - 1, 0), Reals)
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, R)
Interval.open(0, oo)
"""
f = sympify(f)
symbol = sympify(symbol)
if f is S.true:
return domain
if f is S.false:
return S.EmptySet
if not isinstance(f, (Expr, Relational, Number)):
raise ValueError("%s is not a valid SymPy expression" % f)
if not isinstance(symbol, (Expr, Relational)) and symbol is not None:
raise ValueError("%s is not a valid SymPy symbol" % (symbol,))
if not isinstance(domain, Set):
raise ValueError("%s is not a valid domain" %(domain))
free_symbols = f.free_symbols
if f.has(Piecewise):
f = piecewise_fold(f)
if symbol is None and not free_symbols:
b = Eq(f, 0)
if b is S.true:
return domain
elif b is S.false:
return S.EmptySet
else:
raise NotImplementedError(filldedent('''
relationship between value and 0 is unknown: %s''' % b))
if symbol is None:
if len(free_symbols) == 1:
symbol = free_symbols.pop()
elif free_symbols:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not isinstance(symbol, Symbol):
f, s, swap = recast_to_symbols([f], [symbol])
# the xreplace will be needed if a ConditionSet is returned
return solveset(f[0], s[0], domain).xreplace(swap)
# solveset should ignore assumptions on symbols
if symbol not in _rc:
x = _rc[0] if domain.is_subset(S.Reals) else _rc[1]
rv = solveset(f.xreplace({symbol: x}), x, domain)
# try to use the original symbol if possible
try:
_rv = rv.xreplace({x: symbol})
except TypeError:
_rv = rv
if rv.dummy_eq(_rv):
rv = _rv
return rv
# Abs has its own handling method which avoids the
# rewriting property that the first piece of abs(x)
# is for x >= 0 and the 2nd piece for x < 0 -- solutions
# can look better if the 2nd condition is x <= 0. Since
# the solution is a set, duplication of results is not
# an issue, e.g. {y, -y} when y is 0 will be {0}
f, mask = _masked(f, Abs)
f = f.rewrite(Piecewise) # everything that's not an Abs
for d, e in mask:
# everything *in* an Abs
e = e.func(e.args[0].rewrite(Piecewise))
f = f.xreplace({d: e})
f = piecewise_fold(f)
return _solveset(f, symbol, domain, _check=True)
def solveset_real(f, symbol):
return solveset(f, symbol, S.Reals)
def solveset_complex(f, symbol):
return solveset(f, symbol, S.Complexes)
def _solveset_multi(eqs, syms, domains):
'''Basic implementation of a multivariate solveset.
For internal use (not ready for public consumption)'''
rep = {}
for sym, dom in zip(syms, domains):
if dom is S.Reals:
rep[sym] = Symbol(sym.name, real=True)
eqs = [eq.subs(rep) for eq in eqs]
syms = [sym.subs(rep) for sym in syms]
syms = tuple(syms)
if len(eqs) == 0:
return ProductSet(*domains)
if len(syms) == 1:
sym = syms[0]
domain = domains[0]
solsets = [solveset(eq, sym, domain) for eq in eqs]
solset = Intersection(*solsets)
return ImageSet(Lambda((sym,), (sym,)), solset).doit()
eqs = sorted(eqs, key=lambda eq: len(eq.free_symbols & set(syms)))
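    # Greedy elimination: take an equation, try to solve it for one of its
    # symbols as a FiniteSet, substitute each root into the remaining
    # equations and recurse on the reduced system, then lift the partial
    # solutions back to full-length tuples via an ImageSet.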
for n in range(len(eqs)):
sols = []
all_handled = True
for sym in syms:
if sym not in eqs[n].free_symbols:
continue
sol = solveset(eqs[n], sym, domains[syms.index(sym)])
if isinstance(sol, FiniteSet):
i = syms.index(sym)
symsp = syms[:i] + syms[i+1:]
domainsp = domains[:i] + domains[i+1:]
eqsp = eqs[:n] + eqs[n+1:]
for s in sol:
eqsp_sub = [eq.subs(sym, s) for eq in eqsp]
sol_others = _solveset_multi(eqsp_sub, symsp, domainsp)
fun = Lambda((symsp,), symsp[:i] + (s,) + symsp[i:])
sols.append(ImageSet(fun, sol_others).doit())
else:
all_handled = False
if all_handled:
return Union(*sols)
def solvify(f, symbol, domain):
"""Solves an equation using solveset and returns the solution in accordance
with the `solve` output API.
Returns
=======
We classify the output based on the type of solution returned by `solveset`.
    Solution    |    Output
    ----------------------------------------
    FiniteSet   | list
    ImageSet,   | list (if `f` is periodic)
    Union       |
    Union       | list (with FiniteSet)
    EmptySet    | empty list
    Others      | None
Raises
======
NotImplementedError
A ConditionSet is the input.
Examples
========
>>> from sympy.solvers.solveset import solvify
>>> from sympy.abc import x
>>> from sympy import S, tan, sin, exp
>>> solvify(x**2 - 9, x, S.Reals)
[-3, 3]
>>> solvify(sin(x) - 1, x, S.Reals)
[pi/2]
>>> solvify(tan(x), x, S.Reals)
[0]
>>> solvify(exp(x) - 1, x, S.Complexes)
>>> solvify(exp(x) - 1, x, S.Reals)
[0]
"""
solution_set = solveset(f, symbol, domain)
result = None
if solution_set is S.EmptySet:
result = []
elif isinstance(solution_set, ConditionSet):
raise NotImplementedError('solveset is unable to solve this equation.')
elif isinstance(solution_set, FiniteSet):
result = list(solution_set)
else:
period = periodicity(f, symbol)
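        # for a periodic f, report only the solutions falling inside one
        # fundamental period [0, period) instead of the infinite ImageSet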
if period is not None:
solutions = S.EmptySet
iter_solutions = ()
if isinstance(solution_set, ImageSet):
iter_solutions = (solution_set,)
elif isinstance(solution_set, Union):
if all(isinstance(i, ImageSet) for i in solution_set.args):
iter_solutions = solution_set.args
for solution in iter_solutions:
solutions += solution.intersect(Interval(0, period, False, True))
if isinstance(solutions, FiniteSet):
result = list(solutions)
else:
solution = solution_set.intersect(domain)
if isinstance(solution, Union):
                # only FiniteSet members inside a Union are of concern here,
                # not ImageSet; this could be extended if required
if any(isinstance(i, FiniteSet) for i in solution.args):
result = [sol for soln in solution.args \
for sol in soln.args if isinstance(soln,FiniteSet)]
else:
return None
elif isinstance(solution, FiniteSet):
                result = list(solution)
return result
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_coeffs(eq, *syms, **_kw):
"""Return a list whose elements are the coefficients of the
corresponding symbols in the sum of terms in ``eq``.
The additive constant is returned as the last element of the
list.
Raises
======
NonlinearError
The equation contains a nonlinear term
Examples
========
>>> from sympy.solvers.solveset import linear_coeffs
>>> from sympy.abc import x, y, z
>>> linear_coeffs(3*x + 2*y - 1, x, y)
[3, 2, -1]
It is not necessary to expand the expression:
>>> linear_coeffs(x + y*(z*(x*3 + 2) + 3), x)
[3*y*z + 1, y*(2*z + 3)]
But if there are nonlinear or cross terms -- even if they would
cancel after simplification -- an error is raised so the situation
does not pass silently past the caller's attention:
>>> eq = 1/x*(x - 1) + 1/x
>>> linear_coeffs(eq.expand(), x)
[0, 1]
>>> linear_coeffs(eq, x)
Traceback (most recent call last):
...
NonlinearError: nonlinear term encountered: 1/x
>>> linear_coeffs(x*(y + 1) - x*y, x, y)
Traceback (most recent call last):
...
NonlinearError: nonlinear term encountered: x*(y + 1)
"""
d = defaultdict(list)
eq = _sympify(eq)
symset = set(syms)
has = eq.free_symbols & symset
if not has:
return [S.Zero]*len(syms) + [eq]
c, terms = eq.as_coeff_add(*has)
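    # c collects the part of eq that is free of the target symbols; each
    # term in ``terms`` must reduce to coeff*symbol (or coeff times a
    # linear Add) for the expression to be linear in the requested symbols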
d[0].extend(Add.make_args(c))
for t in terms:
m, f = t.as_coeff_mul(*has)
if len(f) != 1:
break
f = f[0]
if f in symset:
d[f].append(m)
elif f.is_Add:
d1 = linear_coeffs(f, *has, **{'dict': True})
d[0].append(m*d1.pop(0))
for xf, vf in d1.items():
d[xf].append(m*vf)
else:
break
else:
for k, v in d.items():
d[k] = Add(*v)
if not _kw:
return [d.get(s, S.Zero) for s in syms] + [d[0]]
return d # default is still list but this won't matter
raise NonlinearError('nonlinear term encountered: %s' % t)
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. Element ``M[i, j]`` corresponds to the coefficient
of the jth symbol in the ith equation.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system will return $A$ and $b$ as:
$$ A = \left[\begin{array}{ccc}
4 & 2 & 3 \\
3 & 1 & 1 \\
2 & 4 & 9
\end{array}\right] \ \ b = \left[\begin{array}{c}
1 \\ -6 \\ 2
\end{array}\right] $$
The only simplification performed is to convert
``Eq(a, b)`` $\Rightarrow a - b$.
Raises
======
NonlinearError
The equations contain a nonlinear term.
ValueError
The symbols are not given or are not unique.
Examples
========
>>> from sympy import linear_eq_to_matrix, symbols
>>> c, x, y, z = symbols('c, x, y, z')
The coefficients (numerical or symbolic) of the symbols will
be returned as matrices:
>>> eqns = [c*x + z - 1 - c, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[c, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[c + 1],
[ 0],
[ 0]])
This routine does not simplify expressions and will raise an error
if nonlinearity is encountered:
>>> eqns = [
... (x**2 - 3*x)/(x - 3) - 3,
... y**2 - 3*y - y*(y - 4) + x - 4]
>>> linear_eq_to_matrix(eqns, [x, y])
Traceback (most recent call last):
...
NonlinearError:
The term (x**2 - 3*x)/(x - 3) is nonlinear in {x, y}
Simplifying these equations will discard the removable singularity
    in the first and reveal the linear structure of the second:
>>> [e.simplify() for e in eqns]
[x - 3, x + y - 4]
Any such simplification needed to eliminate nonlinear terms must
be done before calling this routine.
"""
if not symbols:
raise ValueError(filldedent('''
Symbols must be given, for which coefficients
are to be found.
'''))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
for i in symbols:
if not isinstance(i, Symbol):
raise ValueError(filldedent('''
Expecting a Symbol but got %s
''' % i))
if has_dups(symbols):
raise ValueError('Symbols must be unique')
equations = sympify(equations)
if isinstance(equations, MatrixBase):
equations = list(equations)
elif isinstance(equations, (Expr, Eq)):
equations = [equations]
elif not is_sequence(equations):
raise ValueError(filldedent('''
Equation(s) must be given as a sequence, Expr,
Eq or Matrix.
'''))
A, b = [], []
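    # for each equation, the last entry from linear_coeffs is the constant
    # term; it moves, negated, to the right-hand side vector b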
for i, f in enumerate(equations):
if isinstance(f, Equality):
f = f.rewrite(Add, evaluate=False)
coeff_list = linear_coeffs(f, *symbols)
b.append(-coeff_list.pop())
A.append(coeff_list)
A, b = map(Matrix, (A, b))
return A, b
def linsolve(system, *symbols):
r"""
Solve system of $N$ linear equations with $M$ variables; both
underdetermined and overdetermined systems are supported.
The possible number of solutions is zero, one or infinite.
    If there are no solutions, ``EmptySet`` is returned, whereas infinite
solutions are represented parametrically in terms of the given
symbols. For unique solution a :class:`~.FiniteSet` of ordered tuples
is returned.
All standard input formats are supported:
For the given set of equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented matrix form, ``system`` given below:
    $$ \text{system} = \left[\begin{array}{cccc}
3 & 2 & -1 & 1\\
2 & -2 & 4 & -2\\
2 & -1 & 2 & 0
\end{array}\right] $$
::
system = Matrix([[3, 2, -1, 1], [2, -2, 4, -2], [2, -1, 2, 0]])
* List of equations form
::
        system = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, 2*x - y + 2*z]
* Input $A$ and $b$ in matrix form (from $Ax = b$) are given as:
$$ A = \left[\begin{array}{ccc}
3 & 2 & -1 \\
2 & -2 & 4 \\
2 & -1 & 2
\end{array}\right] \ \ b = \left[\begin{array}{c}
1 \\ -2 \\ 0
\end{array}\right] $$
::
A = Matrix([[3, 2, -1], [2, -2, 4], [2, -1, 2]])
b = Matrix([[1], [-2], [0]])
system = (A, b)
Symbols can always be passed but are actually only needed
when 1) a system of equations is being passed and 2) the
system is passed as an underdetermined matrix and one wants
to control the name of the free variables in the result.
An error is raised if no symbols are used for case 1, but if
no symbols are provided for case 2, internally generated symbols
will be provided. When providing symbols for case 2, there should
    be at least as many symbols as there are columns in matrix A.
The algorithm used here is Gauss-Jordan elimination, which
    results, after elimination, in a matrix in reduced row echelon form.
Returns
=======
A FiniteSet containing an ordered tuple of values for the
unknowns for which the `system` has a solution. (Wrapping
the tuple in FiniteSet is used to maintain a consistent
output format throughout solveset.)
Returns EmptySet, if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy import Matrix, linsolve, symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
{(-1, 2, 0)}
* Parametric Solution: In case the system is underdetermined, the
function will return a parametric solution in terms of the given
symbols. Those that are free will be returned unchanged. e.g. in
the system below, `z` is returned as the solution for variable z;
it can take on any value.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), x, y, z)
{(z - 1, 2 - 2*z, z)}
If no symbols are given, internally generated symbols will be used.
The ``tau0`` in the third position indicates (as before) that the third
variable -- whatever it is named -- can take on any value:
>>> linsolve((A, b))
{(tau0 - 1, 2 - 2*tau0, tau0)}
* List of equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + y/2 - z]
>>> linsolve(Eqns, x, y, z)
{(1, -2, -2)}
* Augmented matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
{(3/10, 2/5, 0)}
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
{((-b*f + c*e)/(a*e - b*d), (a*f - c*d)/(a*e - b*d))}
* A degenerate system returns solution as set of given
symbols.
>>> system = Matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]))
>>> linsolve(system, x, y)
{(x, y)}
* For an empty system linsolve returns empty set
>>> linsolve([], x)
EmptySet
* An error is raised if, after expansion, any nonlinearity
is detected:
>>> linsolve([x*(1/x - 1), (y - 1)**2 - y**2 + 1], x, y)
{(1, 1)}
>>> linsolve([x**2 - 1], x)
Traceback (most recent call last):
...
NonlinearError:
nonlinear term encountered: x**2
"""
if not system:
return S.EmptySet
# If second argument is an iterable
if symbols and hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
sym_gen = isinstance(symbols, GeneratorType)
b = None # if we don't get b the input was bad
# unpack system
if hasattr(system, '__iter__'):
# 1). (A, b)
if len(system) == 2 and isinstance(system[0], MatrixBase):
A, b = system
# 2). (eq1, eq2, ...)
if not isinstance(system[0], MatrixBase):
if sym_gen or not symbols:
raise ValueError(filldedent('''
When passing a system of equations, the explicit
symbols for which a solution is being sought must
be given as a sequence, too.
'''))
#
# Pass to the sparse solver implemented in polys. It is important
# that we do not attempt to convert the equations to a matrix
# because that would be very inefficient for large sparse systems
# of equations.
#
eqs = system
eqs = [sympify(eq) for eq in eqs]
try:
sol = _linsolve(eqs, symbols)
except PolyNonlinearError as exc:
# e.g. cos(x) contains an element of the set of generators
raise NonlinearError(str(exc))
if sol is None:
return S.EmptySet
sol = FiniteSet(Tuple(*(sol.get(sym, sym) for sym in symbols)))
return sol
elif isinstance(system, MatrixBase) and not (
symbols and not isinstance(symbols, GeneratorType) and
isinstance(symbols[0], MatrixBase)):
# 3). A augmented with b
A, b = system[:, :-1], system[:, -1:]
if b is None:
raise ValueError("Invalid arguments")
if sym_gen:
symbols = [next(symbols) for i in range(A.cols)]
if any(set(symbols) & (A.free_symbols | b.free_symbols)):
raise ValueError(filldedent('''
At least one of the symbols provided
already appears in the system to be solved.
One way to avoid this is to use Dummy symbols in
the generator, e.g. numbered_symbols('%s', cls=Dummy)
''' % symbols[0].name.rstrip('1234567890')))
if not symbols:
symbols = [Dummy() for _ in range(A.cols)]
name = _uniquely_named_symbol('tau', (A, b),
compare=lambda i: str(i).rstrip('1234567890')).name
gen = numbered_symbols(name)
else:
gen = None
# This is just a wrapper for solve_lin_sys
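        # build one Add expression per row, sum(A[i, j]*symbols[j]) - b[i],
        # and hand the whole system to the ring-based solver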
eqs = []
rows = A.tolist()
for rowi, bi in zip(rows, b):
terms = [elem * sym for elem, sym in zip(rowi, symbols) if elem]
terms.append(-bi)
eqs.append(Add(*terms))
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
sol = solve_lin_sys(eqs, ring, _raw=False)
if sol is None:
return S.EmptySet
#sol = {sym:val for sym, val in sol.items() if sym != val}
sol = FiniteSet(Tuple(*(sol.get(sym, sym) for sym in symbols)))
if gen is not None:
solsym = sol.free_symbols
rep = {sym: next(gen) for sym in symbols if sym in solsym}
sol = sol.subs(rep)
return sol
##############################################################################
# ------------------------------nonlinsolve ---------------------------------#
##############################################################################
def _return_conditionset(eqs, symbols):
# return conditionset
eqs = (Eq(lhs, 0) for lhs in eqs)
condition_set = ConditionSet(
Tuple(*symbols), And(*eqs), S.Complexes**len(symbols))
return condition_set
def substitution(system, symbols, result=[{}], known_symbols=[],
exclude=[], all_symbols=None):
r"""
    Solves the `system` using the substitution method. It is used in
    :func:`~.nonlinsolve`, which calls it when any equation(s) of the
    system is not a polynomial equation.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of symbols to be solved.
The variable(s) for which the system is solved
known_symbols : list of solved symbols
Values are known for these variable(s)
result : An empty list or list of dict
        If no symbol values are known then an empty list, otherwise a list
        of dicts with symbols as keys and their corresponding values.
    exclude : Set of expressions.
Mostly denominator expression(s) of the equations of the system.
Final solution should not satisfy these expressions.
all_symbols : known_symbols + symbols(unsolved).
Returns
=======
    A FiniteSet of ordered tuples of values of `all_symbols` for which the
    `system` has a solution. The order of values in a tuple matches the order
    of the symbols in the parameter `all_symbols`; if `all_symbols` is None,
    the order of the parameter `symbols` is used instead.
    Please note that a general FiniteSet is unordered; the solution returned
    here is not simply a FiniteSet of solutions but rather a FiniteSet whose
    first and only argument is an ordered tuple of solutions, so the returned
    solution is ordered. The solution could also have been returned as a bare
    tuple; the FiniteSet is just a wrapper `{}` around the tuple and has no
    significance other than maintaining a consistent output format throughout
    solveset.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not :class:`~.Symbol` type.
Examples
========
>>> from sympy import symbols, substitution
>>> x, y = symbols('x, y', real=True)
>>> substitution([x + y], [x], [{y: 1}], [y], set([]), [x, y])
{(-1, 1)}
* When you want a soln not satisfying $x + 1 = 0$
>>> substitution([x + y], [x], [{y: 1}], [y], set([x + 1]), [y, x])
EmptySet
>>> substitution([x + y], [x], [{y: 1}], [y], set([x - 1]), [y, x])
{(1, -1)}
>>> substitution([x + y - 1, y - x**2 + 5], [x, y])
{(-3, 4), (2, -1)}
* Returns both real and complex solution
>>> x, y, z = symbols('x, y, z')
>>> from sympy import exp, sin
>>> substitution([exp(x) - sin(y), y**2 - 4], [x, y])
{(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
(ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}
>>> eqs = [z**2 + exp(2*x) - sin(y), -3 + exp(-y)]
>>> substitution(eqs, [y, z])
{(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3)))),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, -sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers)),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers))}
"""
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if not is_sequence(symbols):
        msg = ('symbols should be given as a sequence, e.g. a list. '
               'Not type %s: %s')
raise TypeError(filldedent(msg % (type(symbols), symbols)))
if not getattr(symbols[0], 'is_Symbol', False):
msg = ('Iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
# By default `all_symbols` will be same as `symbols`
if all_symbols is None:
all_symbols = symbols
old_result = result
# storing complements and intersection for particular symbol
complements = {}
intersections = {}
# when total_solveset_call equals total_conditionset
# it means that solveset failed to solve all eqs.
total_conditionset = -1
total_solveset_call = -1
def _unsolved_syms(eq, sort=False):
"""Returns the unsolved symbol present
in the equation `eq`.
"""
free = eq.free_symbols
unsolved = (free - set(known_symbols)) & set(all_symbols)
if sort:
unsolved = list(unsolved)
unsolved.sort(key=default_sort_key)
return unsolved
# end of _unsolved_syms()
# sort such that equation with the fewest potential symbols is first.
    # i.e. the equation with the fewest variables comes first in the list.
eqs_in_better_order = list(
ordered(system, lambda _: len(_unsolved_syms(_))))
def add_intersection_complement(result, intersection_dict, complement_dict):
# If solveset has returned some intersection/complement
# for any symbol, it will be added in the final solution.
final_result = []
for res in result:
res_copy = res
for key_res, value_res in res.items():
intersect_set, complement_set = None, None
for key_sym, value_sym in intersection_dict.items():
if key_sym == key_res:
intersect_set = value_sym
for key_sym, value_sym in complement_dict.items():
if key_sym == key_res:
complement_set = value_sym
if intersect_set or complement_set:
new_value = FiniteSet(value_res)
if intersect_set and intersect_set != S.Complexes:
new_value = Intersection(new_value, intersect_set)
if complement_set:
new_value = Complement(new_value, complement_set)
if new_value is S.EmptySet:
res_copy = None
break
elif new_value.is_FiniteSet and len(new_value) == 1:
res_copy[key_res] = set(new_value).pop()
else:
res_copy[key_res] = new_value
if res_copy is not None:
final_result.append(res_copy)
return final_result
# end of def add_intersection_complement()
def _extract_main_soln(sym, sol, soln_imageset):
"""Separate the Complements, Intersections, ImageSet lambda expr and
        its base_set. This function returns the unmasked sol extracted from the
        different classes of sets and also records the extracted ImageSet elements
        in soln_imageset (a dict with the unmasked element as key and the
        ImageSet as value).
"""
# if there is union, then need to check
# Complement, Intersection, Imageset.
# Order should not be changed.
if isinstance(sol, ConditionSet):
# extracts any solution in ConditionSet
sol = sol.base_set
if isinstance(sol, Complement):
# extract solution and complement
complements[sym] = sol.args[1]
sol = sol.args[0]
# complement will be added at the end
# using `add_intersection_complement` method
# if there is union of Imageset or other in soln.
# no testcase is written for this if block
if isinstance(sol, Union):
sol_args = sol.args
sol = S.EmptySet
            # We need them in sequence, so append the FiniteSet elements
            # first and then the ImageSet or other elements.
for sol_arg2 in sol_args:
if isinstance(sol_arg2, FiniteSet):
sol += sol_arg2
else:
# ImageSet, Intersection, complement then
# append them directly
sol += FiniteSet(sol_arg2)
if isinstance(sol, Intersection):
# Interval/Set will be at 0th index always
if sol.args[0] not in (S.Reals, S.Complexes):
# Sometimes solveset returns soln with intersection
# S.Reals or S.Complexes. We don't consider that
# intersection.
intersections[sym] = sol.args[0]
sol = sol.args[1]
# after intersection and complement Imageset should
# be checked.
if isinstance(sol, ImageSet):
soln_imagest = sol
expr2 = sol.lamda.expr
sol = FiniteSet(expr2)
soln_imageset[expr2] = soln_imagest
if not isinstance(sol, FiniteSet):
sol = FiniteSet(sol)
return sol, soln_imageset
# end of def _extract_main_soln()
# helper function for _append_new_soln
def _check_exclude(rnew, imgset_yes):
rnew_ = rnew
if imgset_yes:
            # replace all dummy variables (ImageSet lambda variables)
            # with zero before `checksol`, so that `checksol` tests the
            # fundamental solution.
rnew_copy = rnew.copy()
dummy_n = imgset_yes[0]
for key_res, value_res in rnew_copy.items():
rnew_copy[key_res] = value_res.subs(dummy_n, 0)
rnew_ = rnew_copy
# satisfy_exclude == true if it satisfies the expr of `exclude` list.
try:
# something like : `Mod(-log(3), 2*I*pi)` can't be
# simplified right now, so `checksol` returns `TypeError`.
# when this issue is fixed this try block should be
# removed. Mod(-log(3), 2*I*pi) == -log(3)
satisfy_exclude = any(
checksol(d, rnew_) for d in exclude)
except TypeError:
satisfy_exclude = None
return satisfy_exclude
# end of def _check_exclude()
# helper function for _append_new_soln
def _restore_imgset(rnew, original_imageset, newresult):
restore_sym = set(rnew.keys()) & \
set(original_imageset.keys())
for key_sym in restore_sym:
img = original_imageset[key_sym]
rnew[key_sym] = img
if rnew not in newresult:
newresult.append(rnew)
# end of def _restore_imgset()
def _append_eq(eq, result, res, delete_soln, n=None):
u = Dummy('u')
if n:
eq = eq.subs(n, 0)
satisfy = eq if eq in (True, False) else checksol(u, u, eq, minimal=True)
if satisfy is False:
delete_soln = True
res = {}
else:
result.append(res)
return result, res, delete_soln
def _append_new_soln(rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult, eq=None):
"""If `rnew` (A dict <symbol: soln>) contains valid soln
append it to `newresult` list.
        `imgset_yes` is (dummy_var, base) if there was an imageset in the
        previously calculated result (otherwise ``False``). `original_imageset`
        is a dict of imageset expr and imageset from this result.
`soln_imageset` dict of imageset expr and imageset of new soln.
"""
satisfy_exclude = _check_exclude(rnew, imgset_yes)
delete_soln = False
# soln should not satisfy expr present in `exclude` list.
if not satisfy_exclude:
local_n = None
# if it is imageset
if imgset_yes:
local_n = imgset_yes[0]
base = imgset_yes[1]
if sym and sol:
                # when `sym` and `sol` are `None` it means there is no new
                # soln. In that case we will append rnew directly after
# substituting original imagesets in rnew values if present
# (second last line of this function using _restore_imgset)
dummy_list = list(sol.atoms(Dummy))
# use one dummy `n` which is in
# previous imageset
local_n_list = [
local_n for i in range(
0, len(dummy_list))]
dummy_zip = zip(dummy_list, local_n_list)
lam = Lambda(local_n, sol.subs(dummy_zip))
rnew[sym] = ImageSet(lam, base)
if eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln, local_n)
elif eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln)
elif sol in soln_imageset.keys():
rnew[sym] = soln_imageset[sol]
# restore original imageset
_restore_imgset(rnew, original_imageset, newresult)
else:
newresult.append(rnew)
elif satisfy_exclude:
delete_soln = True
rnew = {}
_restore_imgset(rnew, original_imageset, newresult)
return newresult, delete_soln
# end of def _append_new_soln()
def _new_order_result(result, eq):
        # separate into first and second priority: a `res` that makes `eq`
        # equal to zero should be used first, then the other results
        # (second priority). If this is not done we may miss some solns.
first_priority = []
second_priority = []
for res in result:
if not any(isinstance(val, ImageSet) for val in res.values()):
if eq.subs(res) == 0:
first_priority.append(res)
else:
second_priority.append(res)
if first_priority or second_priority:
return first_priority + second_priority
return result
def _solve_using_known_values(result, solver):
"""Solves the system using already known solution
(result contains the dict <symbol: value>).
solver is :func:`~.solveset_complex` or :func:`~.solveset_real`.
"""
# stores imageset <expr: imageset(Lambda(n, expr), base)>.
soln_imageset = {}
total_solvest_call = 0
total_conditionst = 0
# sort such that equation with the fewest potential symbols is first.
        # i.e. the equation with the fewest variables comes first
for index, eq in enumerate(eqs_in_better_order):
newresult = []
original_imageset = {}
# if imageset expr is used to solve other symbol
imgset_yes = False
result = _new_order_result(result, eq)
for res in result:
got_symbol = set() # symbols solved in one iteration
# find the imageset and use its expr.
for key_res, value_res in res.items():
if isinstance(value_res, ImageSet):
res[key_res] = value_res.lamda.expr
original_imageset[key_res] = value_res
dummy_n = value_res.lamda.expr.atoms(Dummy).pop()
(base,) = value_res.base_sets
imgset_yes = (dummy_n, base)
# update eq with everything that is known so far
eq2 = eq.subs(res).expand()
unsolved_syms = _unsolved_syms(eq2, sort=True)
if not unsolved_syms:
if res:
newresult, delete_res = _append_new_soln(
res, None, None, imgset_yes, soln_imageset,
original_imageset, newresult, eq2)
if delete_res:
                        # `delete_res` being True means that substituting
                        # `res` in eq2 does not give zero, or that `res`
                        # (a soln) is deleted because it satisfies an expr
                        # of the `exclude` list.
result.remove(res)
continue # skip as it's independent of desired symbols
depen1, depen2 = (eq2.rewrite(Add)).as_independent(*unsolved_syms)
if (depen1.has(Abs) or depen2.has(Abs)) and solver == solveset_complex:
# Absolute values cannot be inverted in the
# complex domain
continue
soln_imageset = {}
for sym in unsolved_syms:
not_solvable = False
try:
soln = solver(eq2, sym)
total_solvest_call += 1
soln_new = S.EmptySet
if isinstance(soln, Complement):
# separate solution and complement
complements[sym] = soln.args[1]
soln = soln.args[0]
# complement will be added at the end
if isinstance(soln, Intersection):
# Interval will be at 0th index always
if soln.args[0] != Interval(-oo, oo):
                            # sometimes solveset returns the soln
                            # intersected with S.Reals to confirm that
                            # the soln is in the domain S.Reals
intersections[sym] = soln.args[0]
soln_new += soln.args[1]
soln = soln_new if soln_new else soln
if index > 0 and solver == solveset_real:
# one symbol's real soln, another symbol may have
# corresponding complex soln.
if not isinstance(soln, (ImageSet, ConditionSet)):
soln += solveset_complex(eq2, sym) # might give ValueError with Abs
except (NotImplementedError, ValueError):
                    # solveset was not able to solve equation `eq2`; we may
                    # still get a soln later from another equation
continue
if isinstance(soln, ConditionSet):
if soln.base_set in (S.Reals, S.Complexes):
soln = S.EmptySet
                        # do not `continue`; we may still get a soln
                        # in terms of other symbol(s)
not_solvable = True
total_conditionst += 1
else:
soln = soln.base_set
if soln is not S.EmptySet:
soln, soln_imageset = _extract_main_soln(
sym, soln, soln_imageset)
for sol in soln:
# sol is not a `Union` since we checked it
# before this loop
sol, soln_imageset = _extract_main_soln(
sym, sol, soln_imageset)
sol = set(sol).pop()
free = sol.free_symbols
if got_symbol and any(
ss in free for ss in got_symbol
):
# sol depends on previously solved symbols
# then continue
continue
rnew = res.copy()
                        # put each solution in res and append the new
                        # result (the solution for symbol `sym`) to the
                        # new result list along with the old results.
for k, v in res.items():
if isinstance(v, Expr) and isinstance(sol, Expr):
# if any unsolved symbol is present
# Then subs known value
rnew[k] = v.subs(sym, sol)
# and add this new solution
if sol in soln_imageset.keys():
# replace all lambda variables with 0.
imgst = soln_imageset[sol]
rnew[sym] = imgst.lamda(
*[0 for i in range(0, len(
imgst.lamda.variables))])
else:
rnew[sym] = sol
newresult, delete_res = _append_new_soln(
rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult)
if delete_res:
                            # deleting `res` (a soln) since it satisfies
                            # an eq of the `exclude` list
result.remove(res)
# solution got for sym
if not not_solvable:
got_symbol.add(sym)
# next time use this new soln
if newresult:
result = newresult
return result, total_solvest_call, total_conditionst
    # end of def _solve_using_known_values()
new_result_real, solve_call1, cnd_call1 = _solve_using_known_values(
old_result, solveset_real)
new_result_complex, solve_call2, cnd_call2 = _solve_using_known_values(
old_result, solveset_complex)
# If total_solveset_call is equal to total_conditionset
# then solveset failed to solve all of the equations.
# In this case we return a ConditionSet here.
total_conditionset += (cnd_call1 + cnd_call2)
total_solveset_call += (solve_call1 + solve_call2)
if total_conditionset == total_solveset_call and total_solveset_call != -1:
return _return_conditionset(eqs_in_better_order, all_symbols)
# don't keep duplicate solutions
filtered_complex = []
for i in list(new_result_complex):
for j in list(new_result_real):
if i.keys() != j.keys():
continue
if all(a.dummy_eq(b) for a, b in zip(i.values(), j.values()) \
if type(a) != int or type(b) != int):
break
else:
filtered_complex.append(i)
# overall result
result = new_result_real + filtered_complex
result_all_variables = []
result_infinite = []
for res in result:
if not res:
            # res is an empty dict; nothing was solved for this result
continue
        # If len(res) < len(all_symbols) there are infinitely many solutions:
        # some or all of the solution depends on at least one symbol,
        # e.g. {x: y+2}; then the final soln is {x: y+2, y: y}
if len(res) < len(all_symbols):
solved_symbols = res.keys()
unsolved = list(filter(
lambda x: x not in solved_symbols, all_symbols))
for unsolved_sym in unsolved:
res[unsolved_sym] = unsolved_sym
result_infinite.append(res)
if res not in result_all_variables:
result_all_variables.append(res)
if result_infinite:
# we have general soln
# eg : [{x: -1, y : 1}, {x : -y, y: y}] then
# return [{x : -y, y : y}]
result_all_variables = result_infinite
if intersections or complements:
result_all_variables = add_intersection_complement(
result_all_variables, intersections, complements)
# convert to ordered tuple
result = S.EmptySet
for r in result_all_variables:
temp = [r[symb] for symb in all_symbols]
result += FiniteSet(tuple(temp))
return result
# end of def substitution()
def _solveset_work(system, symbols):
soln = solveset(system[0], symbols[0])
if isinstance(soln, FiniteSet):
_soln = FiniteSet(*[tuple((s,)) for s in soln])
return _soln
else:
return FiniteSet(tuple(FiniteSet(soln)))
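# A small illustration (hypothetical call): _solveset_work([x**2 - 4], [x])
# wraps the FiniteSet {-2, 2} returned by solveset into 1-tuples,
# giving {(-2,), (2,)}.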
def _handle_positive_dimensional(polys, symbols, denominators):
from sympy.polys.polytools import groebner
# substitution method where new system is groebner basis of the system
_symbols = list(symbols)
_symbols.sort(key=default_sort_key)
basis = groebner(polys, _symbols, polys=True)
new_system = []
for poly_eq in basis:
new_system.append(poly_eq.as_expr())
result = [{}]
result = substitution(
new_system, symbols, result, [],
denominators)
return result
# end of def _handle_positive_dimensional()
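# A minimal sketch of the idea (hypothetical system, not executed here): for
# the positive dimensional system [x + y - 1, 2*x + 2*y - 2] the Groebner
# basis reduces to [x + y - 1], and `substitution` then returns a solution
# parametrized by y, roughly {(1 - y, y)}.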
def _handle_zero_dimensional(polys, symbols, system):
# solve 0 dimensional poly system using `solve_poly_system`
result = solve_poly_system(polys, *symbols)
    # Some extra solutions may have been added because `unrad` was used
    # in `_separate_poly_nonpoly`, so check each one and remove it if it
    # is not actually a soln.
result_update = S.EmptySet
for res in result:
dict_sym_value = dict(list(zip(symbols, res)))
if all(checksol(eq, dict_sym_value) for eq in system):
result_update += FiniteSet(res)
return result_update
# end of def _handle_zero_dimensional()
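# A small sketch of why the checksol filter is needed (hypothetical input):
# for a system containing sqrt(x) + 2, `unrad` (applied earlier in
# `_separate_poly_nonpoly`) turns the equation into x - 4, whose root x = 4
# does not satisfy the original equation (sqrt(4) + 2 != 0), so checksol
# rejects it here.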
def _separate_poly_nonpoly(system, symbols):
polys = []
polys_expr = []
nonpolys = []
denominators = set()
poly = None
for eq in system:
# Store denom expressions that contain symbols
denominators.update(_simple_dens(eq, symbols))
# Convert equality to expression
if isinstance(eq, Equality):
eq = eq.rewrite(Add)
# try to remove sqrt and rational power
without_radicals = unrad(simplify(eq), *symbols)
if without_radicals:
eq_unrad, cov = without_radicals
if not cov:
eq = eq_unrad
if isinstance(eq, Expr):
eq = eq.as_numer_denom()[0]
poly = eq.as_poly(*symbols, extension=True)
elif simplify(eq).is_number:
continue
if poly is not None:
polys.append(poly)
polys_expr.append(poly.as_expr())
else:
nonpolys.append(eq)
return polys, polys_expr, nonpolys, denominators
# end of def _separate_poly_nonpoly()
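# A minimal sketch of the split (hypothetical system): for
# [x**2 + y - 2, exp(x) - y] with symbols (x, y), the first equation becomes
# a Poly and goes into `polys`/`polys_expr`, while exp(x) - y cannot be
# polified and ends up in `nonpolys`; `denominators` stays empty because no
# equation has a symbol in a denominator.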
def nonlinsolve(system, *symbols):
r"""
    Solve a system of $N$ nonlinear equations with $M$ variables, which means
    both under- and overdetermined systems are supported. Positive dimensional
    systems are also supported (a system with infinitely many solutions is said
    to be positive-dimensional). In a positive dimensional system the solution
    will depend on at least one symbol. Returns both real and complex solutions
    (if they exist). The possible number of solutions is zero, one or infinite.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of Symbols
        symbols should be given as a sequence, e.g. a list
Returns
=======
    A :class:`~.FiniteSet` of ordered tuples of values of `symbols` for which the
    `system` has a solution. The order of values in each tuple is the same as the
    order of the symbols in the parameter `symbols`.
    Please note that a general :class:`~.FiniteSet` is unordered; the solution
    returned here is not simply a :class:`~.FiniteSet` of solutions. Rather, it
    is a :class:`~.FiniteSet` of ordered tuples, i.e. the first and only
    argument to :class:`~.FiniteSet` is a tuple of solutions, which is
    ordered, and hence the returned solution is ordered.
    Also note that the solution could have been returned as an ordered tuple;
    FiniteSet is just a wrapper ``{}`` around the tuple. It has no other
    significance except that it is used to maintain a consistent
    output format throughout solveset.
For the given set of equations, the respective input types
are given below:
.. math:: xy - 1 = 0
.. math:: 4x^2 + y^2 - 5 = 0
::
system = [x*y - 1, 4*x**2 + y**2 - 5]
symbols = [x, y]
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not `Symbol` type.
Examples
========
>>> from sympy import symbols, nonlinsolve
>>> x, y, z = symbols('x, y, z', real=True)
>>> nonlinsolve([x*y - 1, 4*x**2 + y**2 - 5], [x, y])
{(-1, -1), (-1/2, -2), (1/2, 2), (1, 1)}
1. Positive dimensional system and complements:
>>> from sympy import pprint
>>> from sympy.polys.polytools import is_zero_dimensional
>>> a, b, c, d = symbols('a, b, c, d', extended_real=True)
>>> eq1 = a + b + c + d
>>> eq2 = a*b + b*c + c*d + d*a
>>> eq3 = a*b*c + b*c*d + c*d*a + d*a*b
>>> eq4 = a*b*c*d - 1
>>> system = [eq1, eq2, eq3, eq4]
>>> is_zero_dimensional(system)
False
>>> pprint(nonlinsolve(system, [a, b, c, d]), use_unicode=False)
-1 1 1 -1
{(---, -d, -, {d} \ {0}), (-, -d, ---, {d} \ {0})}
d d d d
>>> nonlinsolve([(x+y)**2 - 4, x + y - 2], [x, y])
{(2 - y, y)}
2. If some of the equations are non-polynomial then `nonlinsolve`
will call the ``substitution`` function and return real and complex solutions,
if present.
>>> from sympy import exp, sin
>>> nonlinsolve([exp(x) - sin(y), y**2 - 4], [x, y])
{(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
(ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}
    3. If the system is non-linear polynomial and zero-dimensional then it
    returns both real and complex solutions (if present) using
    :func:`~.solve_poly_system`:
>>> from sympy import sqrt
>>> nonlinsolve([x**2 - 2*y**2 -2, x*y - 2], [x, y])
{(-2, -1), (2, 1), (-sqrt(2)*I, sqrt(2)*I), (sqrt(2)*I, -sqrt(2)*I)}
    4. ``nonlinsolve`` can solve some linear (zero or positive dimensional)
    systems (because it uses the :func:`sympy.polys.polytools.groebner` function
    to get the Groebner basis and then uses the ``substitution`` function with
    the basis as the new `system`). But it is not recommended to solve a linear
    system using ``nonlinsolve``, because :func:`~.linsolve` is better for
    general linear systems.
>>> nonlinsolve([x + 2*y -z - 3, x - y - 4*z + 9, y + z - 4], [x, y, z])
{(3*z - 5, 4 - z, z)}
    5. A system of polynomial equations having only real solutions is
    solved using :func:`~.solve_poly_system`:
>>> e1 = sqrt(x**2 + y**2) - 10
>>> e2 = sqrt(y**2 + (-x + 10)**2) - 3
>>> nonlinsolve((e1, e2), (x, y))
{(191/20, -3*sqrt(391)/20), (191/20, 3*sqrt(391)/20)}
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [x, y])
{(1, 2), (1 - sqrt(5), 2 + sqrt(5)), (1 + sqrt(5), 2 - sqrt(5))}
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [y, x])
{(2, 1), (2 - sqrt(5), 1 + sqrt(5)), (2 + sqrt(5), 1 - sqrt(5))}
6. It is better to use symbols instead of trigonometric functions or
:class:`~.Function`. For example, replace $\sin(x)$ with a symbol, replace
$f(x)$ with a symbol and so on. Get a solution from ``nonlinsolve`` and then
use :func:`~.solveset` to get the value of $x$.
    How nonlinsolve is better than old solver ``_solve_system``:
    =============================================================
    1. A positive dimensional system solver: nonlinsolve can return a
    solution for a positive dimensional system. It finds the
    Groebner basis of the positive dimensional system (call it the
    basis), then starts solving the equations (the equation with the
    fewest variables in the basis first) using solveset, substituting
    the solved values into the other equations (of the basis) to get a
    solution in terms of as few variables as possible. Here the important
    thing is how the known values are substituted and into which
    equations.
    2. Real and complex solutions: nonlinsolve returns both real
    and complex solutions. If all the equations in the system are polynomial
    then, using :func:`~.solve_poly_system`, both real and complex solutions
    are returned. If not all the equations in the system are polynomial, it
    goes to the ``substitution`` method with the polynomial and non-polynomial
    equation(s) to solve for the unsolved variables. There, to solve for a
    particular variable, solveset_real and solveset_complex are used. For both
    real and complex solutions ``_solve_using_known_values`` is used inside
    ``substitution`` (``substitution`` will be called when any non-polynomial
    equation is present). If a solution is valid its general solution is
    added to the final result.
    3. :class:`~.Complement` and :class:`~.Intersection` will be added:
    nonlinsolve maintains dicts for complements and intersections. If solveset
    finds complements or/and intersections with any interval or set during the
    execution of the ``substitution`` function, then the complement or/and
    intersection for that variable is added before returning the final
    solution.
"""
from sympy.polys.polytools import is_zero_dimensional
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
if not is_sequence(symbols) or not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise IndexError(filldedent(msg))
system, symbols, swap = recast_to_symbols(system, symbols)
if swap:
soln = nonlinsolve(system, symbols)
return FiniteSet(*[tuple(i.xreplace(swap) for i in s) for s in soln])
if len(system) == 1 and len(symbols) == 1:
return _solveset_work(system, symbols)
# main code of def nonlinsolve() starts from here
polys, polys_expr, nonpolys, denominators = _separate_poly_nonpoly(
system, symbols)
if len(symbols) == len(polys):
# If all the equations in the system are poly
if is_zero_dimensional(polys, symbols):
# finite number of soln (Zero dimensional system)
try:
return _handle_zero_dimensional(polys, symbols, system)
except NotImplementedError:
                # Right now this does not fail for any polynomial system of
                # equations. If `solve_poly_system` fails then the
                # `substitution` method will handle it.
result = substitution(
polys_expr, symbols, exclude=denominators)
return result
# positive dimensional system
res = _handle_positive_dimensional(polys, symbols, denominators)
if res is S.EmptySet and any(not p.domain.is_Exact for p in polys):
raise NotImplementedError("Equation not in exact domain. Try converting to rational")
else:
return res
else:
        # Not all the equations are polynomial.
        # Use the `substitution` method for the system.
result = substitution(
polys_expr + nonpolys, symbols, exclude=denominators)
return result
|
7fcacc00af2aa64095c2dbbddbdd0d50e84f941e17e4a9436037f79cae361863 | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
    - pdsolve() - Solves PDEs
    - classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from functools import reduce
from itertools import combinations_with_replacement
from sympy.simplify import simplify # type: ignore
from sympy.core import Add, S
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups, is_sequence
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x,y)`` is a function of two variables whose derivatives in that
variable make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
pdsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
    - See test_pde.py for many tests, which also serve as a set of
      examples for how to use pdsolve().
    - pdsolve always returns an Equality class (except for the case
      when the hint is "all" or "all_Integral"). Note that it is not possible
      to get an explicit solution for f(x, y) as in the case of ODEs.
    - Do help(pde.pde_hintname) to get more information on a
      specific hint.
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func, hint=hint, simplify=True,
type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'], hints['func'],
hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimizes the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
funcs = set().union(funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
# Try solving for the function
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
# try direct substitution of the solution into the PDE and simplify
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form:
.. math::
f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}}
and can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
dx dy
>>> pprint(pdsolve(genform))
-c*(a*x + b*y)
---------------
2 2
a + b
f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
x y
- - - -
2 2
f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y}
+ c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is:
.. math::
f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2}
\int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2},
\frac{- a \eta + b \xi}{a^2 + b^2} \right)
e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right]
e^{- \frac{c \xi}{a^2 + b^2}}
\right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, ,
where `F(\eta)` is an arbitrary single-valued function. The solution
can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u - G(x,y)
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)
dx dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
// a*x + b*y \
|| / |
|| | |
|| | c*xi |
|| | ------- |
|| | 2 2 |
|| | /a*xi + b*eta -a*eta + b*xi\ a + b |
|| | G|------------, -------------|*e d(xi)|
|| | | 2 2 2 2 | |
|| | \ a + b a + b / |
|| | |
|| / |
|| |
f(x, y) = ||F(eta) + -------------------------------------------------------|*
|| 2 2 |
\\ a + b /
<BLANKLINE>
\|
||
||
||
||
||
||
||
||
-c*xi ||
-------||
2 2||
a + b ||
e ||
||
/|eta=-a*y + b*x, xi=a*x + b*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y)*exp(x/2) + exp(x + 4*y)/15)*exp(-y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
    # TODO : For now first order linear PDEs having
    # two variables are implemented. Once there is support for
    # solving systems of ODEs, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial
differential equation is
.. math:: a(x, y) \frac{\partial f(x, y)}{\partial x}
+ b(x, y) \frac{\partial f(x, y)}{\partial y}
+ c(x, y) f(x, y) = G(x, y)
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
functions in `x` and `y`. This PDE is converted into an ODE by
making the following transformation:
1. `\xi` as `x`
2. `\eta` as the constant in the solution to the differential
equation `\frac{dy}{dx} = -\frac{b}{a}`
Making the previous substitutions reduces it to the linear ODE
.. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using ``dsolve``.
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
d d
-G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
dx dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - {x, y}
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - {x, y}
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
for key, sym in enumerate(syms):
final = sol.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
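# A minimal sketch (hypothetical values): if dsolve returned sol = C1*exp(-x)
# with syms = {C1}, func = F and funcarg = x*y, the constant C1 is replaced
# by the arbitrary function F(x*y), giving F(x*y)*exp(-x).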
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
else:
return pde_separate(Eq(eq, 0), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
    Consider an equation of two independent variables x, y and a dependent
    variable w; we look for the sum of two functions depending on different
    arguments:
`w(x, y, z) = X(x) + y(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
    Consider an equation of two independent variables x, y and a dependent
    variable w; we look for the product of two functions depending on
    different arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
# Extract derivatives depending our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
# Check, whether we have already term with independent variable...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs]
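# For instance, after substituting u = X(x)*T(t) into u_xx = u_tt and dividing
# by X(x)*T(t) (as in the pde_separate doctest above), it is this helper,
# called as _separate(result, x, [t]), that produces the two sides
# [Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)].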
|
2e7850490c83ec507d172153da350bd67a307c40b4e694edf500af80caa326ec | """Solvers of systems of polynomial equations. """
from sympy.core import S
from sympy.core.sorting import default_sort_key
from sympy.polys import Poly, groebner, roots
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyerrors import (ComputationFailed,
PolificationFailed, CoercionFailed)
from sympy.simplify import rcollect
from sympy.utilities import postfixes
from sympy.utilities.misc import filldedent
class SolveFailed(Exception):
"""Raised when solver's conditions weren't met. """
def solve_poly_system(seq, *gens, **args):
"""
Solve a system of polynomial equations.
Parameters
==========
seq: a list/tuple/set
        Listing all the equations that need to be solved
gens: generators
generators of the equations in seq for which we want the
solutions
args: Keyword arguments
Special options for solving the equations
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in seq
Examples
========
>>> from sympy import solve_poly_system
>>> from sympy.abc import x, y
>>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
[(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
"""
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('solve_poly_system', len(seq), exc)
if len(polys) == len(opt.gens) == 2:
f, g = polys
if all(i <= 2 for i in f.degree_list() + g.degree_list()):
try:
return solve_biquadratic(f, g, opt)
except SolveFailed:
pass
return solve_generic(polys, opt)
def solve_biquadratic(f, g, opt):
"""Solve a system of two bivariate quadratic polynomial equations.
Parameters
==========
f: a single Expr or Poly
First equation
g: a single Expr or Poly
Second Equation
opt: an Options object
For specifying keyword arguments and generators
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in seq.
Examples
========
>>> from sympy.polys import Options, Poly
>>> from sympy.abc import x, y
>>> from sympy.solvers.polysys import solve_biquadratic
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')
>>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(1/3, 3), (41/27, 11/9)]
>>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')
>>> b = Poly(-y + x - 4, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(7/2 - sqrt(29)/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + \
sqrt(29)/2)]
"""
G = groebner([f, g])
if len(G) == 1 and G[0].is_ground:
return None
if len(G) != 2:
raise SolveFailed
x, y = opt.gens
p, q = G
if not p.gcd(q).is_ground:
# not 0-dimensional
raise SolveFailed
p = Poly(p, x, expand=False)
p_roots = [rcollect(expr, y) for expr in roots(p).keys()]
q = q.ltrim(-1)
q_roots = list(roots(q).keys())
solutions = []
for q_root in q_roots:
for p_root in p_roots:
solution = (p_root.subs(y, q_root), q_root)
solutions.append(solution)
return sorted(solutions, key=default_sort_key)
def solve_generic(polys, opt):
"""
Solve a generic system of polynomial equations.
Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
set F = { f_1, f_2, ..., f_n } of polynomial equations, using
Groebner basis approach. For now only zero-dimensional systems
are supported, which means F can have at most a finite number
of solutions.
    The algorithm relies on the fact that if G is the basis
    of F with respect to an elimination order (here lexicographic
    order is used), then G and F generate the same ideal and therefore
    have the same set of solutions. By the elimination property, if G is
    a reduced, zero-dimensional Groebner basis, then it contains a
    univariate polynomial (in its last variable). This can be
    solved by computing its roots. Substituting all computed roots
    for the last (eliminated) variable in the other elements of G, a new
    polynomial system is generated. Applying the above procedure
    recursively, a finite number of solutions can be found.
    The ability to find all solutions by this procedure depends
    on the root finding algorithms. If no solutions were found, it
    only means that roots() failed; the system may still be solvable. To
    overcome this difficulty use numerical algorithms instead.
Parameters
==========
polys: a list/tuple/set
        Listing all the polynomial equations that need to be solved
opt: an Options object
For specifying keyword arguments and generators
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in seq
References
==========
.. [Buchberger01] B. Buchberger, Groebner Bases: A Short
Introduction for Systems Theorists, In: R. Moreno-Diaz,
B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
February, 2001
.. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
and Algorithms, Springer, Second Edition, 1997, pp. 112
Examples
========
>>> from sympy.polys import Poly, Options
>>> from sympy.solvers.polysys import solve_generic
>>> from sympy.abc import x, y
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(x - y + 5, x, y, domain='ZZ')
>>> b = Poly(x + y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(-1, 4)]
>>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
>>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(11/3, 13/3)]
>>> a = Poly(x**2 + y, x, y, domain='ZZ')
>>> b = Poly(x + y*4, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(0, 0), (1/4, -1/16)]
"""
def _is_univariate(f):
"""Returns True if 'f' is univariate in its last variable. """
for monom in f.monoms():
if any(monom[:-1]):
return False
return True
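        # For example, Poly(y**2 + 1, x, y) is univariate in its last
        # generator y (every monomial has exponent 0 for x), while
        # Poly(x*y + 1, x, y) is not.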
def _subs_root(f, gen, zero):
"""Replace generator with a root so that the result is nice. """
p = f.as_expr({gen: zero})
if f.degree(gen) >= 2:
p = p.expand(deep=False)
return p
def _solve_reduced_system(system, gens, entry=False):
"""Recursively solves reduced polynomial systems. """
if len(system) == len(gens) == 1:
zeros = list(roots(system[0], gens[-1]).keys())
return [(zero,) for zero in zeros]
basis = groebner(system, gens, polys=True)
if len(basis) == 1 and basis[0].is_ground:
if not entry:
return []
else:
return None
univariate = list(filter(_is_univariate, basis))
if len(basis) < len(gens):
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
if len(univariate) == 1:
f = univariate.pop()
else:
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
gens = f.gens
gen = gens[-1]
zeros = list(roots(f.ltrim(gen)).keys())
if not zeros:
return []
if len(basis) == 1:
return [(zero,) for zero in zeros]
solutions = []
for zero in zeros:
new_system = []
new_gens = gens[:-1]
for b in basis[:-1]:
eq = _subs_root(b, gen, zero)
if eq is not S.Zero:
new_system.append(eq)
for solution in _solve_reduced_system(new_system, new_gens):
solutions.append(solution + (zero,))
if solutions and len(solutions[0]) != len(gens):
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
return solutions
try:
result = _solve_reduced_system(polys, opt.gens, entry=True)
except CoercionFailed:
raise NotImplementedError
if result is not None:
return sorted(result, key=default_sort_key)
else:
return None
def solve_triangulated(polys, *gens, **args):
"""
Solve a polynomial system using Gianni-Kalkbrenner algorithm.
The algorithm proceeds by computing one Groebner basis in the ground
domain and then by iteratively computing polynomial factorizations in
appropriately constructed algebraic extensions of the ground domain.
Parameters
==========
polys: a list/tuple/set
        Listing all the equations that need to be solved
gens: generators
generators of the equations in polys for which we want the
solutions
args: Keyword arguments
Special options for solving the equations
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in polys
Examples
========
>>> from sympy.solvers.polysys import solve_triangulated
>>> from sympy.abc import x, y, z
>>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]
>>> solve_triangulated(F, x, y, z)
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
References
==========
1. Patrizia Gianni, Teo Mora, Algebraic Solution of System of
Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989
"""
G = groebner(polys, gens, polys=True)
G = list(reversed(G))
domain = args.get('domain')
if domain is not None:
for i, g in enumerate(G):
G[i] = g.set_domain(domain)
f, G = G[0].ltrim(-1), G[1:]
dom = f.get_domain()
zeros = f.ground_roots()
solutions = set()
for zero in zeros:
solutions.add(((zero,), dom))
var_seq = reversed(gens[:-1])
vars_seq = postfixes(gens[1:])
for var, vars in zip(var_seq, vars_seq):
_solutions = set()
for values, dom in solutions:
H, mapping = [], list(zip(vars, values))
for g in G:
_vars = (var,) + vars
if g.has_only_gens(*_vars) and g.degree(var) != 0:
h = g.ltrim(var).eval(dict(mapping))
if g.degree(var) == h.degree():
H.append(h)
p = min(H, key=lambda h: h.degree())
zeros = p.ground_roots()
for zero in zeros:
if not zero.is_Rational:
dom_zero = dom.algebraic_field(zero)
else:
dom_zero = dom
_solutions.add(((zero,) + values, dom_zero))
solutions = _solutions
solutions = list(solutions)
for i, (solution, _) in enumerate(solutions):
solutions[i] = solution
return sorted(solutions, key=default_sort_key)
|
12b787afef706491336a17cd1c3ef28ca1453ce994c4df40b755514cebfcec19 | """Tools for solving inequalities and systems of inequalities. """
from sympy.core import Symbol, Dummy, sympify
from sympy.core.exprtools import factor_terms
from sympy.core.relational import Relational, Eq, Ge, Lt
from sympy.sets.sets import Interval, FiniteSet, Union, Intersection
from sympy.core.singleton import S
from sympy.core.function import expand_mul
from sympy.functions import Abs
from sympy.logic import And
from sympy.polys import Poly, PolynomialError, parallel_poly_from_expr
from sympy.polys.polyutils import _nsort
from sympy.utilities.iterables import sift, iterable
from sympy.utilities.misc import filldedent
def solve_poly_inequality(poly, rel):
"""Solve a polynomial inequality with rational coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import solve_poly_inequality
>>> solve_poly_inequality(Poly(x, x, domain='ZZ'), '==')
[{0}]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '!=')
[Interval.open(-oo, -1), Interval.open(-1, 1), Interval.open(1, oo)]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '==')
[{-1}, {1}]
See Also
========
solve_poly_inequalities
"""
if not isinstance(poly, Poly):
raise ValueError(
'For efficiency reasons, `poly` should be a Poly instance')
if poly.as_expr().is_number:
t = Relational(poly.as_expr(), 0, rel)
if t is S.true:
return [S.Reals]
elif t is S.false:
return [S.EmptySet]
else:
raise NotImplementedError(
"could not determine truth value of %s" % t)
reals, intervals = poly.real_roots(multiple=False), []
if rel == '==':
for root, _ in reals:
interval = Interval(root, root)
intervals.append(interval)
elif rel == '!=':
left = S.NegativeInfinity
for right, _ in reals + [(S.Infinity, 1)]:
interval = Interval(left, right, True, True)
intervals.append(interval)
left = right
else:
if poly.LC() > 0:
sign = +1
else:
sign = -1
eq_sign, equal = None, False
if rel == '>':
eq_sign = +1
elif rel == '<':
eq_sign = -1
elif rel == '>=':
eq_sign, equal = +1, True
elif rel == '<=':
eq_sign, equal = -1, True
else:
raise ValueError("'%s' is not a valid relation" % rel)
right, right_open = S.Infinity, True
for left, multiplicity in reversed(reals):
if multiplicity % 2:
if sign == eq_sign:
intervals.insert(
0, Interval(left, right, not equal, right_open))
sign, right, right_open = -sign, left, not equal
else:
if sign == eq_sign and not equal:
intervals.insert(
0, Interval(left, right, True, right_open))
right, right_open = left, True
elif sign != eq_sign and equal:
intervals.insert(0, Interval(left, left))
if sign == eq_sign:
intervals.insert(
0, Interval(S.NegativeInfinity, right, True, right_open))
return intervals
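# A small additional illustration (not a doctest): for a strict inequality the
# same routine yields open intervals, e.g.
# solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '>') should give
# [Interval.open(-oo, -1), Interval.open(1, oo)].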
def solve_poly_inequalities(polys):
"""Solve polynomial inequalities with rational coefficients.
Examples
========
>>> from sympy.solvers.inequalities import solve_poly_inequalities
>>> from sympy.polys import Poly
>>> from sympy.abc import x
>>> solve_poly_inequalities(((
... Poly(x**2 - 3), ">"), (
... Poly(-x**2 + 1), ">")))
Union(Interval.open(-oo, -sqrt(3)), Interval.open(-1, 1), Interval.open(sqrt(3), oo))
"""
return Union(*[s for p in polys for s in solve_poly_inequality(*p)])
def solve_rational_inequalities(eqs):
"""Solve a system of rational inequalities with rational coefficients.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Poly
>>> from sympy.solvers.inequalities import solve_rational_inequalities
>>> solve_rational_inequalities([[
... ((Poly(-x + 1), Poly(1, x)), '>='),
... ((Poly(-x + 1), Poly(1, x)), '<=')]])
{1}
>>> solve_rational_inequalities([[
... ((Poly(x), Poly(1, x)), '!='),
... ((Poly(-x + 1), Poly(1, x)), '>=')]])
Union(Interval.open(-oo, 0), Interval.Lopen(0, 1))
See Also
========
solve_poly_inequality
"""
result = S.EmptySet
for _eqs in eqs:
if not _eqs:
continue
global_intervals = [Interval(S.NegativeInfinity, S.Infinity)]
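        # numer/denom and numer*denom have the same sign wherever denom != 0,
        # so solve the polynomial inequality numer*denom rel 0 and afterwards
        # remove the zeros of denom from the accumulated intervals.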
for (numer, denom), rel in _eqs:
numer_intervals = solve_poly_inequality(numer*denom, rel)
denom_intervals = solve_poly_inequality(denom, '==')
intervals = []
for numer_interval in numer_intervals:
for global_interval in global_intervals:
interval = numer_interval.intersect(global_interval)
if interval is not S.EmptySet:
intervals.append(interval)
global_intervals = intervals
intervals = []
for global_interval in global_intervals:
for denom_interval in denom_intervals:
global_interval -= denom_interval
if global_interval is not S.EmptySet:
intervals.append(global_interval)
global_intervals = intervals
if not global_intervals:
break
for interval in global_intervals:
result = result.union(interval)
return result
def reduce_rational_inequalities(exprs, gen, relational=True):
"""Reduce a system of rational inequalities with rational coefficients.
Examples
========
>>> from sympy import Symbol
>>> from sympy.solvers.inequalities import reduce_rational_inequalities
>>> x = Symbol('x', real=True)
>>> reduce_rational_inequalities([[x**2 <= 0]], x)
Eq(x, 0)
>>> reduce_rational_inequalities([[x + 2 > 0]], x)
-2 < x
>>> reduce_rational_inequalities([[(x + 2, ">")]], x)
-2 < x
>>> reduce_rational_inequalities([[x + 2]], x)
Eq(x, -2)
    This function finds the non-infinite solution set, so if the unknown symbol
    is declared as extended real rather than real then the result may include
    finiteness conditions:
>>> y = Symbol('y', extended_real=True)
>>> reduce_rational_inequalities([[y + 2 > 0]], y)
(-2 < y) & (y < oo)
"""
exact = True
eqs = []
solution = S.Reals if exprs else S.EmptySet
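    # ``exprs`` is a list of groups: the conditions inside a group are
    # combined with And, while the groups themselves are combined with Or
    # (the union over groups is taken in solve_rational_inequalities).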
for _exprs in exprs:
_eqs = []
for expr in _exprs:
if isinstance(expr, tuple):
expr, rel = expr
else:
if expr.is_Relational:
expr, rel = expr.lhs - expr.rhs, expr.rel_op
else:
expr, rel = expr, '=='
if expr is S.true:
numer, denom, rel = S.Zero, S.One, '=='
elif expr is S.false:
numer, denom, rel = S.One, S.One, '=='
else:
numer, denom = expr.together().as_numer_denom()
try:
(numer, denom), opt = parallel_poly_from_expr(
(numer, denom), gen)
except PolynomialError:
raise PolynomialError(filldedent('''
only polynomials and rational functions are
supported in this context.
'''))
if not opt.domain.is_Exact:
numer, denom, exact = numer.to_exact(), denom.to_exact(), False
domain = opt.domain.get_exact()
if not (domain.is_ZZ or domain.is_QQ):
expr = numer/denom
expr = Relational(expr, 0, rel)
solution &= solve_univariate_inequality(expr, gen, relational=False)
else:
_eqs.append(((numer, denom), rel))
if _eqs:
eqs.append(_eqs)
if eqs:
solution &= solve_rational_inequalities(eqs)
exclude = solve_rational_inequalities([[((d, d.one), '==')
for i in eqs for ((n, d), _) in i if d.has(gen)]])
solution -= exclude
if not exact and solution:
solution = solution.evalf()
if relational:
solution = solution.as_relational(gen)
return solution
def reduce_abs_inequality(expr, rel, gen):
"""Reduce an inequality with nested absolute values.
Examples
========
>>> from sympy import Abs, Symbol
>>> from sympy.solvers.inequalities import reduce_abs_inequality
>>> x = Symbol('x', real=True)
>>> reduce_abs_inequality(Abs(x - 5) - 3, '<', x)
(2 < x) & (x < 8)
>>> reduce_abs_inequality(Abs(x + 2)*3 - 13, '<', x)
(-19/3 < x) & (x < 7/3)
See Also
========
reduce_abs_inequalities
"""
if gen.is_extended_real is False:
raise TypeError(filldedent('''
Cannot solve inequalities with absolute values containing
non-real variables.
'''))
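    # _bottom_up_scan returns a list of (expression, conditions) pairs in
    # which every Abs(arg) has been replaced by arg (under the condition
    # arg >= 0) or by -arg (under arg < 0), so each pair is Abs-free and
    # can be handled by reduce_rational_inequalities.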
def _bottom_up_scan(expr):
exprs = []
if expr.is_Add or expr.is_Mul:
op = expr.func
for arg in expr.args:
_exprs = _bottom_up_scan(arg)
if not exprs:
exprs = _exprs
else:
args = []
for expr, conds in exprs:
for _expr, _conds in _exprs:
args.append((op(expr, _expr), conds + _conds))
exprs = args
elif expr.is_Pow:
n = expr.exp
if not n.is_Integer:
raise ValueError("Only Integer Powers are allowed on Abs.")
_exprs = _bottom_up_scan(expr.base)
for expr, conds in _exprs:
exprs.append((expr**n, conds))
elif isinstance(expr, Abs):
_exprs = _bottom_up_scan(expr.args[0])
for expr, conds in _exprs:
exprs.append(( expr, conds + [Ge(expr, 0)]))
exprs.append((-expr, conds + [Lt(expr, 0)]))
else:
exprs = [(expr, [])]
return exprs
exprs = _bottom_up_scan(expr)
mapping = {'<': '>', '<=': '>='}
inequalities = []
for expr, conds in exprs:
if rel not in mapping.keys():
expr = Relational( expr, 0, rel)
else:
expr = Relational(-expr, 0, mapping[rel])
inequalities.append([expr] + conds)
return reduce_rational_inequalities(inequalities, gen)
def reduce_abs_inequalities(exprs, gen):
"""Reduce a system of inequalities with nested absolute values.
Examples
========
>>> from sympy import Abs, Symbol
>>> from sympy.solvers.inequalities import reduce_abs_inequalities
>>> x = Symbol('x', extended_real=True)
>>> reduce_abs_inequalities([(Abs(3*x - 5) - 7, '<'),
... (Abs(x + 25) - 13, '>')], x)
(-2/3 < x) & (x < 4) & (((-oo < x) & (x < -38)) | ((-12 < x) & (x < oo)))
>>> reduce_abs_inequalities([(Abs(x - 4) + Abs(3*x - 5) - 7, '<')], x)
(1/2 < x) & (x < 4)
See Also
========
reduce_abs_inequality
"""
return And(*[ reduce_abs_inequality(expr, rel, gen)
for expr, rel in exprs ])
def solve_univariate_inequality(expr, gen, relational=True, domain=S.Reals, continuous=False):
"""Solves a real univariate inequality.
Parameters
==========
expr : Relational
The target inequality
gen : Symbol
The variable for which the inequality is solved
relational : bool
A Relational type output is expected or not
domain : Set
The domain over which the equation is solved
continuous: bool
True if expr is known to be continuous over the given domain
(and so continuous_domain() doesn't need to be called on it)
Raises
======
NotImplementedError
The solution of the inequality cannot be determined due to limitation
in :func:`sympy.solvers.solveset.solvify`.
Notes
=====
Currently, we cannot solve all the inequalities due to limitations in
    :func:`sympy.solvers.solveset.solvify`. Also, the solutions returned for
    trigonometric inequalities are restricted to their periodic interval.
See Also
========
sympy.solvers.solveset.solvify: solver returning solveset solutions with solve's output API
Examples
========
>>> from sympy.solvers.inequalities import solve_univariate_inequality
>>> from sympy import Symbol, sin, Interval, S
>>> x = Symbol('x')
>>> solve_univariate_inequality(x**2 >= 4, x)
((2 <= x) & (x < oo)) | ((-oo < x) & (x <= -2))
>>> solve_univariate_inequality(x**2 >= 4, x, relational=False)
Union(Interval(-oo, -2), Interval(2, oo))
>>> domain = Interval(0, S.Infinity)
>>> solve_univariate_inequality(x**2 >= 4, x, False, domain)
Interval(2, oo)
>>> solve_univariate_inequality(sin(x) > 0, x, relational=False)
Interval.open(0, pi)
"""
from sympy.functions.elementary.complexes import im
from sympy.calculus.util import (continuous_domain, periodicity,
function_range)
from sympy.solvers.solvers import denoms
from sympy.solvers.solveset import solvify, solveset
if domain.is_subset(S.Reals) is False:
raise NotImplementedError(filldedent('''
Inequalities in the complex domain are
not supported. Try the real domain by
setting domain=S.Reals'''))
elif domain is not S.Reals:
rv = solve_univariate_inequality(
expr, gen, relational=False, continuous=continuous).intersection(domain)
if relational:
rv = rv.as_relational(gen)
return rv
else:
pass # continue with attempt to solve in Real domain
# This keeps the function independent of the assumptions about `gen`.
# `solveset` makes sure this function is called only when the domain is
# real.
_gen = gen
_domain = domain
if gen.is_extended_real is False:
rv = S.EmptySet
return rv if not relational else rv.as_relational(_gen)
elif gen.is_extended_real is None:
gen = Dummy('gen', extended_real=True)
try:
expr = expr.xreplace({_gen: gen})
except TypeError:
raise TypeError(filldedent('''
When gen is real, the relational has a complex part
which leads to an invalid comparison like I < 0.
'''))
rv = None
if expr is S.true:
rv = domain
elif expr is S.false:
rv = S.EmptySet
else:
e = expr.lhs - expr.rhs
period = periodicity(e, gen)
if period == S.Zero:
e = expand_mul(e)
const = expr.func(e, 0)
if const is S.true:
rv = domain
elif const is S.false:
rv = S.EmptySet
elif period is not None:
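            # For a periodic expression, first check whether the inequality
            # holds (or fails) over the whole range of e; if that is
            # inconclusive, restrict an infinite domain to a single period.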
frange = function_range(e, gen, domain)
rel = expr.rel_op
if rel in ('<', '<='):
if expr.func(frange.sup, 0):
rv = domain
elif not expr.func(frange.inf, 0):
rv = S.EmptySet
elif rel in ('>', '>='):
if expr.func(frange.inf, 0):
rv = domain
elif not expr.func(frange.sup, 0):
rv = S.EmptySet
inf, sup = domain.inf, domain.sup
if sup - inf is S.Infinity:
domain = Interval(0, period, False, True).intersect(_domain)
_domain = domain
if rv is None:
n, d = e.as_numer_denom()
try:
if gen not in n.free_symbols and len(e.free_symbols) > 1:
raise ValueError
# this might raise ValueError on its own
# or it might give None...
solns = solvify(e, gen, domain)
if solns is None:
# in which case we raise ValueError
raise ValueError
except (ValueError, NotImplementedError):
# replace gen with generic x since it's
# univariate anyway
raise NotImplementedError(filldedent('''
The inequality, %s, cannot be solved using
solve_univariate_inequality.
''' % expr.subs(gen, Symbol('x'))))
expanded_e = expand_mul(e)
def valid(x):
# this is used to see if gen=x satisfies the
# relational by substituting it into the
# expanded form and testing against 0, e.g.
# if expr = x*(x + 1) < 2 then e = x*(x + 1) - 2
# and expanded_e = x**2 + x - 2; the test is
# whether a given value of x satisfies
# x**2 + x - 2 < 0
#
# expanded_e, expr and gen used from enclosing scope
v = expanded_e.subs(gen, expand_mul(x))
try:
r = expr.func(v, 0)
except TypeError:
r = S.false
if r in (S.true, S.false):
return r
if v.is_extended_real is False:
return S.false
else:
v = v.n(2)
if v.is_comparable:
return expr.func(v, 0)
# not comparable or couldn't be evaluated
raise NotImplementedError(
'relationship did not evaluate: %s' % r)
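            # The truth of the inequality can only change at zeros of the
            # expression (solns), zeros of a denominator (singularities) or
            # discontinuities of the expression, so collect these as the
            # candidate boundary points.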
singularities = []
for d in denoms(expr, gen):
singularities.extend(solvify(d, gen, domain))
if not continuous:
domain = continuous_domain(expanded_e, gen, domain)
include_x = '=' in expr.rel_op and expr.rel_op != '!='
try:
discontinuities = set(domain.boundary -
FiniteSet(domain.inf, domain.sup))
# remove points that are not between inf and sup of domain
critical_points = FiniteSet(*(solns + singularities + list(
discontinuities))).intersection(
Interval(domain.inf, domain.sup,
domain.inf not in domain, domain.sup not in domain))
if all(r.is_number for r in critical_points):
reals = _nsort(critical_points, separated=True)[0]
else:
sifted = sift(critical_points, lambda x: x.is_extended_real)
if sifted[None]:
# there were some roots that weren't known
# to be real
raise NotImplementedError
try:
reals = sifted[True]
if len(reals) > 1:
reals = list(sorted(reals))
except TypeError:
raise NotImplementedError
except NotImplementedError:
raise NotImplementedError('sorting of these roots is not supported')
# If expr contains imaginary coefficients, only take real
# values of x for which the imaginary part is 0
make_real = S.Reals
if im(expanded_e) != S.Zero:
check = True
im_sol = FiniteSet()
try:
a = solveset(im(expanded_e), gen, domain)
if not isinstance(a, Interval):
for z in a:
if z not in singularities and valid(z) and z.is_extended_real:
im_sol += FiniteSet(z)
else:
start, end = a.inf, a.sup
for z in _nsort(critical_points + FiniteSet(end)):
valid_start = valid(start)
if start != end:
valid_z = valid(z)
pt = _pt(start, z)
if pt not in singularities and pt.is_extended_real and valid(pt):
if valid_start and valid_z:
im_sol += Interval(start, z)
elif valid_start:
im_sol += Interval.Ropen(start, z)
elif valid_z:
im_sol += Interval.Lopen(start, z)
else:
im_sol += Interval.open(start, z)
start = z
for s in singularities:
im_sol -= FiniteSet(s)
except (TypeError):
im_sol = S.Reals
check = False
if im_sol is S.EmptySet:
raise ValueError(filldedent('''
%s contains imaginary parts which cannot be
made 0 for any value of %s satisfying the
inequality, leading to relations like I < 0.
''' % (expr.subs(gen, _gen), _gen)))
make_real = make_real.intersect(im_sol)
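            # Walk through the sorted boundary points: a sample point inside
            # each candidate interval is tested with valid(); intervals and
            # individual points where the inequality holds are accumulated
            # in sol_sets.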
sol_sets = [S.EmptySet]
start = domain.inf
if start in domain and valid(start) and start.is_finite:
sol_sets.append(FiniteSet(start))
for x in reals:
end = x
if valid(_pt(start, end)):
sol_sets.append(Interval(start, end, True, True))
if x in singularities:
singularities.remove(x)
else:
if x in discontinuities:
discontinuities.remove(x)
_valid = valid(x)
else: # it's a solution
_valid = include_x
if _valid:
sol_sets.append(FiniteSet(x))
start = end
end = domain.sup
if end in domain and valid(end) and end.is_finite:
sol_sets.append(FiniteSet(end))
if valid(_pt(start, end)):
sol_sets.append(Interval.open(start, end))
if im(expanded_e) != S.Zero and check:
rv = (make_real).intersect(_domain)
else:
rv = Intersection(
(Union(*sol_sets)), make_real, _domain).subs(gen, _gen)
return rv if not relational else rv.as_relational(_gen)
def _pt(start, end):
"""Return a point between start and end"""
if not start.is_infinite and not end.is_infinite:
pt = (start + end)/2
elif start.is_infinite and end.is_infinite:
pt = S.Zero
else:
if (start.is_infinite and start.is_extended_positive is None or
end.is_infinite and end.is_extended_positive is None):
raise ValueError('cannot proceed with unsigned infinite values')
if (end.is_infinite and end.is_extended_negative or
start.is_infinite and start.is_extended_positive):
start, end = end, start
# if possible, use a multiple of self which has
# better behavior when checking assumptions than
# an expression obtained by adding or subtracting 1
if end.is_infinite:
if start.is_extended_positive:
pt = start*2
elif start.is_extended_negative:
pt = start*S.Half
else:
pt = start + 1
elif start.is_infinite:
if end.is_extended_positive:
pt = end*S.Half
elif end.is_extended_negative:
pt = end*2
else:
pt = end - 1
return pt
def _solve_inequality(ie, s, linear=False):
"""Return the inequality with s isolated on the left, if possible.
If the relationship is non-linear, a solution involving And or Or
may be returned. False or True are returned if the relationship
is never True or always True, respectively.
    If `linear` is True (default is False), an `s`-dependent expression
    will be isolated on the left if possible,
    but it will not be solved for `s` unless the expression is linear
    in `s`. Furthermore, only "safe" operations which do not change the
sense of the relationship are applied: no division by an unsigned
value is attempted unless the relationship involves Eq or Ne and
no division by a value not known to be nonzero is ever attempted.
Examples
========
>>> from sympy import Eq, Symbol
>>> from sympy.solvers.inequalities import _solve_inequality as f
>>> from sympy.abc import x, y
For linear expressions, the symbol can be isolated:
>>> f(x - 2 < 0, x)
x < 2
>>> f(-x - 6 < x, x)
x > -3
Sometimes nonlinear relationships will be False
>>> f(x**2 + 4 < 0, x)
False
Or they may involve more than one region of values:
>>> f(x**2 - 4 < 0, x)
(-2 < x) & (x < 2)
To restrict the solution to a relational, set linear=True
and only the x-dependent portion will be isolated on the left:
>>> f(x**2 - 4 < 0, x, linear=True)
x**2 < 4
Division of only nonzero quantities is allowed, so x cannot
be isolated by dividing by y:
>>> y.is_nonzero is None # it is unknown whether it is 0 or not
True
>>> f(x*y < 1, x)
x*y < 1
And while an equality (or inequality) still holds after dividing by a
non-zero quantity
>>> nz = Symbol('nz', nonzero=True)
>>> f(Eq(x*nz, 1), x)
Eq(x, 1/nz)
the sign must be known for other inequalities involving > or <:
>>> f(x*nz <= 1, x)
nz*x <= 1
>>> p = Symbol('p', positive=True)
>>> f(x*p <= 1, x)
x <= 1/p
When there are denominators in the original expression that
are removed by expansion, conditions for them will be returned
as part of the result:
>>> f(x < x*(2/x - 1), x)
(x < 1) & Ne(x, 0)
"""
from sympy.solvers.solvers import denoms
if s not in ie.free_symbols:
return ie
if ie.rhs == s:
ie = ie.reversed
if ie.lhs == s and s not in ie.rhs.free_symbols:
return ie
def classify(ie, s, i):
# return True or False if ie evaluates when substituting s with
# i else None (if unevaluated) or NaN (when there is an error
# in evaluating)
try:
v = ie.subs(s, i)
if v is S.NaN:
return v
elif v not in (True, False):
return
return v
except TypeError:
return S.NaN
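    # Strategy: first try to treat the expression as a polynomial in s.  A
    # degree-0 polynomial settles the question immediately; when linear is
    # False, degree > 1 is delegated (via the raised NotImplementedError) to
    # reduce_rational_inequalities/solve_univariate_inequality; anything else
    # falls through to the "safe inversion" code below.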
rv = None
oo = S.Infinity
expr = ie.lhs - ie.rhs
try:
p = Poly(expr, s)
if p.degree() == 0:
rv = ie.func(p.as_expr(), 0)
elif not linear and p.degree() > 1:
# handle in except clause
raise NotImplementedError
except (PolynomialError, NotImplementedError):
if not linear:
try:
rv = reduce_rational_inequalities([[ie]], s)
except PolynomialError:
rv = solve_univariate_inequality(ie, s)
# remove restrictions wrt +/-oo that may have been
# applied when using sets to simplify the relationship
okoo = classify(ie, s, oo)
if okoo is S.true and classify(rv, s, oo) is S.false:
rv = rv.subs(s < oo, True)
oknoo = classify(ie, s, -oo)
if (oknoo is S.true and
classify(rv, s, -oo) is S.false):
rv = rv.subs(-oo < s, True)
rv = rv.subs(s > -oo, True)
if rv is S.true:
rv = (s <= oo) if okoo is S.true else (s < oo)
if oknoo is not S.true:
rv = And(-oo < s, rv)
else:
p = Poly(expr)
conds = []
if rv is None:
e = p.as_expr() # this is in expanded form
# Do a safe inversion of e, moving non-s terms
# to the rhs and dividing by a nonzero factor if
# the relational is Eq/Ne; for other relationals
# the sign must also be positive or negative
rhs = 0
b, ax = e.as_independent(s, as_Add=True)
e -= b
rhs -= b
ef = factor_terms(e)
a, e = ef.as_independent(s, as_Add=False)
if (a.is_zero != False or # don't divide by potential 0
a.is_negative ==
a.is_positive is None and # if sign is not known then
ie.rel_op not in ('!=', '==')): # reject if not Eq/Ne
e = ef
a = S.One
rhs /= a
if a.is_positive:
rv = ie.func(e, rhs)
else:
rv = ie.reversed.func(e, rhs)
# return conditions under which the value is
# valid, too.
beginning_denoms = denoms(ie.lhs) | denoms(ie.rhs)
current_denoms = denoms(rv)
for d in beginning_denoms - current_denoms:
c = _solve_inequality(Eq(d, 0), s, linear=linear)
if isinstance(c, Eq) and c.lhs == s:
if classify(rv, s, c.rhs) is S.true:
# rv is permitting this value but it shouldn't
conds.append(~c)
for i in (-oo, oo):
if (classify(rv, s, i) is S.true and
classify(ie, s, i) is not S.true):
conds.append(s < i if i is oo else i < s)
conds.append(rv)
return And(*conds)
def _reduce_inequalities(inequalities, symbols):
# helper for reduce_inequalities
poly_part, abs_part = {}, {}
other = []
for inequality in inequalities:
expr, rel = inequality.lhs, inequality.rel_op # rhs is 0
# check for gens using atoms which is more strict than free_symbols to
# guard against EX domain which won't be handled by
# reduce_rational_inequalities
gens = expr.atoms(Symbol)
if len(gens) == 1:
gen = gens.pop()
else:
common = expr.free_symbols & symbols
if len(common) == 1:
gen = common.pop()
other.append(_solve_inequality(Relational(expr, 0, rel), gen))
continue
else:
raise NotImplementedError(filldedent('''
inequality has more than one symbol of interest.
'''))
if expr.is_polynomial(gen):
poly_part.setdefault(gen, []).append((expr, rel))
else:
components = expr.find(lambda u:
u.has(gen) and (
u.is_Function or u.is_Pow and not u.exp.is_Integer))
if components and all(isinstance(i, Abs) for i in components):
abs_part.setdefault(gen, []).append((expr, rel))
else:
other.append(_solve_inequality(Relational(expr, 0, rel), gen))
poly_reduced = []
abs_reduced = []
for gen, exprs in poly_part.items():
poly_reduced.append(reduce_rational_inequalities([exprs], gen))
for gen, exprs in abs_part.items():
abs_reduced.append(reduce_abs_inequalities(exprs, gen))
return And(*(poly_reduced + abs_reduced + other))
def reduce_inequalities(inequalities, symbols=[]):
"""Reduce a system of inequalities with rational coefficients.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.inequalities import reduce_inequalities
>>> reduce_inequalities(0 <= x + 3, [])
(-3 <= x) & (x < oo)
>>> reduce_inequalities(0 <= x + y*2 - 1, [x])
(x < oo) & (x >= 1 - 2*y)
"""
if not iterable(inequalities):
inequalities = [inequalities]
inequalities = [sympify(i) for i in inequalities]
gens = set().union(*[i.free_symbols for i in inequalities])
if not iterable(symbols):
symbols = [symbols]
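    # If no symbols of interest were given, solve for all free symbols;
    # otherwise keep only the requested symbols that actually appear in the
    # inequalities.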
symbols = (set(symbols) or gens) & gens
if any(i.is_extended_real is False for i in symbols):
raise TypeError(filldedent('''
inequalities cannot contain symbols that are not real.
'''))
# make vanilla symbol real
recast = {i: Dummy(i.name, extended_real=True)
for i in gens if i.is_extended_real is None}
inequalities = [i.xreplace(recast) for i in inequalities]
symbols = {i.xreplace(recast) for i in symbols}
# prefilter
keep = []
for i in inequalities:
if isinstance(i, Relational):
i = i.func(i.lhs.as_expr() - i.rhs.as_expr(), 0)
elif i not in (True, False):
i = Eq(i, 0)
if i == True:
continue
elif i == False:
return S.false
if i.lhs.is_number:
raise NotImplementedError(
"could not determine truth value of %s" % i)
keep.append(i)
inequalities = keep
del keep
# solve system
rv = _reduce_inequalities(inequalities, symbols)
# restore original symbols and return
return rv.xreplace({v: k for k, v in recast.items()})
"""
This module contains solvers for all kinds of equations:
- algebraic or transcendental, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy.core import (S, Add, Symbol, Dummy, Expr, Mul)
from sympy.core.assumptions import check_assumptions
from sympy.core.exprtools import factor_terms
from sympy.core.function import (expand_mul, expand_log, Derivative,
AppliedUndef, UndefinedFunction, nfloat,
Function, expand_power_exp, _mexpand, expand,
expand_func)
from sympy.core.logic import fuzzy_not
from sympy.core.numbers import ilcm, Float, Rational
from sympy.core.power import integer_log, Pow
from sympy.core.relational import Relational, Eq, Ne
from sympy.core.sorting import ordered, default_sort_key
from sympy.core.sympify import sympify
from sympy.core.traversal import preorder_traversal
from sympy.logic.boolalg import And, Or, BooleanAtom
from sympy.functions import (log, exp, LambertW, cos, sin, tan, acos, asin, atan,
Abs, re, im, arg, sqrt, atan2)
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.ntheory.factor_ import divisors
from sympy.simplify import (simplify, collect, powsimp, posify, # type: ignore
powdenest, nsimplify, denom, logcombine, sqrtdenest, fraction,
separatevars)
from sympy.simplify.sqrtdenest import sqrt_depth
from sympy.simplify.fu import TR1, TR2i
from sympy.matrices.common import NonInvertibleMatrixError
from sympy.matrices import Matrix, zeros
from sympy.polys import roots, cancel, factor, Poly
from sympy.polys.polyerrors import GeneratorsNeeded, PolynomialError
from sympy.polys.solvers import sympy_eqs_to_ring, solve_lin_sys
from sympy.utilities.lambdify import lambdify
from sympy.utilities.misc import filldedent, debug
from sympy.utilities.iterables import (connected_components,
generate_bell, uniq, iterable, is_sequence, subsets)
from sympy.utilities.decorator import conserve_mpmath_dps
from mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import reduce_inequalities
from types import GeneratorType
from collections import defaultdict
from itertools import product
import warnings
def recast_to_symbols(eqs, symbols):
"""
Return (e, s, d) where e and s are versions of *eqs* and
*symbols* in which any non-Symbol objects in *symbols* have
been replaced with generic Dummy symbols and d is a dictionary
that can be used to restore the original expressions.
Examples
========
>>> from sympy.solvers.solvers import recast_to_symbols
>>> from sympy import symbols, Function
>>> x, y = symbols('x y')
>>> fx = Function('f')(x)
>>> eqs, syms = [fx + 1, x, y], [fx, y]
>>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d)
([_X0 + 1, x, y], [_X0, y], {_X0: f(x)})
The original equations and symbols can be restored using d:
>>> assert [i.xreplace(d) for i in eqs] == eqs
>>> assert [d.get(i, i) for i in s] == syms
"""
if not iterable(eqs) and iterable(symbols):
raise ValueError('Both eqs and symbols must be iterable')
new_symbols = list(symbols)
swap_sym = {}
for i, s in enumerate(symbols):
if not isinstance(s, Symbol) and s not in swap_sym:
swap_sym[s] = Dummy('X%d' % i)
new_symbols[i] = swap_sym[s]
new_f = []
for i in eqs:
isubs = getattr(i, 'subs', None)
if isubs is not None:
new_f.append(isubs(swap_sym))
else:
new_f.append(i)
swap_sym = {v: k for k, v in swap_sym.items()}
return new_f, new_symbols, swap_sym
def _ispow(e):
"""Return True if e is a Pow or is exp."""
return isinstance(e, Expr) and (e.is_Pow or isinstance(e, exp))
def _simple_dens(f, symbols):
# when checking if a denominator is zero, we can just check the
# base of powers with nonzero exponents since if the base is zero
# the power will be zero, too. To keep it simple and fast, we
# limit simplification to exponents that are Numbers
dens = set()
for d in denoms(f, symbols):
if d.is_Pow and d.exp.is_Number:
if d.exp.is_zero:
continue # foo**0 is never 0
d = d.base
dens.add(d)
return dens
def denoms(eq, *symbols):
"""
Return (recursively) set of all denominators that appear in *eq*
that contain any symbol in *symbols*; if *symbols* are not
provided then all denominators will be returned.
Examples
========
>>> from sympy.solvers.solvers import denoms
>>> from sympy.abc import x, y, z
>>> denoms(x/y)
{y}
>>> denoms(x/(y*z))
{y, z}
>>> denoms(3/x + y/z)
{x, z}
>>> denoms(x/2 + y/z)
{2, z}
If *symbols* are provided then only denominators containing
those symbols will be returned:
>>> denoms(1/x + 1/y + 1/z, y, z)
{y, z}
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
# Here p might be Tuple or Relational
# Expr subtrees (e.g. lhs and rhs) will be traversed after by pot
if not isinstance(p, Expr):
continue
den = denom(p)
if den is S.One:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
elif len(symbols) == 1:
if iterable(symbols[0]):
symbols = symbols[0]
rv = []
for d in dens:
free = d.free_symbols
if any(s in free for s in symbols):
rv.append(d)
return set(rv)
def checksol(f, symbol, sol=None, **flags):
"""
Checks whether sol is a solution of equation f == 0.
Explanation
===========
Input can be either a single symbol and corresponding value
or a dictionary of symbols and values. When given as a dictionary
and flag ``simplify=True``, the values in the dictionary will be
simplified. *f* can be a single equation or an iterable of equations.
A solution must satisfy all equations in *f* to be considered valid;
    if a solution fails to satisfy even one equation, False is returned; if one or
more checks are inconclusive (and none are False) then None is returned.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers import checksol
>>> x, y = symbols('x,y')
>>> checksol(x**4 - 1, x, 1)
True
>>> checksol(x**4 - 1, x, 0)
False
>>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
True
To check if an expression is zero using ``checksol()``, pass it
as *f* and send an empty dictionary for *symbol*:
>>> checksol(x**2 + x - x*(x + 1), {})
True
None is returned if ``checksol()`` could not conclude.
flags:
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify solution before substituting into function and
simplify the function before trying specific simplifications
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
"""
from sympy.physics.units import Unit
minimal = flags.get('minimal', False)
if sol is not None:
sol = {symbol: sol}
elif isinstance(symbol, dict):
sol = symbol
else:
msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)'
raise ValueError(msg % (symbol, sol))
if iterable(f):
if not f:
raise ValueError('no functions to check')
rv = True
for fi in f:
check = checksol(fi, sol, **flags)
if check:
continue
if check is False:
return False
rv = None # don't return, wait to see if there's a False
return rv
if isinstance(f, Poly):
f = f.as_expr()
elif isinstance(f, (Eq, Ne)):
if f.rhs in (S.true, S.false):
f = f.reversed
B, E = f.args
if isinstance(B, BooleanAtom):
f = f.subs(sol)
if not f.is_Boolean:
return
else:
f = f.rewrite(Add, evaluate=False)
if isinstance(f, BooleanAtom):
return bool(f)
elif not f.is_Relational and not f:
return True
if sol and not f.free_symbols & set(sol.keys()):
# if f(y) == 0, x=3 does not set f(y) to zero...nor does it not
return None
illegal = {S.NaN,
S.ComplexInfinity,
S.Infinity,
S.NegativeInfinity}
if any(sympify(v).atoms() & illegal for k, v in sol.items()):
return False
was = f
attempt = -1
numerical = flags.get('numerical', True)
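    # Try progressively more expensive checks: plain substitution first, then
    # expansion of the numerator, then simplification of the solution and of
    # the substituted expression (with posify when the force flag is set);
    # stop as soon as a definite True/False answer can be given.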
while 1:
attempt += 1
if attempt == 0:
val = f.subs(sol)
if isinstance(val, Mul):
val = val.as_independent(Unit)[0]
if val.atoms() & illegal:
return False
elif attempt == 1:
if not val.is_number:
if not val.is_constant(*list(sol.keys()), simplify=not minimal):
return False
# there are free symbols -- simple expansion might work
_, val = val.as_content_primitive()
val = _mexpand(val.as_numer_denom()[0], recursive=True)
elif attempt == 2:
if minimal:
return
if flags.get('simplify', True):
for k in sol:
sol[k] = simplify(sol[k])
# start over without the failed expanded form, possibly
# with a simplified solution
val = simplify(f.subs(sol))
if flags.get('force', True):
val, reps = posify(val)
# expansion may work now, so try again and check
exval = _mexpand(val, recursive=True)
if exval.is_number:
# we can decide now
val = exval
else:
# if there are no radicals and no functions then this can't be
# zero anymore -- can it?
pot = preorder_traversal(expand_mul(val))
seen = set()
saw_pow_func = False
for p in pot:
if p in seen:
continue
seen.add(p)
if p.is_Pow and not p.exp.is_Integer:
saw_pow_func = True
elif p.is_Function:
saw_pow_func = True
elif isinstance(p, UndefinedFunction):
saw_pow_func = True
if saw_pow_func:
break
if saw_pow_func is False:
return False
if flags.get('force', True):
# don't do a zero check with the positive assumptions in place
val = val.subs(reps)
nz = fuzzy_not(val.is_zero)
if nz is not None:
# issue 5673: nz may be True even when False
# so these are just hacks to keep a false positive
# from being returned
# HACK 1: LambertW (issue 5673)
if val.is_number and val.has(LambertW):
# don't eval this to verify solution since if we got here,
# numerical must be False
return None
# add other HACKs here if necessary, otherwise we assume
# the nz value is correct
return not nz
break
if val == was:
continue
elif val.is_Rational:
return val == 0
if numerical and val.is_number:
return (abs(val.n(18).n(12, chop=True)) < 1e-9) is S.true
was = val
if flags.get('warn', False):
warnings.warn("\n\tWarning: could not verify solution %s." % sol)
# returns None if it can't conclude
# TODO: improve solution testing
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* When undetermined coefficients are identified:
* That are linear:
>>> solve((a + b)*x - b + 2, a, b)
{a: -2, b: 2}
* That are nonlinear:
>>> solve((a + b)*x - b**2 + 2, a, b, set=True)
([a, b], {(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))})
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
    eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
    known limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
    We will construct a known value for this expression at x = -3 by selecting
    the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
            like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to a linear
system with as many zeros as possible; this is very expensive.
quick=True (default is False)
When using particular=True, use a fast heuristic to find a
solution with many zeros (instead of using the very slow method
guaranteed to find the largest number of zeros possible).
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
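    # ordered_symbols records whether the caller supplied the symbols in a
    # definite order (a single Symbol or an ordered sequence); this decides
    # whether solutions can later be returned as ordered tuples.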
ordered_symbols = (symbols and
symbols[0] and
(isinstance(symbols[0], Symbol) or
is_sequence(symbols[0],
include=GeneratorType)
)
)
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
symbols = list(symbols)
ordered_symbols = False
elif len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
(len(w.free_symbols & set(symbols)) > 0), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
for i, fi in enumerate(f):
f[i] = fi.xreplace({s: rhs})
f.append(s - rhs)
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
symbols = list(uniq(symbols))
if not ordered_symbols:
# we do this to make the results returned canonical in case f
# contains a system of nonlinear equations; all other cases should
# be unambiguous
symbols = sorted(symbols, key=default_sort_key)
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
            elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
for i in range(1):
if isinstance(solution, dict):
solution = _do_dict(solution)
break
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
break
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# undo the dictionary solutions returned when the system was only partially
# solved with poly-system if all symbols are present
if (
not flags.get('dict', False) and
solution and
ordered_symbols and
not isinstance(solution, dict) and
all(isinstance(sol, dict) for sol in solution)
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if not as_set and isinstance(solution, list):
# Make sure that a list of solutions is ordered in a canonical way.
solution.sort(key=default_sort_key)
if not as_dict and not as_set:
return solution or []
# return a list of mappings or []
if not solution:
solution = []
else:
if isinstance(solution, dict):
solution = [solution]
elif iterable(solution[0]):
solution = [dict(list(zip(symbols, s))) for s in solution]
elif isinstance(solution[0], dict):
pass
else:
if len(symbols) != 1:
raise ValueError("Length should be 1")
solution = [{symbols[0]: s} for s in solution]
if as_dict:
return solution
assert as_set
if not solution:
return [], set()
k = list(ordered(solution[0].keys()))
return k, {tuple([s[ki] for ki in k]) for s in solution}
def _solve(f, *symbols, **flags):
"""
Return a checked solution for *f* in terms of one or more of the
symbols. A list should be returned except for the case when a linear
undetermined-coefficients equation is encountered (in which case
a dictionary is returned).
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised.
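    For instance, a rough sketch of the expected shape of the result
    (illustrative only; the ordering of the returned roots may vary)::

        from sympy.abc import x
        from sympy.solvers.solvers import _solve
        _solve(x**2 - 4, x)   # -> [-2, 2]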
"""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
if len(ex) == 1:
ex = ex.pop()
try:
# soln may come back as dict, list of dicts or tuples, or
# tuple of symbol list and set of solution tuples
soln = solve_undetermined_coeffs(f, symbols, ex, **flags)
except NotImplementedError:
pass
if soln:
if flags.get('simplify', True):
if isinstance(soln, dict):
for k in soln:
soln[k] = simplify(soln[k])
elif isinstance(soln, list):
if isinstance(soln[0], dict):
for d in soln:
for k in d:
d[k] = simplify(d[k])
elif isinstance(soln[0], tuple):
soln = [tuple(simplify(i) for i in j) for j in soln]
else:
raise TypeError('unrecognized args in list')
elif isinstance(soln, tuple):
sym, sols = soln
soln = sym, {tuple(simplify(i) for i in j) for j in sols}
else:
raise TypeError('unrecognized solution type')
return soln
# find first successful solution
failed = []
got_s = set()
result = []
for s in symbols:
xi, v = solve_linear(f, symbols=[s])
if xi == s:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
v = simplify(v)
vfree = v.free_symbols
if got_s and any(ss in vfree for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
got_s.add(xi)
result.append({xi: v})
elif xi: # there might be a non-linear solution if xi is not 0
failed.append(s)
if not failed:
return result
for s in failed:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
symbol = symbols[0]
#expand binomials only if it has the unknown symbol
f = f.replace(lambda e: isinstance(e, binomial) and e.has(symbol),
lambda e: expand_func(e))
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
if m in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
result = set()
break
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = flags.get('_denominators', _simple_dens(f, symbols))
result = [s for s in result if
not any(checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for i, (expr, cond) in enumerate(f.args):
if expr.is_zero:
raise NotImplementedError(
'solve cannot represent interval solutions')
candidates = _solve(expr, symbol, **flags)
# the explicit condition for this expr is the current cond
# and none of the previous conditions
args = [~c for _, c in f.args[:i]] + [cond]
cond = And(*args)
for candidate in candidates:
if candidate in result:
# an unconditional value was already there
continue
try:
v = cond.subs(symbol, candidate)
_eval_simplify = getattr(v, '_eval_simplify', None)
if _eval_simplify is not None:
                        # unconditionally take the simplification of v
v = _eval_simplify(ratio=2, measure=lambda x: 1)
except TypeError:
# incompatible type with condition(s)
continue
if v == False:
continue
if v == True:
result.add(candidate)
else:
result.add(Piecewise(
(candidate, v),
(S.NaN, True)))
# set flags for quick exit at end; solutions for each
# piece were already checked and simplified
check = False
flags['simplify'] = False
else:
# first see if it really depends on symbol and whether there
# is only a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
if f_num.is_zero or sol is S.NaN:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
poly = None
# check for a single Add generator
if not f_num.is_Add:
add_args = [i for i in f_num.atoms(Add)
if symbol in i.free_symbols]
if len(add_args) == 1:
gen = add_args[0]
spart = gen.as_independent(symbol)[1].as_base_exp()[0]
if spart == symbol:
try:
poly = Poly(f_num, spart)
except PolynomialError:
pass
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
if poly is None:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = {b for b in bases if b.is_Function}
trig = {_ for _ in funcs if
isinstance(_, TrigonometricFunction)}
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = None
if f_num.is_Add and len(f_num.args) == 2:
# check for sin(x)**p = cos(x)**p
_args = f_num.args
t = a, b = [i.atoms(Function).intersection(
trig) for i in _args]
if all(len(i) == 1 for i in t):
a, b = [i.pop() for i in t]
if isinstance(a, cos):
a, b = b, a
_args = _args[::-1]
if isinstance(a, sin) and isinstance(b, cos
) and a.args[0] == b.args[0]:
# sin(x) + cos(x) = 0 -> tan(x) + 1 = 0
newf, _d = (TR2i(_args[0]/_args[1]) + 1
).as_numer_denom()
if not _d.is_Number:
newf = None
if newf is None:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
# don't check the rewritten form --check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
result = list(ordered(sols))
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
sols = list()
for sol in soln:
for i in inv:
sols.append(i.subs(t, sol))
result = list(ordered(sols))
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
solvers = {k: flags.get(k, True) for k in
('cubics', 'quartics', 'quintics')}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs(t, s) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)}
else:
try:
rv = set(_solve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [s for s in result if
not any(checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
if not exprs:
return []
if flags.pop('_split', True):
# Split the system into connected components
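        # e.g. [x + y - 1, y - 2, z - 3] splits into the subsystems
        # [x + y - 1, y - 2] and [z - 3]: the first two share the symbol y
        # while neither shares a symbol with the third; each subsystem is
        # solved separately and the solutions are recombined below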
V = exprs
symsset = set(symbols)
exprsyms = {e: e.free_symbols & symsset for e in exprs}
E = []
sym_indices = {sym: i for i, sym in enumerate(symbols)}
for n, e1 in enumerate(exprs):
for e2 in exprs[:n]:
# Equations are connected if they share a symbol
if exprsyms[e1] & exprsyms[e2]:
E.append((e1, e2))
G = V, E
subexprs = connected_components(G)
if len(subexprs) > 1:
subsols = []
for subexpr in subexprs:
subsyms = set()
for e in subexpr:
subsyms |= exprsyms[e]
                subsyms = sorted(subsyms, key=lambda x: sym_indices[x])
flags['_split'] = False # skip split step
subsol = _solve_system(subexpr, subsyms, **flags)
if not isinstance(subsol, list):
subsol = [subsol]
subsols.append(subsol)
            # Full solution is the Cartesian product of the subsystems
sols = []
for soldicts in product(*subsols):
sols.append(dict(item for sd in soldicts
for item in sd.items()))
# Return a list with one dict as just the dict
if len(sols) == 1:
return sols[0]
return sols
polys = []
dens = set()
failed = []
result = False
linear = False
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if not polys:
solved_syms = []
else:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
if failed:
if result:
solved_syms = list(result.keys())
else:
solved_syms = []
else:
linear = True
else:
if len(symbols) > len(polys):
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
for syms in subsets(free, len(polys)):
try:
# returns [] or list of tuples of solutions for syms
res = solve_poly_system(polys, *syms)
if res:
for r in res:
skip = False
for r1 in r:
if got_s and any(ss in r1.free_symbols
for ss in got_s):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.extend([dict(list(zip(syms, r)))])
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
if result:
solved_syms = symbols
# we don't know here if the symbols provided
# were given or not, so let solve resolve that.
                        # A list of dictionaries will always be
                        # returned from here.
result = [dict(list(zip(solved_syms, r))) for r in result]
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
result = None
if result:
if isinstance(result, dict):
result = [result]
else:
result = [{}]
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = (e.free_symbols - solved_syms) & legal
# Solve first for symbols that have lower degree in the equation.
            # Ideally we want to solve first for symbols that appear linearly
# with rational coefficients e.g. if e = x*y + z then we should
# solve for z first.
def key(sym):
ep = e.as_poly(sym)
if ep is None:
complexity = (S.Infinity, S.Infinity, S.Infinity)
else:
coeff_syms = ep.LC().free_symbols
complexity = (ep.degree(), len(coeff_syms & rv), len(coeff_syms))
return complexity + (default_sort_key(sym),)
if sort:
rv = sorted(rv, key=key)
return rv
solved_syms = set(solved_syms) # set of symbols we have solved for
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
u = Dummy() # used in solution checking
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _solve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
                    # solution for s is being added in-place
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
# check that it is independent of previous solutions
iset = set(rnew.items())
for i in newresult:
if len(i) < len(iset) and not set(i.items()) - iset:
# this is a superset of a known solution that
# is smaller
break
else:
# keep it
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
default_simplify = bool(failed) # rely on system-solvers to simplify
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
if linear and result:
result = result[0]
return result
def solve_linear(lhs, rhs=0, symbols=[], exclude=[]):
r"""
Return a tuple derived from ``f = lhs - rhs`` that is one of
the following: ``(0, 1)``, ``(0, 0)``, ``(symbol, solution)``, ``(n, d)``.
Explanation
===========
``(0, 1)`` meaning that ``f`` is independent of the symbols in *symbols*
that are not in *exclude*.
``(0, 0)`` meaning that there is no solution to the equation amongst the
symbols given. If the first element of the tuple is not zero, then the
function is guaranteed to be dependent on a symbol in *symbols*.
``(symbol, solution)`` where symbol appears linearly in the numerator of
``f``, is in *symbols* (if given), and is not in *exclude* (if given). No
simplification is done to ``f`` other than a ``mul=True`` expansion, so the
solution will correspond strictly to a unique solution.
``(n, d)`` where ``n`` and ``d`` are the numerator and denominator of ``f``
when the numerator was not linear in any symbol of interest; ``n`` will
never be a symbol unless a solution for that symbol was found (in which case
the second element is the solution, not the denominator).
Examples
========
>>> from sympy import cancel, Pow
``f`` is independent of the symbols in *symbols* that are not in
*exclude*:
>>> from sympy import cos, sin, solve_linear
>>> from sympy.abc import x, y, z
>>> eq = y*cos(x)**2 + y*sin(x)**2 - y # = y*(1 - 1) = 0
>>> solve_linear(eq)
(0, 1)
>>> eq = cos(x)**2 + sin(x)**2 # = 1
>>> solve_linear(eq)
(0, 1)
>>> solve_linear(x, exclude=[x])
(0, 1)
The variable ``x`` appears as a linear variable in each of the
following:
>>> solve_linear(x + y**2)
(x, -y**2)
>>> solve_linear(1/x - y**2)
(x, y**(-2))
    When the expression is not linear in ``x`` or ``y``, the numerator and
    denominator are returned:
>>> solve_linear(x**2/y**2 - 3)
(x**2 - 3*y**2, y**2)
If the numerator of the expression is a symbol, then ``(0, 0)`` is
returned if the solution for that symbol would have set any
denominator to 0:
>>> eq = 1/(1/x - 2)
>>> eq.as_numer_denom()
(x, 1 - 2*x)
>>> solve_linear(eq)
(0, 0)
But automatic rewriting may cause a symbol in the denominator to
appear in the numerator so a solution will be returned:
>>> (1/x)**-1
x
>>> solve_linear((1/x)**-1)
(x, 0)
Use an unevaluated expression to avoid this:
>>> solve_linear(Pow(1/x, -1, evaluate=False))
(0, 0)
If ``x`` is allowed to cancel in the following expression, then it
appears to be linear in ``x``, but this sort of cancellation is not
done by ``solve_linear`` so the solution will always satisfy the
original expression without causing a division by zero error.
>>> eq = x**2*(1/x - z**2/x)
>>> solve_linear(cancel(eq))
(x, 0)
>>> solve_linear(eq)
(x**2*(1 - z**2), x)
A list of symbols for which a solution is desired may be given:
>>> solve_linear(x + y + z, symbols=[y])
(y, -x - z)
A list of symbols to ignore may also be given:
>>> solve_linear(x + y + z, exclude=[x])
(y, -x - z)
(A solution for ``y`` is obtained because it is the first variable
from the canonically sorted list of symbols that had a linear
solution.)
"""
if isinstance(lhs, Eq):
if rhs:
raise ValueError(filldedent('''
If lhs is an Equality, rhs must be 0 but was %s''' % rhs))
rhs = lhs.rhs
lhs = lhs.lhs
dens = None
eq = lhs - rhs
n, d = eq.as_numer_denom()
if not n:
return S.Zero, S.One
free = n.free_symbols
if not symbols:
symbols = free
else:
bad = [s for s in symbols if not s.is_Symbol]
if bad:
if len(bad) == 1:
bad = bad[0]
if len(symbols) == 1:
eg = 'solve(%s, %s)' % (eq, symbols[0])
else:
eg = 'solve(%s, *%s)' % (eq, list(symbols))
raise ValueError(filldedent('''
solve_linear only handles symbols, not %s. To isolate
non-symbols use solve, e.g. >>> %s <<<.
''' % (bad, eg)))
symbols = free.intersection(symbols)
symbols = symbols.difference(exclude)
if not symbols:
return S.Zero, S.One
from sympy.integrals.integrals import Integral
# derivatives are easy to do but tricky to analyze to see if they
# are going to disallow a linear solution, so for simplicity we
# just evaluate the ones that have the symbols of interest
derivs = defaultdict(list)
for der in n.atoms(Derivative):
csym = der.free_symbols & symbols
for c in csym:
derivs[c].append(der)
all_zero = True
for xi in sorted(symbols, key=default_sort_key): # canonical order
# if there are derivatives in this var, calculate them now
if isinstance(derivs[xi], list):
derivs[xi] = {der: der.doit() for der in derivs[xi]}
newn = n.subs(derivs[xi])
dnewn_dxi = newn.diff(xi)
        # dnewn_dxi can be nonzero if it survives differentiation by any
# of its free symbols
free = dnewn_dxi.free_symbols
if dnewn_dxi and (not free or any(dnewn_dxi.diff(s) for s in free) or free == symbols):
all_zero = False
if dnewn_dxi is S.NaN:
break
if xi not in dnewn_dxi.free_symbols:
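            # dnewn_dxi is free of xi, so newn is linear in xi:
            # newn == dnewn_dxi*xi + newn.subs(xi, 0), giving the
            # solution xi = -newn.subs(xi, 0)/dnewn_dxi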
vi = -1/dnewn_dxi*(newn.subs(xi, 0))
if dens is None:
dens = _simple_dens(eq, symbols)
if not any(checksol(di, {xi: vi}, minimal=True) is True
for di in dens):
# simplify any trivial integral
irep = [(i, i.doit()) for i in vi.atoms(Integral) if
i.function.is_number]
# do a slight bit of simplification
vi = expand_mul(vi.subs(irep))
return xi, vi
if all_zero:
return S.Zero, S.One
if n.is_Symbol: # no solution for this symbol was found
return S.Zero, S.Zero
return n, d
def minsolve_linear_system(system, *symbols, **flags):
r"""
Find a particular solution to a linear system.
Explanation
===========
In particular, try to find a solution with the minimal possible number
of non-zero variables using a naive algorithm with exponential complexity.
If ``quick=True``, a heuristic is used.
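    A rough usage sketch (illustrative; the particular solution that is
    returned depends on the heuristics used)::

        from sympy import Matrix
        from sympy.abc import x, y, z
        from sympy.solvers.solvers import minsolve_linear_system
        # augmented matrix for the single equation x + y + z = 1
        minsolve_linear_system(Matrix([[1, 1, 1, 1]]), x, y, z)
        # -> a dict setting as few of x, y, z as possible to nonzero values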
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
x = max(k.free_symbols, key=default_sort_key)
if len(k.free_symbols) != 1:
determined[x] = S.Zero
else:
val = solve(k)[0]
if val == 0 and all(v.subs(x, val) == 0 for v in s.values()):
determined[x] = S.One
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
from itertools import combinations
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug('minsolve: %s' % n)
thissol = None
for nonzeros in combinations(list(range(N)), n):
subm = Matrix([system.col(i).T for i in nonzeros] + [system.col(-1).T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], S.One) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = S.One
else:
s[sym] = S.Zero
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
def solve_linear_system(system, *symbols, **flags):
r"""
Solve system of $N$ linear equations with $M$ variables, which means
both under- and overdetermined systems are supported.
Explanation
===========
    The possible number of solutions is zero, one, or infinite. This
    procedure will return None when there is no solution, or a dictionary
    with solutions otherwise. In the
case of underdetermined systems, all arbitrary parameters are skipped.
This may cause a situation in which an empty dictionary is returned.
In that case, all symbols can be assigned arbitrary values.
Input to this function is a $N\times M + 1$ matrix, which means it has
to be in augmented form. If you prefer to enter $N$ equations and $M$
unknowns then use ``solve(Neqs, *Msymbols)`` instead. Note: a local
copy of the matrix is made by this routine so the matrix that is
passed will not be modified.
The algorithm used here is fraction-free Gaussian elimination,
which results, after elimination, in an upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
Examples
========
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system::
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
A degenerate system returns an empty dictionary:
>>> system = Matrix(( (0,0,0), (0,0,0) ))
>>> solve_linear_system(system, x, y)
{}
"""
assert system.shape[1] == len(symbols) + 1
# This is just a wrapper for solve_lin_sys
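    # multiplying the N x (M + 1) augmented matrix by the column vector
    # (symbols..., -1) turns each row [a1, ..., aM, b] into the expression
    # a1*x1 + ... + aM*xM - b, i.e. the corresponding equation written
    # as expr == 0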
eqs = list(system * Matrix(symbols + (-1,)))
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
sol = solve_lin_sys(eqs, ring, _raw=False)
if sol is not None:
sol = {sym:val for sym, val in sol.items() if sym != val}
return sol
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
r"""
Solve equation of a type $p(x; a_1, \ldots, a_k) = q(x)$ where both
$p$ and $q$ are univariate polynomials that depend on $k$ parameters.
Explanation
===========
The result of this function is a dictionary with symbolic values of those
parameters with respect to coefficients in $q$.
This function accepts both equations class instances and ordinary
SymPy expressions. Specification of parameters and variables is
obligatory for efficiency and simplicity reasons.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import a, b, c, x
>>> from sympy.solvers import solve_undetermined_coeffs
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Eq):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
equ = cancel(equ).as_numer_denom()[0]
system = list(collect(equ.expand(), sym, evaluate=False).values())
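    # e.g. for 2*a*x + a + b - x, collecting on x gives
    # {x: 2*a - 1, 1: a + b}, so system holds the coefficients
    # [2*a - 1, a + b] (in some order); each must vanish for the
    # identity to hold for every value of x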
if not any(equ.has(sym) for equ in system):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
"""
Solves the augmented matrix system using ``LUsolve`` and returns a
dictionary in which solutions are keyed to the symbols of *syms* as ordered.
Explanation
===========
The matrix must be invertible.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.solvers import solve_linear_system_LU
>>> solve_linear_system_LU(Matrix([
... [1, 2, 0, 1],
... [3, 2, 2, 1],
... [2, 0, 0, 1]]), [x, y, z])
{x: 1/2, y: 1/4, z: -1/2}
See Also
========
LUsolve
"""
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i, 0]
return solutions
def det_perm(M):
"""
Return the determinant of *M* by using permutations to select factors.
Explanation
===========
    For sizes larger than 8 the number of permutations becomes prohibitively
    large; in that case, or if there are no symbols in the matrix, it is
    better to use the standard determinant routines (e.g., ``M.det()``).
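    A small sketch of usage (illustrative; the result is shown as a comment
    rather than as a doctest)::

        from sympy import Matrix
        from sympy.abc import a, b, c, d
        from sympy.solvers.solvers import det_perm
        det_perm(Matrix([[a, b], [c, d]]))   # -> a*d - b*c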
See Also
========
det_minor
det_quick
"""
args = []
s = True
n = M.rows
list_ = M.flat()
for perm in generate_bell(n):
fac = []
idx = 0
for j in perm:
fac.append(list_[idx + j])
idx += n
term = Mul(*fac) # disaster with unevaluated Mul -- takes forever for n=7
args.append(term if s else -term)
s = not s
return Add(*args)
def det_minor(M):
"""
Return the ``det(M)`` computed from minors without
introducing new nesting in products.
See Also
========
det_perm
det_quick
"""
n = M.rows
if n == 2:
return M[0, 0]*M[1, 1] - M[1, 0]*M[0, 1]
else:
return sum([(1, -1)[i % 2]*Add(*[M[0, i]*d for d in
Add.make_args(det_minor(M.minor_submatrix(0, i)))])
if M[0, i] else S.Zero for i in range(n)])
def det_quick(M, method=None):
"""
Return ``det(M)`` assuming that either
there are lots of zeros or the size of the matrix
is small. If this assumption is not met, then the normal
Matrix.det function will be used with method = ``method``.
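    For instance (illustrative; the result is shown as a comment)::

        from sympy import Matrix
        from sympy.abc import x, y
        from sympy.solvers.solvers import det_quick
        det_quick(Matrix([[x, 0], [0, y]]))   # -> x*y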
See Also
========
det_minor
det_perm
"""
if any(i.has(Symbol) for i in M):
if M.rows < 8 and all(i.has(Symbol) for i in M):
return det_perm(M)
return det_minor(M)
else:
return M.det(method=method) if method else M.det()
def inv_quick(M):
"""Return the inverse of ``M``, assuming that either
there are lots of zeros or the size of the matrix
is small.
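    A small sketch (illustrative; the result is shown as a comment)::

        from sympy import Matrix
        from sympy.abc import a, b, c, d
        from sympy.solvers.solvers import inv_quick
        inv_quick(Matrix([[a, b], [c, d]]))
        # -> Matrix([[d, -b], [-c, a]]) with every entry divided by a*d - b*c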
"""
if not all(i.is_Number for i in M):
if not any(i.is_Number for i in M):
det = lambda _: det_perm(_)
else:
det = lambda _: det_minor(_)
else:
return M.inv()
n = M.rows
d = det(M)
if d == S.Zero:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible")
ret = zeros(n)
s1 = -1
for i in range(n):
s = s1 = -s1
for j in range(n):
di = det(M.minor_submatrix(i, j))
ret[j, i] = s*di/d
s = -s
return ret
# these are functions that have multiple inverse values per period
multi_inverses = {
sin: lambda x: (asin(x), S.Pi - asin(x)),
cos: lambda x: (acos(x), 2*S.Pi - acos(x)),
}
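# e.g. for sin(x) = S.Half these give asin(1/2) = pi/6 and
# pi - asin(1/2) = 5*pi/6, the two inverses within one period; _tsolve
# uses them below to generate both families of solutions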
def _tsolve(eq, sym, **flags):
"""
Helper for ``_solve`` that solves a transcendental equation with respect
to the given symbol. Various equations containing powers and logarithms,
can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> from sympy import log
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy.abc import x
>>> tsolve(3**(2*x + 5) - 4, x)
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return None
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
# to try get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _solve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if isinstance(f, log):
return _solve(f.args[0] - exp(rhs), sym, **flags)
return _tsolve(f - rhs, sym, **flags)
elif lhs.is_Pow:
if lhs.exp.is_Integer:
if lhs - rhs != eq:
return _solve(lhs - rhs, sym, **flags)
if sym not in lhs.exp.free_symbols:
return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
# _tsolve calls this with Dummy before passing the actual number in.
if any(t.is_Dummy for t in rhs.free_symbols):
raise NotImplementedError # _tsolve will call here again...
# a ** g(x) == 0
if not rhs:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _solve(lhs.base, sym, **flags)
return [s for s in sol_base if lhs.exp.subs(sym, s) != 0]
# a ** g(x) == b
if not lhs.base.has(sym):
if lhs.base == 0:
return _solve(lhs.exp, sym, **flags) if rhs != 0 else []
# Gets most solutions...
if lhs.base == rhs.as_base_exp()[0]:
# handles case when bases are equal
sol = _solve(lhs.exp - rhs.as_base_exp()[1], sym, **flags)
else:
# handles cases when bases are not equal and exp
# may or may not be equal
sol = _solve(exp(log(lhs.base)*lhs.exp)-exp(log(rhs)), sym, **flags)
# Check for duplicate solutions
def equal(expr1, expr2):
_ = Dummy()
eq = checksol(expr1 - _, _, expr2)
if eq is None:
if nsimplify(expr1) != nsimplify(expr2):
return False
# they might be coincidentally the same
# so check more rigorously
eq = expr1.equals(expr2)
return eq
# Guess a rational exponent
e_rat = nsimplify(log(abs(rhs))/log(abs(lhs.base)))
e_rat = simplify(posify(e_rat)[0])
n, d = fraction(e_rat)
if expand(lhs.base**n - rhs**d) == 0:
sol = [s for s in sol if not equal(lhs.exp.subs(sym, s), e_rat)]
sol.extend(_solve(lhs.exp - e_rat, sym, **flags))
return list(ordered(set(sol)))
# f(x) ** g(x) == c
else:
sol = []
logform = lhs.exp*log(lhs.base) - log(rhs)
if logform != lhs - rhs:
try:
sol.extend(_solve(logform, sym, **flags))
except NotImplementedError:
pass
# Collect possible solutions and check with substitution later.
check = []
if rhs == 1:
# f(x) ** g(x) = 1 -- g(x)=0 or f(x)=+-1
check.extend(_solve(lhs.exp, sym, **flags))
check.extend(_solve(lhs.base - 1, sym, **flags))
check.extend(_solve(lhs.base + 1, sym, **flags))
elif rhs.is_Rational:
for d in (i for i in divisors(abs(rhs.p)) if i != 1):
e, t = integer_log(rhs.p, d)
if not t:
                            continue # rhs.p != d**e
for s in divisors(abs(rhs.q)):
                            if s**e == rhs.q:
r = Rational(d, s)
check.extend(_solve(lhs.base - r, sym, **flags))
check.extend(_solve(lhs.base + r, sym, **flags))
check.extend(_solve(lhs.exp - e, sym, **flags))
elif rhs.is_irrational:
b_l, e_l = lhs.base.as_base_exp()
n, d = (e_l*lhs.exp).as_numer_denom()
b, e = sqrtdenest(rhs).as_base_exp()
check = [sqrtdenest(i) for i in (_solve(lhs.base - b, sym, **flags))]
check.extend([sqrtdenest(i) for i in (_solve(lhs.exp - e, sym, **flags))])
if e_l*d != 1:
check.extend(_solve(b_l**n - rhs**(e_l*d), sym, **flags))
for s in check:
ok = checksol(eq, sym, s)
if ok is None:
ok = eq.subs(sym, s).equals(0)
if ok:
sol.append(s)
return list(ordered(set(sol)))
elif lhs.is_Function and len(lhs.args) == 1:
if lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[lhs.func](rhs):
soln.extend(_solve(lhs.args[0] - i, sym, **flags))
return list(ordered(soln))
elif lhs.func == LambertW:
return _solve(lhs.args[0] - rhs*exp(rhs), sym, **flags)
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _solve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
# maybe it is a lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if isinstance(gi, (exp, log)) or (gi.is_Pow and gi.base == S.Exp1):
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(list(zip(up_or_log, [0]*len(up_or_log)))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
_eq = lhs - rhs
sols = _solve_lambert(_eq, sym, g)
# use a simplified form if it satisfies eq
# and has fewer operations
for n, s in enumerate(sols):
ns = nsimplify(s)
if ns != s and ns.count_ops() <= s.count_ops():
ok = checksol(_eq, sym, ns)
if ok is None:
ok = _eq.subs(sym, ns).equals(0)
if ok:
sols[n] = ns
return sols
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _solve(p, u, **flags)
return list(ordered({i.subs(u, s)
for i in inversion for s in sol}))
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
if rhs == S.ComplexInfinity:
return []
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _solve(pos, u, **flags)
return list(ordered([s.subs(reps) for s in soln]))
except NotImplementedError:
pass
else:
pass # here for coverage
return # here for coverage
# TODO: option for calculating J numerically
@conserve_mpmath_dps
def nsolve(*args, dict=False, **kwargs):
r"""
Solve a nonlinear equation system numerically: ``nsolve(f, [args,] x0,
modules=['mpmath'], **kwargs)``.
Explanation
===========
``f`` is a vector function of symbolic expressions representing the system.
*args* are the variables. If there is only one variable, this argument can
be omitted. ``x0`` is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to
evaluate the function and the Jacobian matrix. Make sure to use a module
that supports matrices. For more information on the syntax, please see the
docstring of ``lambdify``.
If the keyword arguments contain ``dict=True`` (default is False) ``nsolve``
will return a list (perhaps empty) of solution mappings. This might be
especially useful if you want to use ``nsolve`` as a fallback to solve since
using the dict argument for both methods produces return values of
consistent type structure. Please note: to keep this consistent with
``solve``, the solution will be returned in a list even though ``nsolve``
(currently at least) only finds one solution at a time.
Overdetermined systems are supported.
Examples
========
>>> from sympy import Symbol, nsolve
>>> import mpmath
>>> mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print(nsolve((f1, f2), (x1, x2), (-1, 1)))
Matrix([[-1.19287309935246], [1.27844411169911]])
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
To solve with higher precision than the default, use the prec argument:
>>> from sympy import cos
>>> nsolve(cos(x) - x, 1)
0.739085133215161
>>> nsolve(cos(x) - x, 1, prec=50)
0.73908513321516064165531208767387340401341175890076
>>> cos(_)
0.73908513321516064165531208767387340401341175890076
To solve for complex roots of real functions, a nonreal initial point
must be specified:
>>> from sympy import I
>>> nsolve(x**2 + 2, I)
1.4142135623731*I
    ``mpmath.findroot`` is used and you can find its more extensive
    documentation there, especially concerning keyword parameters and
    available solvers. Note, however, that for functions which are very
    steep near the root, verification of the solution may fail. In
this case you should use the flag ``verify=False`` and
independently verify the solution.
>>> from sympy import cos, cosh
>>> f = cos(x)*cosh(x) - 1
>>> nsolve(f, 3.14*100)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1.39267e+230 > 2.1684e-19)
>>> ans = nsolve(f, 3.14*100, verify=False); ans
312.588469032184
>>> f.subs(x, ans).n(2)
2.1e+121
>>> (f/f.diff(x)).subs(x, ans).n(2)
7.4e-15
One might safely skip the verification if bounds of the root are known
and a bisection method is used:
>>> bounds = lambda i: (3.14*i, 3.14*(i + 1))
>>> nsolve(f, bounds(100), solver='bisect', verify=False)
315.730061685774
Alternatively, a function may be better behaved when the
denominator is ignored. Since this is not always the case, however,
the decision of what function to use is left to the discretion of
the user.
>>> eq = x**2/(1 - x)/(1 - 2*x)**2 - 100
>>> nsolve(eq, 0.46)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (10000 > 2.1684e-19)
Try another starting point or tweak arguments.
>>> nsolve(eq.as_numer_denom()[0], 0.46)
0.46792545969349058
"""
# there are several other SymPy functions that use method= so
# guard against that here
if 'method' in kwargs:
raise ValueError(filldedent('''
Keyword "method" should not be used in this context. When using
some mpmath solvers directly, the keyword "method" is
used, but when using nsolve (and findroot) the keyword to use is
"solver".'''))
if 'prec' in kwargs:
import mpmath
mpmath.mp.dps = kwargs.pop('prec')
# keyword argument to return result as a dictionary
as_dict = dict
from builtins import dict # to unhide the builtin
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
if iterable(fargs) and iterable(x0):
if len(x0) != len(fargs):
raise TypeError('nsolve expected exactly %i guess vectors, got %i'
% (len(fargs), len(x0)))
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
if iterable(f):
raise TypeError('nsolve expected 3 arguments, got 2')
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if iterable(f):
f = list(f)
for i, fi in enumerate(f):
if isinstance(fi, Eq):
f[i] = fi.lhs - fi.rhs
f = Matrix(f).T
if iterable(x0):
x0 = list(x0)
if not isinstance(f, Matrix):
# assume it's a SymPy expression
if isinstance(f, Eq):
f = f.lhs - f.rhs
syms = f.free_symbols
if fargs is None:
fargs = syms.copy().pop()
if not (len(syms) == 1 and (fargs in syms or fargs[0] in syms)):
raise ValueError(filldedent('''
expected a one-dimensional and numerical function'''))
# the function is much better behaved if there is no denominator
# but sending the numerator is left to the user since sometimes
# the function is better behaved when the denominator is present
# e.g., issue 11768
f = lambdify(fargs, f, modules)
x = sympify(findroot(f, x0, **kwargs))
if as_dict:
return [{fargs: x}]
return x
if len(fargs) > f.cols:
raise NotImplementedError(filldedent('''
need at least as many equations as variables'''))
verbose = kwargs.get('verbose', False)
if verbose:
print('f(x):')
print(f)
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print('J(x):')
print(J)
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
if as_dict:
return [dict(zip(fargs, [sympify(xi) for xi in x]))]
return Matrix(x)
def _invert(eq, *symbols, **kwargs):
"""
Return tuple (i, d) where ``i`` is independent of *symbols* and ``d``
contains symbols.
Explanation
===========
``i`` and ``d`` are obtained after recursively using algebraic inversion
until an uninvertible ``d`` remains. If there are no free symbols then
``d`` will be zero. Some (but not necessarily all) solutions to the
expression ``i - d`` will be related to the solutions of the original
expression.
Examples
========
>>> from sympy.solvers.solvers import _invert as invert
>>> from sympy import sqrt, cos
>>> from sympy.abc import x, y
>>> invert(x - 3)
(3, x)
>>> invert(3)
(3, 0)
>>> invert(2*cos(x) - 1)
(1/2, cos(x))
>>> invert(sqrt(x) - 3)
(3, sqrt(x))
>>> invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> invert(sqrt(x + y) - 2)
(4, x + y)
If the exponent is an Integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
if eq.args:
# make sure we are working with flat eq
eq = eq.func(*eq.args)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, S.Zero
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = S.Zero
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep.is_zero:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols
if lhs.is_Add:
terms = {}
for a in lhs.args:
i, d = a.as_independent(*symbols)
terms.setdefault(d, []).append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(_ispow(i) for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
elif ai == -bi:
if isinstance(ad, Function) and ad.func == bd.func:
if len(ad.args) == len(bd.args) == 1:
lhs = ad.args[0] - bd.args[0]
elif len(ad.args) == len(bd.args):
# should be able to solve
# f(x, y) - f(2 - x, 0) == 0 -> x == 1
raise NotImplementedError(
'equal function with more than 1 argument')
else:
raise ValueError(
'function with different numbers of args')
elif lhs.is_Mul and any(_ispow(a) for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and lhs.inverse() is not None and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif isinstance(lhs, atan2):
y, x = lhs.args
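                # rewrite using the half-angle identity
                # atan2(y, x) == 2*atan(y/(sqrt(x**2 + y**2) + x)),
                # valid away from the branch cut on the negative real axis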
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
elif lhs.func == rhs.func:
if len(lhs.args) == len(rhs.args) == 1:
lhs = lhs.args[0]
rhs = rhs.args[0]
elif len(lhs.args) == len(rhs.args):
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2
raise NotImplementedError(
'equal function with more than 1 argument')
else:
raise ValueError(
'function with different numbers of args')
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
# The rationale for this is that right now the multi-system solvers
# doesn't try to resolve generators to see, for example, if the whole
# system is written in terms of sqrt(x + y) so it will just fail, so we
# do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs
def unrad(eq, *syms, **flags):
"""
Remove radicals with symbolic arguments and return (eq, cov),
None, or raise an error.
Explanation
===========
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where:
*eq*, ``cov``
*eq* is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. *eq* might be rewritten in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
solutions to the original equation (if there are any).
*syms*
An iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if *syms* is not
set.
*flags* are used internally for communication during recursive calls.
    The following option is also recognized:
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if:
* All bases of the radicals are the same; a change of variables is
done in this case.
* If all radicals appear in one term of the expression.
        * There are at most four terms with sqrt() factors.
* There are only two terms with radicals.
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(-x**3 + x**2 + 2*x + 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow:
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
margs = list(Mul.make_args(eq))
changed = False
for i, m in enumerate(margs):
if m.could_extract_minus_sign():
margs[i] = -m
changed = True
if changed:
eq = Mul(*margs, evaluate=False)
return eq, cov
def _Q(pow):
        # return the denominator of the leading Rational of the Pow's exponent
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not pow.is_Pow:
continue
if _Q(pow) == 1:
continue
if pow.free_symbols & syms:
return True
return False
_take = flags.setdefault('_take', _take)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs # XXX legacy Eq as Eqn support
elif not isinstance(eq, Expr):
return
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True, clear=True))
eq = eq.as_numer_denom()[0]
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
# see if there are radicals in symbols of interest
syms = set(syms) or eq.free_symbols # _take uses this
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g)]
if not gens:
return
# recast poly in terms of eigen-gens
poly = eq.as_poly(*gens)
# - an exponent has a symbol of interest (don't handle)
if any(g.exp.has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
# get terms together that have common generators
drad = dict(list(zip(rads, list(range(len(rads))))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
x = b.free_symbols
else:
x = syms
x = list(ordered(x))[0]
try:
inv = _solve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _solve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_solve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
# zz represents the unraded expression into which the
# specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
def _norm2(a, b):
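                # the expanded form of (a + b)**2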
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
# Delayed imports
from sympy.solvers.bivariate import (
bivariate_type, _solve_lambert, _filtered_gens)
|
e5d0e043d8127e3fdd93ddd85728287a12624cef63c92f470e5fdec0c5dcb5a3 | """
Discrete Fourier Transform, Number Theoretic Transform,
Walsh Hadamard Transform, Mobius Transform
"""
from sympy.core import S, Symbol, sympify
from sympy.core.function import expand_mul
from sympy.core.numbers import pi, I
from sympy.functions.elementary.trigonometric import sin, cos
from sympy.ntheory import isprime, primitive_root
from sympy.utilities.iterables import ibin, iterable
from sympy.utilities.misc import as_int
#----------------------------------------------------------------------------#
# #
# Discrete Fourier Transform #
# #
#----------------------------------------------------------------------------#
def _fourier_transform(seq, dps, inverse=False):
"""Utility function for the Discrete Fourier Transform"""
if not iterable(seq):
raise TypeError("Expected a sequence of numeric coefficients "
"for Fourier Transform")
a = [sympify(arg) for arg in seq]
if any(x.has(Symbol) for x in a):
raise ValueError("Expected non-symbolic coefficients")
n = len(a)
if n < 2:
return a
b = n.bit_length() - 1
if n&(n - 1): # not a power of 2
b += 1
n = 2**b
a += [S.Zero]*(n - len(a))
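    # permute the (zero-padded) sequence into bit-reversed index order so
    # the in-place butterfly passes below combine the correct pairs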
for i in range(1, n):
j = int(ibin(i, b, str=True)[::-1], 2)
if i < j:
a[i], a[j] = a[j], a[i]
ang = -2*pi/n if inverse else 2*pi/n
if dps is not None:
ang = ang.evalf(dps + 2)
w = [cos(ang*i) + I*sin(ang*i) for i in range(n // 2)]
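    # iterative radix-2 butterfly passes over blocks of growing size h,
    # using the precomputed twiddle factors w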
h = 2
while h <= n:
hf, ut = h // 2, n // h
for i in range(0, n, h):
for j in range(hf):
u, v = a[i + j], expand_mul(a[i + j + hf]*w[ut * j])
a[i + j], a[i + j + hf] = u + v, u - v
h *= 2
if inverse:
a = [(x/n).evalf(dps) for x in a] if dps is not None \
else [x/n for x in a]
return a
def fft(seq, dps=None):
r"""
Performs the Discrete Fourier Transform (**DFT**) in the complex domain.
The sequence is automatically padded to the right with zeros, as the
*radix-2 FFT* requires the number of sample points to be a power of 2.
This method should be used with default arguments only for short sequences
as the complexity of expressions increases with the size of the sequence.
Parameters
==========
seq : iterable
The sequence on which **DFT** is to be applied.
dps : Integer
Specifies the number of decimal digits for precision.
Examples
========
>>> from sympy import fft, ifft
>>> fft([1, 2, 3, 4])
[10, -2 - 2*I, -2, -2 + 2*I]
>>> ifft(_)
[1, 2, 3, 4]
>>> ifft([1, 2, 3, 4])
[5/2, -1/2 + I/2, -1/2, -1/2 - I/2]
>>> fft(_)
[1, 2, 3, 4]
>>> ifft([1, 7, 3, 4], dps=15)
[3.75, -0.5 - 0.75*I, -1.75, -0.5 + 0.75*I]
>>> fft(_)
[1.0, 7.0, 3.0, 4.0]
References
==========
.. [1] https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm
.. [2] http://mathworld.wolfram.com/FastFourierTransform.html
"""
return _fourier_transform(seq, dps=dps)
def ifft(seq, dps=None):
return _fourier_transform(seq, dps=dps, inverse=True)
ifft.__doc__ = fft.__doc__
#----------------------------------------------------------------------------#
# #
# Number Theoretic Transform #
# #
#----------------------------------------------------------------------------#
def _number_theoretic_transform(seq, prime, inverse=False):
"""Utility function for the Number Theoretic Transform"""
if not iterable(seq):
raise TypeError("Expected a sequence of integer coefficients "
"for Number Theoretic Transform")
p = as_int(prime)
if not isprime(p):
raise ValueError("Expected prime modulus for "
"Number Theoretic Transform")
a = [as_int(x) % p for x in seq]
n = len(a)
if n < 1:
return a
b = n.bit_length() - 1
if n&(n - 1):
b += 1
n = 2**b
if (p - 1) % n:
raise ValueError("Expected prime modulus of the form (m*2**k + 1)")
a += [0]*(n - len(a))
for i in range(1, n):
j = int(ibin(i, b, str=True)[::-1], 2)
if i < j:
a[i], a[j] = a[j], a[i]
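    # pr generates the multiplicative group mod p, so rt is a primitive
    # n-th root of unity mod p; for the inverse transform it is replaced
    # by its modular inverse (Fermat's little theorem)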
pr = primitive_root(p)
rt = pow(pr, (p - 1) // n, p)
if inverse:
rt = pow(rt, p - 2, p)
w = [1]*(n // 2)
for i in range(1, n // 2):
w[i] = w[i - 1]*rt % p
h = 2
while h <= n:
hf, ut = h // 2, n // h
for i in range(0, n, h):
for j in range(hf):
u, v = a[i + j], a[i + j + hf]*w[ut * j]
a[i + j], a[i + j + hf] = (u + v) % p, (u - v) % p
h *= 2
if inverse:
rv = pow(n, p - 2, p)
a = [x*rv % p for x in a]
return a
def ntt(seq, prime):
r"""
Performs the Number Theoretic Transform (**NTT**), which specializes the
Discrete Fourier Transform (**DFT**) over quotient ring `Z/pZ` for prime
`p` instead of complex numbers `C`.
The sequence is automatically padded to the right with zeros, as the
*radix-2 NTT* requires the number of sample points to be a power of 2.
Parameters
==========
seq : iterable
        The sequence on which **NTT** is to be applied.
prime : Integer
Prime modulus of the form `(m 2^k + 1)` to be used for performing
**NTT** on the sequence.
Examples
========
>>> from sympy import ntt, intt
>>> ntt([1, 2, 3, 4], prime=3*2**8 + 1)
[10, 643, 767, 122]
>>> intt(_, 3*2**8 + 1)
[1, 2, 3, 4]
>>> intt([1, 2, 3, 4], prime=3*2**8 + 1)
[387, 415, 384, 353]
>>> ntt(_, prime=3*2**8 + 1)
[1, 2, 3, 4]
References
==========
.. [1] http://www.apfloat.org/ntt.html
.. [2] http://mathworld.wolfram.com/NumberTheoreticTransform.html
.. [3] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general%29
"""
return _number_theoretic_transform(seq, prime=prime)
def intt(seq, prime):
return _number_theoretic_transform(seq, prime=prime, inverse=True)
intt.__doc__ = ntt.__doc__
#----------------------------------------------------------------------------#
# #
# Walsh Hadamard Transform #
# #
#----------------------------------------------------------------------------#
def _walsh_hadamard_transform(seq, inverse=False):
"""Utility function for the Walsh Hadamard Transform"""
if not iterable(seq):
raise TypeError("Expected a sequence of coefficients "
"for Walsh Hadamard Transform")
a = [sympify(arg) for arg in seq]
n = len(a)
if n < 2:
return a
if n&(n - 1):
n = 2**n.bit_length()
a += [S.Zero]*(n - len(a))
h = 2
while h <= n:
hf = h // 2
for i in range(0, n, h):
for j in range(hf):
u, v = a[i + j], a[i + j + hf]
a[i + j], a[i + j + hf] = u + v, u - v
h *= 2
if inverse:
a = [x/n for x in a]
return a
def fwht(seq):
r"""
Performs the Walsh Hadamard Transform (**WHT**), and uses Hadamard
ordering for the sequence.
The sequence is automatically padded to the right with zeros, as the
*radix-2 FWHT* requires the number of sample points to be a power of 2.
Parameters
==========
seq : iterable
The sequence on which WHT is to be applied.
Examples
========
>>> from sympy import fwht, ifwht
>>> fwht([4, 2, 2, 0, 0, 2, -2, 0])
[8, 0, 8, 0, 8, 8, 0, 0]
>>> ifwht(_)
[4, 2, 2, 0, 0, 2, -2, 0]
>>> ifwht([19, -1, 11, -9, -7, 13, -15, 5])
[2, 0, 4, 0, 3, 10, 0, 0]
>>> fwht(_)
[19, -1, 11, -9, -7, 13, -15, 5]
References
==========
.. [1] https://en.wikipedia.org/wiki/Hadamard_transform
.. [2] https://en.wikipedia.org/wiki/Fast_Walsh%E2%80%93Hadamard_transform
"""
return _walsh_hadamard_transform(seq)
def ifwht(seq):
return _walsh_hadamard_transform(seq, inverse=True)
ifwht.__doc__ = fwht.__doc__
#----------------------------------------------------------------------------#
# #
# Mobius Transform for Subset Lattice #
# #
#----------------------------------------------------------------------------#
def _mobius_transform(seq, sgn, subset):
r"""Utility function for performing Mobius Transform using
    Yates' Dynamic Programming method"""
if not iterable(seq):
raise TypeError("Expected a sequence of coefficients")
a = [sympify(arg) for arg in seq]
n = len(a)
if n < 2:
return a
if n&(n - 1):
n = 2**n.bit_length()
a += [S.Zero]*(n - len(a))
if subset:
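        # Yates-style DP over bit positions: for each bit i, fold the value
        # at the index with bit i cleared into every index having bit i set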
i = 1
while i < n:
for j in range(n):
if j & i:
a[j] += sgn*a[j ^ i]
i *= 2
else:
i = 1
while i < n:
for j in range(n):
if j & i:
continue
a[j] += sgn*a[j ^ i]
i *= 2
return a
def mobius_transform(seq, subset=True):
r"""
Performs the Mobius Transform for subset lattice with indices of
sequence as bitmasks.
The indices of each argument, considered as bit strings, correspond
to subsets of a finite set.
The sequence is automatically padded to the right with zeros, as the
definition of subset/superset based on bitmasks (indices) requires
the size of sequence to be a power of 2.
Parameters
==========
seq : iterable
The sequence on which Mobius Transform is to be applied.
subset : bool
Specifies if Mobius Transform is applied by enumerating subsets
or supersets of the given set.
Examples
========
>>> from sympy import symbols
>>> from sympy import mobius_transform, inverse_mobius_transform
>>> x, y, z = symbols('x y z')
>>> mobius_transform([x, y, z])
[x, x + y, x + z, x + y + z]
>>> inverse_mobius_transform(_)
[x, y, z, 0]
>>> mobius_transform([x, y, z], subset=False)
[x + y + z, y, z, 0]
>>> inverse_mobius_transform(_, subset=False)
[x, y, z, 0]
>>> mobius_transform([1, 2, 3, 4])
[1, 3, 4, 10]
>>> inverse_mobius_transform(_)
[1, 2, 3, 4]
>>> mobius_transform([1, 2, 3, 4], subset=False)
[10, 6, 7, 4]
>>> inverse_mobius_transform(_, subset=False)
[1, 2, 3, 4]
References
==========
.. [1] https://en.wikipedia.org/wiki/M%C3%B6bius_inversion_formula
.. [2] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
.. [3] https://arxiv.org/pdf/1211.0189.pdf
"""
return _mobius_transform(seq, sgn=+1, subset=subset)
def inverse_mobius_transform(seq, subset=True):
return _mobius_transform(seq, sgn=-1, subset=subset)
inverse_mobius_transform.__doc__ = mobius_transform.__doc__
|
c5e5c22f13d2a4135faca3653c61a9b94dc3f40cdfd2e474d4ea4318238000b8 | """
Convolution (using **FFT**, **NTT**, **FWHT**), Subset Convolution,
Covering Product, Intersecting Product
"""
from sympy.core import S, sympify
from sympy.core.function import expand_mul
from sympy.discrete.transforms import (
fft, ifft, ntt, intt, fwht, ifwht,
mobius_transform, inverse_mobius_transform)
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import as_int
def convolution(a, b, cycle=0, dps=None, prime=None, dyadic=None, subset=None):
"""
Performs convolution by determining the type of desired
convolution using hints.
Exactly one of ``dps``, ``prime``, ``dyadic``, ``subset`` arguments
should be specified explicitly for identifying the type of convolution,
and the argument ``cycle`` can be specified optionally.
For the default arguments, linear convolution is performed using **FFT**.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
cycle : Integer
Specifies the length for doing cyclic convolution.
dps : Integer
Specifies the number of decimal digits for precision for
performing **FFT** on the sequence.
prime : Integer
Prime modulus of the form `(m 2^k + 1)` to be used for
performing **NTT** on the sequence.
dyadic : bool
Identifies the convolution type as dyadic (*bitwise-XOR*)
convolution, which is performed using **FWHT**.
subset : bool
Identifies the convolution type as subset convolution.
Examples
========
>>> from sympy import convolution, symbols, S, I
>>> u, v, w, x, y, z = symbols('u v w x y z')
>>> convolution([1 + 2*I, 4 + 3*I], [S(5)/4, 6], dps=3)
[1.25 + 2.5*I, 11.0 + 15.8*I, 24.0 + 18.0*I]
>>> convolution([1, 2, 3], [4, 5, 6], cycle=3)
[31, 31, 28]
>>> convolution([111, 777], [888, 444], prime=19*2**10 + 1)
[1283, 19351, 14219]
>>> convolution([111, 777], [888, 444], prime=19*2**10 + 1, cycle=2)
[15502, 19351]
>>> convolution([u, v], [x, y, z], dyadic=True)
[u*x + v*y, u*y + v*x, u*z, v*z]
>>> convolution([u, v], [x, y, z], dyadic=True, cycle=2)
[u*x + u*z + v*y, u*y + v*x + v*z]
>>> convolution([u, v, w], [x, y, z], subset=True)
[u*x, u*y + v*x, u*z + w*x, v*z + w*y]
>>> convolution([u, v, w], [x, y, z], subset=True, cycle=3)
[u*x + v*z + w*y, u*y + v*x, u*z + w*x]
"""
c = as_int(cycle)
if c < 0:
raise ValueError("The length for cyclic convolution "
"must be non-negative")
dyadic = True if dyadic else None
subset = True if subset else None
if sum(x is not None for x in (prime, dps, dyadic, subset)) > 1:
raise TypeError("Ambiguity in determining the type of convolution")
if prime is not None:
ls = convolution_ntt(a, b, prime=prime)
return ls if not c else [sum(ls[i::c]) % prime for i in range(c)]
if dyadic:
ls = convolution_fwht(a, b)
elif subset:
ls = convolution_subset(a, b)
else:
ls = convolution_fft(a, b, dps=dps)
return ls if not c else [sum(ls[i::c]) for i in range(c)]
#----------------------------------------------------------------------------#
# #
# Convolution for Complex domain #
# #
#----------------------------------------------------------------------------#
def convolution_fft(a, b, dps=None):
"""
Performs linear convolution using Fast Fourier Transform.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
dps : Integer
Specifies the number of decimal digits for precision.
Examples
========
>>> from sympy import S, I
>>> from sympy.discrete.convolutions import convolution_fft
>>> convolution_fft([2, 3], [4, 5])
[8, 22, 15]
>>> convolution_fft([2, 5], [6, 7, 3])
[12, 44, 41, 15]
>>> convolution_fft([1 + 2*I, 4 + 3*I], [S(5)/4, 6])
[5/4 + 5*I/2, 11 + 63*I/4, 24 + 18*I]
References
==========
.. [1] https://en.wikipedia.org/wiki/Convolution_theorem
.. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general%29
"""
a, b = a[:], b[:]
n = m = len(a) + len(b) - 1 # convolution size
if n > 0 and n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = fft(a, dps), fft(b, dps)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = ifft(a, dps)[:m]
return a
#----------------------------------------------------------------------------#
# #
# Convolution for GF(p) #
# #
#----------------------------------------------------------------------------#
def convolution_ntt(a, b, prime):
"""
Performs linear convolution using Number Theoretic Transform.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
prime : Integer
Prime modulus of the form `(m 2^k + 1)` to be used for performing
**NTT** on the sequence.
Examples
========
>>> from sympy.discrete.convolutions import convolution_ntt
>>> convolution_ntt([2, 3], [4, 5], prime=19*2**10 + 1)
[8, 22, 15]
>>> convolution_ntt([2, 5], [6, 7, 3], prime=19*2**10 + 1)
[12, 44, 41, 15]
>>> convolution_ntt([333, 555], [222, 666], prime=19*2**10 + 1)
[15555, 14219, 19404]
References
==========
.. [1] https://en.wikipedia.org/wiki/Convolution_theorem
.. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general%29
"""
a, b, p = a[:], b[:], as_int(prime)
n = m = len(a) + len(b) - 1 # convolution size
if n > 0 and n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [0]*(n - len(a))
b += [0]*(n - len(b))
a, b = ntt(a, p), ntt(b, p)
a = [x*y % p for x, y in zip(a, b)]
a = intt(a, p)[:m]
return a
#----------------------------------------------------------------------------#
# #
# Convolution for 2**n-group #
# #
#----------------------------------------------------------------------------#
def convolution_fwht(a, b):
"""
Performs dyadic (*bitwise-XOR*) convolution using Fast Walsh Hadamard
Transform.
The convolution is automatically padded to the right with zeros, as the
*radix-2 FWHT* requires the number of sample points to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
Examples
========
>>> from sympy import symbols, S, I
>>> from sympy.discrete.convolutions import convolution_fwht
>>> u, v, x, y = symbols('u v x y')
>>> convolution_fwht([u, v], [x, y])
[u*x + v*y, u*y + v*x]
>>> convolution_fwht([2, 3], [4, 5])
[23, 22]
>>> convolution_fwht([2, 5 + 4*I, 7], [6*I, 7, 3 + 4*I])
[56 + 68*I, -10 + 30*I, 6 + 50*I, 48 + 32*I]
>>> convolution_fwht([S(33)/7, S(55)/6, S(7)/4], [S(2)/3, 5])
[2057/42, 1870/63, 7/6, 35/4]
References
==========
.. [1] https://www.radioeng.cz/fulltexts/2002/02_03_40_42.pdf
.. [2] https://en.wikipedia.org/wiki/Hadamard_transform
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = fwht(a), fwht(b)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = ifwht(a)
return a
#----------------------------------------------------------------------------#
# #
# Subset Convolution #
# #
#----------------------------------------------------------------------------#
def convolution_subset(a, b):
"""
Performs Subset Convolution of given sequences.
The indices of each argument, considered as bit strings, correspond to
subsets of a finite set.
The sequence is automatically padded to the right with zeros, as the
definition of subset based on bitmasks (indices) requires the size of
sequence to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
Examples
========
>>> from sympy import symbols, S
>>> from sympy.discrete.convolutions import convolution_subset
>>> u, v, x, y, z = symbols('u v x y z')
>>> convolution_subset([u, v], [x, y])
[u*x, u*y + v*x]
>>> convolution_subset([u, v, x], [y, z])
[u*y, u*z + v*y, x*y, x*z]
>>> convolution_subset([1, S(2)/3], [3, 4])
[3, 6]
>>> convolution_subset([1, 3, S(5)/7], [7])
[7, 21, 5, 0]
References
==========
.. [1] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
"""
if not a or not b:
return []
if not iterable(a) or not iterable(b):
raise TypeError("Expected a sequence of coefficients for convolution")
a = [sympify(arg) for arg in a]
b = [sympify(arg) for arg in b]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
c = [S.Zero]*n
for mask in range(n):
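        # enumerate every non-empty submask of ``mask`` with the standard
        # ``smask = (smask - 1) & mask`` trick; the statement after the
        # loop accounts for the empty submask (smask == 0)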
smask = mask
while smask > 0:
c[mask] += expand_mul(a[smask] * b[mask^smask])
smask = (smask - 1)&mask
c[mask] += expand_mul(a[smask] * b[mask^smask])
return c
#----------------------------------------------------------------------------#
# #
# Covering Product #
# #
#----------------------------------------------------------------------------#
def covering_product(a, b):
"""
Returns the covering product of given sequences.
The indices of each argument, considered as bit strings, correspond to
subsets of a finite set.
The covering product of given sequences is a sequence which contains
the sum of products of the elements of the given sequences grouped by
the *bitwise-OR* of the corresponding indices.
The sequence is automatically padded to the right with zeros, as the
definition of subset based on bitmasks (indices) requires the size of
sequence to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which covering product is to be obtained.
Examples
========
>>> from sympy import symbols, S, I, covering_product
>>> u, v, x, y, z = symbols('u v x y z')
>>> covering_product([u, v], [x, y])
[u*x, u*y + v*x + v*y]
>>> covering_product([u, v, x], [y, z])
[u*y, u*z + v*y + v*z, x*y, x*z]
>>> covering_product([1, S(2)/3], [3, 4 + 5*I])
[3, 26/3 + 25*I/3]
>>> covering_product([1, 3, S(5)/7], [7, 8])
[7, 53, 5, 40/7]
References
==========
.. [1] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = mobius_transform(a), mobius_transform(b)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = inverse_mobius_transform(a)
return a
#----------------------------------------------------------------------------#
# #
# Intersecting Product #
# #
#----------------------------------------------------------------------------#
def intersecting_product(a, b):
"""
Returns the intersecting product of given sequences.
The indices of each argument, considered as bit strings, correspond to
subsets of a finite set.
The intersecting product of given sequences is the sequence which
contains the sum of products of the elements of the given sequences
grouped by the *bitwise-AND* of the corresponding indices.
The sequence is automatically padded to the right with zeros, as the
definition of subset based on bitmasks (indices) requires the size of
sequence to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which intersecting product is to be obtained.
Examples
========
>>> from sympy import symbols, S, I, intersecting_product
>>> u, v, x, y, z = symbols('u v x y z')
>>> intersecting_product([u, v], [x, y])
[u*x + u*y + v*x, v*y]
>>> intersecting_product([u, v, x], [y, z])
[u*y + u*z + v*y + x*y + x*z, v*z, 0, 0]
>>> intersecting_product([1, S(2)/3], [3, 4 + 5*I])
[9 + 5*I, 8/3 + 10*I/3]
>>> intersecting_product([1, 3, S(5)/7], [7, 8])
[327/7, 24, 0, 0]
References
==========
.. [1] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = mobius_transform(a, subset=False), mobius_transform(b, subset=False)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = inverse_mobius_transform(a, subset=False)
return a
|
bce3044ef93127f32bd8838b5c5c31949152ff48457ab71eb23f5cc1dd9990c7 | """
Recurrences
"""
from sympy.core import S, sympify
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import as_int
def linrec(coeffs, init, n):
r"""
Evaluation of univariate linear recurrences of homogeneous type
having coefficients independent of the recurrence variable.
Parameters
==========
coeffs : iterable
Coefficients of the recurrence
init : iterable
Initial values of the recurrence
n : Integer
Point of evaluation for the recurrence
Notes
=====
Let `y(n)` be the recurrence of given type, ``c`` be the sequence
of coefficients, ``b`` be the sequence of initial/base values of the
recurrence and ``k`` (equal to ``len(c)``) be the order of recurrence.
Then,
.. math :: y(n) = \begin{cases} b_n & 0 \le n < k \\
c_0 y(n-1) + c_1 y(n-2) + \cdots + c_{k-1} y(n-k) & n \ge k
\end{cases}
Let `x_0, x_1, \ldots, x_n` be a sequence and consider the transformation
that maps each polynomial `f(x)` to `T(f(x))` where each power `x^i` is
replaced by the corresponding value `x_i`. The sequence is then a solution
of the recurrence if and only if `T(x^i p(x)) = 0` for each `i \ge 0` where
    `p(x) = x^k - c_0 x^{k-1} - c_1 x^{k-2} - \cdots - c_{k-1}` is the characteristic
polynomial.
Then `T(f(x)p(x)) = 0` for each polynomial `f(x)` (as it is a linear
combination of powers `x^i`). Now, if `x^n` is congruent to
`g(x) = a_0 x^0 + a_1 x^1 + \cdots + a_{k-1} x^{k-1}` modulo `p(x)`, then
`T(x^n) = x_n` is equal to
`T(g(x)) = a_0 x_0 + a_1 x_1 + \cdots + a_{k-1} x_{k-1}`.
Computation of `x^n`,
given `x^k = c_0 x^{k-1} + c_1 x^{k-2} + \cdots + c_{k-1}`
is performed using exponentiation by squaring (refer to [1]_) with
an additional reduction step performed to retain only first `k` powers
of `x` in the representation of `x^n`.
Examples
========
>>> from sympy.discrete.recurrences import linrec
>>> from sympy.abc import x, y, z
>>> linrec(coeffs=[1, 1], init=[0, 1], n=10)
55
>>> linrec(coeffs=[1, 1], init=[x, y], n=10)
34*x + 55*y
>>> linrec(coeffs=[x, y], init=[0, 1], n=5)
x**2*y + x*(x**3 + 2*x*y) + y**2
>>> linrec(coeffs=[1, 2, 3, 0, 0, 4], init=[x, y, z], n=16)
13576*x + 5676*y + 2356*z
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentiation_by_squaring
    .. [2] https://en.wikipedia.org/w/index.php?title=Modular_exponentiation&section=6#Matrices
See Also
========
sympy.polys.agca.extensions.ExtensionElement.__pow__
"""
if not coeffs:
return S.Zero
if not iterable(coeffs):
raise TypeError("Expected a sequence of coefficients for"
" the recurrence")
if not iterable(init):
raise TypeError("Expected a sequence of values for the initialization"
" of the recurrence")
n = as_int(n)
if n < 0:
raise ValueError("Point of evaluation of recurrence must be a "
"non-negative integer")
c = [sympify(arg) for arg in coeffs]
b = [sympify(arg) for arg in init]
k = len(c)
if len(b) > k:
raise TypeError("Count of initial values should not exceed the "
"order of the recurrence")
else:
b += [S.Zero]*(k - len(b)) # remaining initial values default to zero
if n < k:
return b[n]
terms = [u*v for u, v in zip(linrec_coeffs(c, n), b)]
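    # seed the sum with the last term so the result is a SymPy object even
    # when it would otherwise start from the default integer 0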
return sum(terms[:-1], terms[-1])
def linrec_coeffs(c, n):
r"""
    Compute the coefficients of the n'th term in the linear recursion
    sequence defined by ``c``.
`x^k = c_0 x^{k-1} + c_1 x^{k-2} + \cdots + c_{k-1}`.
It computes the coefficients by using binary exponentiation.
This function is used by `linrec` and `_eval_pow_by_cayley`.
Parameters
==========
    c : iterable
        Coefficients of the divisor polynomial
    n : Integer
        Exponent of `x`, so the dividend is `x^n`
"""
k = len(c)
def _square_and_reduce(u, offset):
        # squares `(u_0 + u_1 x + u_2 x^2 + \cdots + u_{k-1} x^{k-1})` (and
        # multiplies by `x` if offset is 1), then reduces the result, which has
        # length up to `2k`, back to `k` terms using the characteristic equation
        # of the recurrence, `x^k = c_0 x^{k-1} + c_1 x^{k-2} + \cdots + c_{k-1}`
w = [S.Zero]*(2*len(u) - 1 + offset)
for i, p in enumerate(u):
for j, q in enumerate(u):
w[offset + i + j] += p*q
for j in range(len(w) - 1, k - 1, -1):
for i in range(k):
w[j - i - 1] += w[j]*c[i]
return w[:k]
def _final_coeffs(n):
        # computes the final coefficient list `cf` corresponding to the
        # point `n` at which the recurrence is to be evaluated, such that
        # `y(n) = cf_0 y(0) + cf_1 y(1) + \cdots + cf_{k-1} y(k-1)`
if n < k:
return [S.Zero]*n + [S.One] + [S.Zero]*(k - n - 1)
else:
return _square_and_reduce(_final_coeffs(n // 2), n % 2)
return _final_coeffs(n)
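# A worked sketch (illustrative, not a doctest): for the Fibonacci
# recurrence c = [1, 1], linrec_coeffs([1, 1], 10) reduces x**10 modulo
# x**2 - x - 1 to 34 + 55*x, i.e. it returns [34, 55], so that
# y(10) = 34*y(0) + 55*y(1) = 55 for init = [0, 1].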
|
6d71aa3e22cddf1e0d02c33e6472893f114ee9163b94365472409aac8383122d | from .cartan_type import Standard_Cartan
from sympy.core.backend import eye
class TypeC(Standard_Cartan):
def __new__(cls, n):
if n < 3:
raise ValueError("n cannot be less than 3")
return Standard_Cartan.__new__(cls, "C", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("C3")
>>> c.dimension()
3
"""
n = self.n
return n
def basic_root(self, i, j):
"""Generate roots with 1 in ith position and a -1 in jth position
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""The ith simple root for the C series
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In C_n, the first n-1 simple roots are the same as
the roots in A_(n-1) (a 1 in the ith position, a -1
in the (i+1)th position, and zeroes elsewhere). The
nth simple root is the root in which there is a 2 in
the nth position and zeroes elsewhere.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("C3")
>>> c.simple_root(2)
[0, 1, -1]
"""
n = self.n
if i < n:
return self.basic_root(i-1,i)
else:
root = [0]*self.n
root[n-1] = 2
return root
def positive_roots(self):
"""Generates all the positive roots of A_n
This is half of all of the roots of C_n; by multiplying all the
positive roots by -1 we get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
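            # e_i + e_j: take e_i - e_j and flip the sign at position j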
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 2
posroots[k] = root
return posroots
def roots(self):
"""
        Returns the total number of roots for C_n
"""
n = self.n
return 2*(n**2)
def cartan_matrix(self):
"""The Cartan matrix for C_n
        The Cartan matrix for a Lie algebra is
        generated by assigning an ordering to the simple
        roots, (alpha[1], ..., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('C4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -2, 2]])
"""
n = self.n
m = 2 * eye(n)
i = 1
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0,1] = -1
m[n-1, n-2] = -2
return m
def basis(self):
"""
Returns the number of independent generators of C_n
"""
n = self.n
return n*(2*n + 1)
def lie_algebra(self):
"""
        Returns the Lie algebra associated with C_n
"""
n = self.n
return "sp(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range(1, n)) + "=<=0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
|
51ff90cc8fba6d125e23dd6cb461ec89eb249f004455822365a88d3fed9b2226 | from .cartan_type import Standard_Cartan
from sympy.core.backend import eye, Rational
class TypeE(Standard_Cartan):
def __new__(cls, n):
if n < 6 or n > 8:
raise ValueError("Invalid value of n")
return Standard_Cartan.__new__(cls, "E", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("E6")
>>> c.dimension()
8
"""
return 8
def basic_root(self, i, j):
"""
This is a method just to generate roots
with a -1 in the ith position and a 1
in the jth position.
"""
root = [0]*8
root[i] = -1
root[j] = 1
return root
def simple_root(self, i):
"""
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
This method returns the ith simple root for E_n.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("E6")
>>> c.simple_root(2)
[1, 1, 0, 0, 0, 0, 0, 0]
"""
n = self.n
if i == 1:
root = [-0.5]*8
root[0] = 0.5
root[7] = 0.5
return root
elif i == 2:
root = [0]*8
root[1] = 1
root[0] = 1
return root
else:
if i in (7, 8) and n == 6:
raise ValueError("E6 only has six simple roots!")
if i == 8 and n == 7:
raise ValueError("E7 has only 7 simple roots!")
return self.basic_root(i - 3, i - 2)
def positive_roots(self):
"""
        This method generates all the positive roots of
        E_n. This is half of all of the roots of E_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
if n == 6:
posroots = {}
k = 0
for i in range(n-1):
for j in range(i+1, n-1):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
root = [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2),
Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
for a in range(0, 2):
for b in range(0, 2):
for c in range(0, 2):
for d in range(0, 2):
for e in range(0, 2):
if (a + b + c + d + e)%2 == 0:
k += 1
if a == 1:
root[0] = Rational(-1, 2)
if b == 1:
root[1] = Rational(-1, 2)
if c == 1:
root[2] = Rational(-1, 2)
if d == 1:
root[3] = Rational(-1, 2)
if e == 1:
root[4] = Rational(-1, 2)
posroots[k] = root
return posroots
if n == 7:
posroots = {}
k = 0
for i in range(n-1):
for j in range(i+1, n-1):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
k += 1
posroots[k] = [0, 0, 0, 0, 0, 1, 1, 0]
root = [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2),
Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
for a in range(0, 2):
for b in range(0, 2):
for c in range(0, 2):
for d in range(0, 2):
for e in range(0, 2):
for f in range(0, 2):
if (a + b + c + d + e + f)%2 == 0:
k += 1
if a == 1:
root[0] = Rational(-1, 2)
if b == 1:
root[1] = Rational(-1, 2)
if c == 1:
root[2] = Rational(-1, 2)
if d == 1:
root[3] = Rational(-1, 2)
if e == 1:
root[4] = Rational(-1, 2)
if f == 1:
root[5] = Rational(1, 2)
posroots[k] = root
return posroots
if n == 8:
posroots = {}
k = 0
for i in range(n):
for j in range(i+1, n):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
root = [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2),
Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
for a in range(0, 2):
for b in range(0, 2):
for c in range(0, 2):
for d in range(0, 2):
for e in range(0, 2):
for f in range(0, 2):
for g in range(0, 2):
if (a + b + c + d + e + f + g)%2 == 0:
k += 1
if a == 1:
root[0] = Rational(-1, 2)
if b == 1:
root[1] = Rational(-1, 2)
if c == 1:
root[2] = Rational(-1, 2)
if d == 1:
root[3] = Rational(-1, 2)
if e == 1:
root[4] = Rational(-1, 2)
if f == 1:
root[5] = Rational(1, 2)
if g == 1:
root[6] = Rational(1, 2)
posroots[k] = root
return posroots
def roots(self):
"""
Returns the total number of roots of E_n
"""
n = self.n
if n == 6:
return 72
if n == 7:
return 126
if n == 8:
return 240
def cartan_matrix(self):
"""
        Returns the Cartan matrix for E_n.
        The Cartan matrix for a Lie algebra is
        generated by assigning an ordering to the simple
        roots, (alpha[1], ..., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2*eye(n)
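        # off-diagonal entries follow the E-series Dynkin diagram used in
        # dynkin_diagram below: nodes 3..n form a chain, node 1 attaches to
        # node 3 and the branch node 2 attaches to node 4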
i = 3
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0, 2] = m[2, 0] = -1
m[1, 3] = m[3, 1] = -1
m[2, 3] = -1
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of E_n
"""
n = self.n
if n == 6:
return 78
if n == 7:
return 133
if n == 8:
return 248
def dynkin_diagram(self):
n = self.n
diag = " "*8 + str(2) + "\n"
diag += " "*8 + "0\n"
diag += " "*8 + "|\n"
diag += " "*8 + "|\n"
diag += "---".join("0" for i in range(1, n)) + "\n"
diag += "1 " + " ".join(str(i) for i in range(3, n+1))
return diag
|
e88229b83ccb0a711fc57b670555e8a612ee6a26df70ef3d548dc55187ac7f30 | from sympy.core import Basic
class CartanType_generator(Basic):
"""
Constructor for actually creating things
"""
def __call__(self, *args):
c = args[0]
if isinstance(c, list):
letter, n = c[0], int(c[1])
elif isinstance(c, str):
letter, n = c[0], int(c[1:])
else:
raise TypeError("Argument must be a string (e.g. 'A3') or a list (e.g. ['A', 3])")
if n < 0:
raise ValueError("Lie algebra rank cannot be negative")
if letter == "A":
from . import type_a
return type_a.TypeA(n)
if letter == "B":
from . import type_b
return type_b.TypeB(n)
if letter == "C":
from . import type_c
return type_c.TypeC(n)
if letter == "D":
from . import type_d
return type_d.TypeD(n)
if letter == "E":
if n >= 6 and n <= 8:
from . import type_e
return type_e.TypeE(n)
if letter == "F":
if n == 4:
from . import type_f
return type_f.TypeF(n)
if letter == "G":
if n == 2:
from . import type_g
return type_g.TypeG(n)
CartanType = CartanType_generator()
class Standard_Cartan(Basic):
"""
Concrete base class for Cartan types such as A4, etc
"""
def __new__(cls, series, n):
obj = Basic.__new__(cls, series, n)
obj.n = n
obj.series = series
return obj
def rank(self):
"""
Returns the rank of the Lie algebra
"""
return self.n
def series(self):
"""
Returns the type of the Lie algebra
"""
return self.series
|
e74c5d72b79cd67145b725163a63f6426f595b5ce96eb8306f3fd83caa79e46a | from .cartan_type import Standard_Cartan
from sympy.core.backend import eye
class TypeB(Standard_Cartan):
def __new__(cls, n):
if n < 2:
raise ValueError("n cannot be less than 2")
return Standard_Cartan.__new__(cls, "B", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.dimension()
3
"""
return self.n
def basic_root(self, i, j):
"""
This is a method just to generate roots
        with a 1 in the ith position and a -1
in the jth position.
"""
root = [0]*self.n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In B_n the first n-1 simple roots are the same as the
roots in A_(n-1) (a 1 in the ith position, a -1 in
the (i+1)th position, and zeroes elsewhere). The n-th
simple root is the root with a 1 in the nth position
and zeroes elsewhere.
This method returns the ith simple root for the B series.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.simple_root(2)
[0, 1, -1]
"""
n = self.n
if i < n:
return self.basic_root(i-1, i)
else:
root = [0]*self.n
root[n-1] = 1
return root
def positive_roots(self):
"""
        This method generates all the positive roots of
        B_n. This is half of all of the roots of B_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
return posroots
def roots(self):
"""
        Returns the total number of roots for B_n
"""
n = self.n
return 2*(n**2)
def cartan_matrix(self):
"""
Returns the Cartan matrix for B_n.
        The Cartan matrix for a Lie algebra is
        generated by assigning an ordering to the simple
        roots, (alpha[1], ..., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('B4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -2],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2* eye(n)
i = 1
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0, 1] = -1
m[n-2, n-1] = -2
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of B_n
"""
n = self.n
        return n*(2*n + 1)
def lie_algebra(self):
"""
Returns the Lie algebra associated with B_n
"""
n = self.n
return "so(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range(1, n)) + "=>=0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
|
9794942649e3d712e0c1154cb8eb159b06dcc58d95e8f539f6306db2cca5287e | from sympy.liealgebras.cartan_type import Standard_Cartan
from sympy.core.backend import eye
class TypeA(Standard_Cartan):
"""
This class contains the information about
the A series of simple Lie algebras.
"""
def __new__(cls, n):
if n < 1:
raise ValueError("n cannot be less than 1")
return Standard_Cartan.__new__(cls, "A", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A4")
>>> c.dimension()
5
"""
return self.n+1
def basic_root(self, i, j):
"""
This is a method just to generate roots
        with a 1 in the ith position and a -1
in the jth position.
"""
n = self.n
root = [0]*(n+1)
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In A_n the ith simple root is the root which has a 1
in the ith position, a -1 in the (i+1)th position,
and zeroes elsewhere.
This method returns the ith simple root for the A series.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A4")
>>> c.simple_root(1)
[1, -1, 0, 0, 0]
"""
return self.basic_root(i-1, i)
def positive_roots(self):
"""
This method generates all the positive roots of
A_n. This is half of all of the roots of A_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n):
for j in range(i+1, n+1):
k += 1
posroots[k] = self.basic_root(i, j)
return posroots
def highest_root(self):
"""
Returns the highest weight root for A_n
"""
return self.basic_root(0, self.n)
def roots(self):
"""
Returns the total number of roots for A_n
"""
n = self.n
return n*(n+1)
def cartan_matrix(self):
"""
Returns the Cartan matrix for A_n.
        The Cartan matrix for a Lie algebra is
        generated by assigning an ordering to the simple
        roots, (alpha[1], ..., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2 * eye(n)
i = 1
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0,1] = -1
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of A_n
"""
n = self.n
        return (n + 1)**2 - 1
def lie_algebra(self):
"""
Returns the Lie algebra associated with A_n
"""
n = self.n
return "su(" + str(n + 1) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range(1, n+1)) + "\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
|
f42c0bd098f76614e6e34f91a4c7b445b76d6cec494ba2b399287d7d361f4392 | from .cartan_type import CartanType
from sympy.core.backend import Basic
class RootSystem(Basic):
"""Represent the root system of a simple Lie algebra
Every simple Lie algebra has a unique root system. To find the root
system, we first consider the Cartan subalgebra of g, which is the maximal
abelian subalgebra, and consider the adjoint action of g on this
subalgebra. There is a root system associated with this action. Now, a
root system over a vector space V is a set of finite vectors Phi (called
roots), which satisfy:
1. The roots span V
2. The only scalar multiples of x in Phi are x and -x
3. For every x in Phi, the set Phi is closed under reflection
through the hyperplane perpendicular to x.
4. If x and y are roots in Phi, then the projection of y onto
the line through x is a half-integral multiple of x.
Now, there is a subset of Phi, which we will call Delta, such that:
1. Delta is a basis of V
2. Each root x in Phi can be written x = sum k_y y for y in Delta
The elements of Delta are called the simple roots.
Therefore, we see that the simple roots span the root space of a given
simple Lie algebra.
References
==========
.. [1] https://en.wikipedia.org/wiki/Root_system
.. [2] Lie Algebras and Representation Theory - Humphreys
"""
def __new__(cls, cartantype):
"""Create a new RootSystem object
This method assigns an attribute called cartan_type to each instance of
a RootSystem object. When an instance of RootSystem is called, it
needs an argument, which should be an instance of a simple Lie algebra.
We then take the CartanType of this argument and set it as the
cartan_type attribute of the RootSystem instance.
"""
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def simple_roots(self):
"""Generate the simple roots of the Lie algebra
The rank of the Lie algebra determines the number of simple roots that
it has. This method obtains the rank of the Lie algebra, and then uses
the simple_root method from the Lie algebra classes to generate all the
simple roots.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> roots = c.simple_roots()
>>> roots
{1: [1, -1, 0, 0], 2: [0, 1, -1, 0], 3: [0, 0, 1, -1]}
"""
n = self.cartan_type.rank()
roots = {}
for i in range(1, n+1):
root = self.cartan_type.simple_root(i)
roots[i] = root
return roots
def all_roots(self):
"""Generate all the roots of a given root system
The result is a dictionary where the keys are integer numbers. It
generates the roots by getting the dictionary of all positive roots
        from the base classes, and then taking each root, and multiplying it
by -1 and adding it to the dictionary. In this way all the negative
roots are generated.
"""
alpha = self.cartan_type.positive_roots()
keys = list(alpha.keys())
k = max(keys)
for val in keys:
k += 1
root = alpha[val]
newroot = [-x for x in root]
alpha[k] = newroot
return alpha
def root_space(self):
"""Return the span of the simple roots
The root space is the vector space spanned by the simple roots, i.e. it
is a vector space with a distinguished basis, the simple roots. This
method returns a string that represents the root space as the span of
the simple roots, alpha[1],...., alpha[n].
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.root_space()
'alpha[1] + alpha[2] + alpha[3]'
"""
n = self.cartan_type.rank()
rs = " + ".join("alpha["+str(i) +"]" for i in range(1, n+1))
return rs
def add_simple_roots(self, root1, root2):
"""Add two simple roots together
The function takes as input two integers, root1 and root2. It then
uses these integers as keys in the dictionary of simple roots, and gets
the corresponding simple roots, and then adds them together.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> newroot = c.add_simple_roots(1, 2)
>>> newroot
[1, 0, -1, 0]
"""
alpha = self.simple_roots()
if root1 > len(alpha) or root2 > len(alpha):
raise ValueError("You've used a root that doesn't exist!")
a1 = alpha[root1]
a2 = alpha[root2]
newroot = []
length = len(a1)
for i in range(length):
newroot.append(a1[i] + a2[i])
return newroot
def add_as_roots(self, root1, root2):
"""Add two roots together if and only if their sum is also a root
It takes as input two vectors which should be roots. It then computes
their sum and checks if it is in the list of all possible roots. If it
is, it returns the sum. Otherwise it returns a string saying that the
sum is not a root.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.add_as_roots([1, 0, -1, 0], [0, 0, 1, -1])
[1, 0, 0, -1]
>>> c.add_as_roots([1, -1, 0, 0], [0, 0, -1, 1])
'The sum of these two roots is not a root'
"""
alpha = self.all_roots()
newroot = []
for entry in range(len(root1)):
newroot.append(root1[entry] + root2[entry])
if newroot in alpha.values():
return newroot
else:
return "The sum of these two roots is not a root"
def cartan_matrix(self):
"""Cartan matrix of Lie algebra associated with this root system
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, -1, 2]])
"""
return self.cartan_type.cartan_matrix()
def dynkin_diagram(self):
"""Dynkin diagram of the Lie algebra associated with this root system
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> print(c.dynkin_diagram())
0---0---0
1 2 3
"""
return self.cartan_type.dynkin_diagram()
|
1e66d978ec7fd7d41acc55c7fd2621b8617d02d1c394b58af0b462a823a34970 | # -*- coding: utf-8 -*-
from .cartan_type import CartanType
from mpmath import fac
from sympy.core.backend import Matrix, eye, Rational, Basic, igcd
class WeylGroup(Basic):
"""
For each semisimple Lie group, we have a Weyl group. It is a subgroup of
the isometry group of the root system. Specifically, it's the subgroup
that is generated by reflections through the hyperplanes orthogonal to
the roots. Therefore, Weyl groups are reflection groups, and so a Weyl
group is a finite Coxeter group.
"""
def __new__(cls, cartantype):
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def generators(self):
"""
This method creates the generating reflections of the Weyl group for
a given Lie algebra. For a Lie algebra of rank n, there are n
different generating reflections. This function returns them as
a list.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("F4")
>>> c.generators()
['r1', 'r2', 'r3', 'r4']
"""
n = self.cartan_type.rank()
generators = []
for i in range(1, n+1):
reflection = "r"+str(i)
generators.append(reflection)
return generators
def group_order(self):
"""
This method returns the order of the Weyl group.
For types A, B, C, D, and E the order depends on
the rank of the Lie algebra. For types F and G,
the order is fixed.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("D4")
>>> c.group_order()
192.0
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return fac(n+1)
if self.cartan_type.series in ("B", "C"):
return fac(n)*(2**n)
if self.cartan_type.series == "D":
return fac(n)*(2**(n-1))
if self.cartan_type.series == "E":
if n == 6:
return 51840
if n == 7:
return 2903040
if n == 8:
return 696729600
if self.cartan_type.series == "F":
return 1152
if self.cartan_type.series == "G":
return 12
def group_name(self):
"""
This method returns some general information about the Weyl group for
a given Lie algebra. It returns the name of the group and the elements
it acts on, if relevant.
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return "S"+str(n+1) + ": the symmetric group acting on " + str(n+1) + " elements."
if self.cartan_type.series in ("B", "C"):
return "The hyperoctahedral group acting on " + str(2*n) + " elements."
if self.cartan_type.series == "D":
return "The symmetry group of the " + str(n) + "-dimensional demihypercube."
if self.cartan_type.series == "E":
if n == 6:
return "The symmetry group of the 6-polytope."
if n == 7:
return "The symmetry group of the 7-polytope."
if n == 8:
return "The symmetry group of the 8-polytope."
if self.cartan_type.series == "F":
return "The symmetry group of the 24-cell, or icositetrachoron."
if self.cartan_type.series == "G":
return "D6, the dihedral group of order 12, and symmetry group of the hexagon."
def element_order(self, weylelt):
"""
This method returns the order of a given Weyl group element, which should
be specified by the user in the form of products of the generating
reflections, i.e. of the form r1*r2 etc.
        For types A-F, this method currently works by taking the matrix form of
the specified element, and then finding what power of the matrix is the
identity. It then returns this power.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> b = WeylGroup("B4")
>>> b.element_order('r1*r4*r2')
4
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n+1):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "D":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "E":
a = self.matrix_form(weylelt)
order = 1
while a != eye(8):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "G":
elts = list(weylelt)
reflections = elts[1::3]
m = self.delete_doubles(reflections)
while self.delete_doubles(m) != m:
m = self.delete_doubles(m)
reflections = m
if len(reflections) % 2 == 1:
return 2
elif len(reflections) == 0:
return 1
else:
if len(reflections) == 1:
return 2
else:
m = len(reflections) // 2
lcm = (6 * m)/ igcd(m, 6)
order = lcm / m
return order
if self.cartan_type.series == 'F':
a = self.matrix_form(weylelt)
order = 1
while a != eye(4):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series in ("B", "C"):
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
def delete_doubles(self, reflections):
"""
This is a helper method for determining the order of an element in the
        Weyl group of G2. It takes a Weyl element and deletes any adjacent
        repeated simple reflections in it.
"""
counter = 0
copy = list(reflections)
for elt in copy:
if counter < len(copy)-1:
if copy[counter + 1] == elt:
del copy[counter]
del copy[counter]
counter += 1
return copy
def matrix_form(self, weylelt):
"""
This method takes input from the user in the form of products of the
generating reflections, and returns the matrix corresponding to the
        element of the Weyl group. Since every element of the Weyl group is a
        product of the generating reflections, its matrix is the product of the
        matrices of those reflections.
        This method uses the standard representation for all the generating
        reflections.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> f = WeylGroup("F4")
>>> f.matrix_form('r2*r3')
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, -1],
[0, 0, 1, 0]])
"""
elts = list(weylelt)
reflections = elts[1::3]
n = self.cartan_type.rank()
if self.cartan_type.series == 'A':
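            # for A_n the generator r_a acts on R**(n+1) by swapping coordinates
            # a and a+1, so each generator matrix is a transposition matrix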
matrixform = eye(n+1)
for elt in reflections:
a = int(elt)
mat = eye(n+1)
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'D':
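            # for D_n the first n-1 generators are adjacent transpositions; the
            # last generator swaps the final two coordinates and negates both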
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a < n:
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
else:
mat[n-2, n-1] = -1
mat[n-2, n-2] = 0
mat[n-1, n-2] = -1
mat[n-1, n-1] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'G':
matrixform = eye(3)
for elt in reflections:
a = int(elt)
if a == 1:
gen1 = Matrix([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
matrixform *= gen1
else:
gen2 = Matrix([[Rational(2, 3), Rational(2, 3), Rational(-1, 3)],
[Rational(2, 3), Rational(-1, 3), Rational(2, 3)],
[Rational(-1, 3), Rational(2, 3), Rational(2, 3)]])
matrixform *= gen2
return matrixform
if self.cartan_type.series == 'F':
matrixform = eye(4)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
matrixform *= mat
elif a == 2:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrixform *= mat
elif a == 3:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
matrixform *= mat
else:
mat = Matrix([[Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2)],
[Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]])
matrixform *= mat
return matrixform
if self.cartan_type.series == 'E':
matrixform = eye(8)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[Rational(3, 4), Rational(1, 4), Rational(1, 4), Rational(1, 4),
Rational(1, 4), Rational(1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(3, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(3, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(3, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(3, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-3, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4)]])
matrixform *= mat
elif a == 2:
mat = eye(8)
mat[0, 0] = 0
mat[0, 1] = -1
mat[1, 0] = -1
mat[1, 1] = 0
matrixform *= mat
else:
mat = eye(8)
mat[a-3, a-3] = 0
mat[a-3, a-2] = 1
mat[a-2, a-3] = 1
mat[a-2, a-2] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series in ("B", "C"):
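            # here the first generator negates the first coordinate, while the
            # generator r_a for a > 1 transposes coordinates a-1 and a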
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a == 1:
mat[0, 0] = -1
matrixform *= mat
else:
                mat[a - 2, a - 2] = 0
                mat[a - 2, a - 1] = 1
                mat[a - 1, a - 2] = 1
                mat[a - 1, a - 1] = 0
matrixform *= mat
return matrixform
def coxeter_diagram(self):
"""
This method returns the Coxeter diagram corresponding to a Weyl group.
The Coxeter diagram can be obtained from a Lie algebra's Dynkin diagram
        by deleting all arrows; the Coxeter diagram is the resulting undirected graph.
The vertices of the Coxeter diagram represent the generating reflections
of the Weyl group, $s_i$. An edge is drawn between $s_i$ and $s_j$ if the order
$m(i, j)$ of $s_is_j$ is greater than two. If there is one edge, the order
$m(i, j)$ is 3. If there are two edges, the order $m(i, j)$ is 4, and if there
are three edges, the order $m(i, j)$ is 6.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("B3")
>>> print(c.coxeter_diagram())
0---0===0
        1   2   3
"""
n = self.cartan_type.rank()
if self.cartan_type.series in ("A", "D", "E"):
return self.cartan_type.dynkin_diagram()
if self.cartan_type.series in ("B", "C"):
diag = "---".join("0" for i in range(1, n)) + "===0\n"
            diag += "   ".join(str(i) for i in range(1, n+1))
return diag
if self.cartan_type.series == "F":
diag = "0---0===0---0\n"
            diag += "   ".join(str(i) for i in range(1, 5))
return diag
if self.cartan_type.series == "G":
diag = "0≡≡≡0\n1 2"
return diag
|
adb9d254606cd26b58f2d4718eea2e9d219e5cf07fe944af92fdb2b0fc89f719 | """
Singularities
=============
This module implements algorithms for finding singularities of a function
and for identifying types of functions.
The differential calculus methods in this module include methods to identify
the following function types in the given ``Interval``:
- Increasing
- Strictly Increasing
- Decreasing
- Strictly Decreasing
- Monotonic
"""
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.solvers.solveset import solveset
from sympy.utilities.misc import filldedent
def singularities(expression, symbol, domain=None):
"""
Find singularities of a given function.
Parameters
==========
expression : Expr
The target function in which singularities need to be found.
    symbol : Symbol
        The symbol over the values of which the singularity in
        expression is being searched for.
    domain : Set, optional
        The domain over which singularities are searched for. Defaults to the
        reals if ``symbol`` is real, otherwise to the complexes.
Returns
=======
Set
A set of values for ``symbol`` for which ``expression`` has a
singularity. An ``EmptySet`` is returned if ``expression`` has no
singularities for any given value of ``Symbol``.
Raises
======
NotImplementedError
Methods for determining the singularities of this function have
not been developed.
Notes
=====
This function does not find non-isolated singularities
nor does it find branch points of the expression.
Currently supported functions are:
- univariate continuous (real or complex) functions
References
==========
.. [1] https://en.wikipedia.org/wiki/Mathematical_singularity
Examples
========
>>> from sympy import singularities, Symbol, log
>>> x = Symbol('x', real=True)
>>> y = Symbol('y', real=False)
>>> singularities(x**2 + x + 1, x)
EmptySet
>>> singularities(1/(x + 1), x)
{-1}
>>> singularities(1/(y**2 + 1), y)
{-I, I}
>>> singularities(1/(y**3 + 1), y)
{-1, 1/2 - sqrt(3)*I/2, 1/2 + sqrt(3)*I/2}
>>> singularities(log(x), x)
{0}
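    The optional ``domain`` argument (see above) restricts where singularities
    are searched for; a small sketch:
    >>> from sympy import Interval, oo
    >>> singularities(1/(x**2 - 4), x, Interval(0, oo))
    {2}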
"""
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.trigonometric import sec, csc, cot, tan, cos
if domain is None:
domain = S.Reals if symbol.is_real else S.Complexes
try:
sings = S.EmptySet
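        # poles come from zeros of any base raised to a negative power;
        # sec, csc, cot and tan are first rewritten in terms of cos so that
        # their poles show up as such negative powers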
for i in expression.rewrite([sec, csc, cot, tan], cos).atoms(Pow):
if i.exp.is_infinite:
raise NotImplementedError
if i.exp.is_negative:
sings += solveset(i.base, symbol, domain)
for i in expression.atoms(log):
sings += solveset(i.args[0], symbol, domain)
return sings
except NotImplementedError:
raise NotImplementedError(filldedent('''
Methods for determining the singularities
of this function have not been developed.'''))
###########################################################################
# DIFFERENTIAL CALCULUS METHODS #
###########################################################################
def monotonicity_helper(expression, predicate, interval=S.Reals, symbol=None):
"""
Helper function for functions checking function monotonicity.
Parameters
==========
expression : Expr
The target function which is being checked
    predicate : function
        The property being tested for. The function takes the derivative
        (an expression) as input and returns a relational describing the
        property, for example ``lambda d: d >= 0`` for increasing behaviour.
interval : Set, optional
The range of values in which we are testing, defaults to all reals.
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
    The helper returns a boolean indicating whether the set on which the
    function's derivative satisfies the given predicate is a superset of
    the given interval.
Returns
=======
Boolean
        True if ``predicate`` holds for the derivative whenever ``symbol``
        is varied over ``interval``, False otherwise.
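    Examples
    ========
    A minimal sketch of calling the helper directly (normally the
    ``is_increasing``/``is_decreasing`` wrappers below are used instead):
    >>> from sympy.calculus.singularities import monotonicity_helper
    >>> from sympy import Interval, oo
    >>> from sympy.abc import x
    >>> monotonicity_helper(x**2, lambda d: d >= 0, Interval(0, oo))
    True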
"""
expression = sympify(expression)
free = expression.free_symbols
if symbol is None:
if len(free) > 1:
raise NotImplementedError(
'The function has not yet been implemented'
' for all multivariate expressions.'
)
variable = symbol or (free.pop() if free else Symbol('x'))
derivative = expression.diff(variable)
predicate_interval = solveset(predicate(derivative), variable, S.Reals)
return interval.is_subset(predicate_interval)
def is_increasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is increasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is increasing (either strictly increasing or
constant) in the given ``interval``, False otherwise.
Examples
========
>>> from sympy import is_increasing
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_increasing(x**3 - 3*x**2 + 4*x, S.Reals)
True
>>> is_increasing(-x**2, Interval(-oo, 0))
True
>>> is_increasing(-x**2, Interval(0, oo))
False
>>> is_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval(-2, 3))
False
>>> is_increasing(x**2 + y, Interval(1, 2), x)
True
"""
return monotonicity_helper(expression, lambda x: x >= 0, interval, symbol)
def is_strictly_increasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is strictly increasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is strictly increasing in the given ``interval``,
False otherwise.
Examples
========
>>> from sympy import is_strictly_increasing
>>> from sympy.abc import x, y
>>> from sympy import Interval, oo
>>> is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Ropen(-oo, -2))
True
>>> is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Lopen(3, oo))
True
>>> is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.open(-2, 3))
False
>>> is_strictly_increasing(-x**2, Interval(0, oo))
False
>>> is_strictly_increasing(-x**2 + y, Interval(-oo, 0), x)
False
"""
return monotonicity_helper(expression, lambda x: x > 0, interval, symbol)
def is_decreasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is decreasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is decreasing (either strictly decreasing or
constant) in the given ``interval``, False otherwise.
Examples
========
>>> from sympy import is_decreasing
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))
True
>>> is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
True
>>> is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))
False
>>> is_decreasing(-x**2, Interval(-oo, 0))
False
>>> is_decreasing(-x**2 + y, Interval(-oo, 0), x)
False
"""
return monotonicity_helper(expression, lambda x: x <= 0, interval, symbol)
def is_strictly_decreasing(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is strictly decreasing in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is strictly decreasing in the given ``interval``,
False otherwise.
Examples
========
>>> from sympy import is_strictly_decreasing
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_strictly_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
True
>>> is_strictly_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))
False
>>> is_strictly_decreasing(-x**2, Interval(-oo, 0))
False
>>> is_strictly_decreasing(-x**2 + y, Interval(-oo, 0), x)
False
"""
return monotonicity_helper(expression, lambda x: x < 0, interval, symbol)
def is_monotonic(expression, interval=S.Reals, symbol=None):
"""
Return whether the function is monotonic in the given interval.
Parameters
==========
expression : Expr
The target function which is being checked.
interval : Set, optional
The range of values in which we are testing (defaults to set of
all real numbers).
symbol : Symbol, optional
The symbol present in expression which gets varied over the given range.
Returns
=======
Boolean
True if ``expression`` is monotonic in the given ``interval``,
False otherwise.
Raises
======
NotImplementedError
Monotonicity check has not been implemented for the queried function.
Examples
========
>>> from sympy import is_monotonic
>>> from sympy.abc import x, y
>>> from sympy import S, Interval, oo
>>> is_monotonic(1/(x**2 - 3*x), Interval.open(1.5, 3))
True
>>> is_monotonic(1/(x**2 - 3*x), Interval.Lopen(3, oo))
True
>>> is_monotonic(x**3 - 3*x**2 + 4*x, S.Reals)
True
>>> is_monotonic(-x**2, S.Reals)
False
>>> is_monotonic(x**2 + y + 1, Interval(1, 2), x)
True
"""
expression = sympify(expression)
free = expression.free_symbols
if symbol is None and len(free) > 1:
raise NotImplementedError(
'is_monotonic has not yet been implemented'
' for all multivariate expressions.'
)
variable = symbol or (free.pop() if free else Symbol('x'))
turning_points = solveset(expression.diff(variable), variable, interval)
return interval.intersection(turning_points) is S.EmptySet
|
d10ac70fd4d82a5499b8ef1dae75376d4149ad2bbcbf94a772ab6445258b8c63 | """
This module implements a method to find
Euler-Lagrange Equations for given Lagrangian.
"""
from itertools import combinations_with_replacement
from sympy.core.function import (Derivative, Function, diff)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.utilities.iterables import iterable
def euler_equations(L, funcs=(), vars=()):
r"""
Find the Euler-Lagrange equations [1]_ for a given Lagrangian.
Parameters
==========
L : Expr
The Lagrangian that should be a function of the functions listed
in the second argument and their derivatives.
For example, in the case of two functions $f(x,y)$, $g(x,y)$ and
two independent variables $x$, $y$ the Lagrangian has the form:
.. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x},
\frac{\partial f(x,y)}{\partial y},
\frac{\partial g(x,y)}{\partial x},
\frac{\partial g(x,y)}{\partial y},x,y\right)
        In many cases it is not necessary to provide anything except the
        Lagrangian; the functions and variables will be auto-detected (and an
        error raised if this cannot be done).
funcs : Function or an iterable of Functions
The functions that the Lagrangian depends on. The Euler equations
are differential equations for each of these functions.
vars : Symbol or an iterable of Symbols
The Symbols that are the independent variables of the functions.
Returns
=======
eqns : list of Eq
The list of differential equations, one for each function.
Examples
========
>>> from sympy import euler_equations, Symbol, Function
>>> x = Function('x')
>>> t = Symbol('t')
>>> L = (x(t).diff(t))**2/2 - x(t)**2/2
>>> euler_equations(L, x(t), t)
[Eq(-x(t) - Derivative(x(t), (t, 2)), 0)]
>>> u = Function('u')
>>> x = Symbol('x')
>>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2
>>> euler_equations(L, u(t, x), [t, x])
[Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)]
References
==========
.. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
"""
funcs = tuple(funcs) if iterable(funcs) else (funcs,)
if not funcs:
funcs = tuple(L.atoms(Function))
else:
for f in funcs:
if not isinstance(f, Function):
raise TypeError('Function expected, got: %s' % f)
vars = tuple(vars) if iterable(vars) else (vars,)
if not vars:
vars = funcs[0].args
else:
vars = tuple(sympify(var) for var in vars)
if not all(isinstance(v, Symbol) for v in vars):
raise TypeError('Variables are not symbols, got %s' % vars)
for f in funcs:
if not vars == f.args:
raise ValueError("Variables %s do not match args: %s" % (vars, f))
order = max([len(d.variables) for d in L.atoms(Derivative)
if d.expr in funcs] + [0])
eqns = []
for f in funcs:
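        # Euler-Lagrange equation for f: dL/df plus, for every multi-index p of
        # length i in the independent variables, (-1)**i * d/dp(dL/d(diff(f, *p)))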
eq = diff(L, f)
for i in range(1, order + 1):
for p in combinations_with_replacement(vars, i):
eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p)
new_eq = Eq(eq, 0)
if isinstance(new_eq, Eq):
eqns.append(new_eq)
return eqns
|
b93e60ae15dbf726a6c18c20a57b08089d0a9aebd5e0588b6da74bda162f5f45 | """
Finite difference weights
=========================
This module implements an algorithm for the efficient generation of finite
difference weights for ordinary derivatives of functions, for derivative
orders from 0 (interpolation) up to arbitrary order.
The core algorithm is provided in the finite difference weight generating
function (``finite_diff_weights``), and two convenience functions are provided
for:
- estimating a derivative (or interpolant) directly from a series of points
  (``apply_finite_diff``).
- differentiating by using finite difference approximations
(``differentiate_finite``).
"""
from sympy.core.function import Derivative
from sympy.core.singleton import S
from sympy.core.function import Subs
from sympy.core.traversal import preorder_traversal
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iterable
from sympy.utilities.decorator import deprecated
def finite_diff_weights(order, x_list, x0=S.One):
"""
Calculates the finite difference weights for an arbitrarily spaced
one-dimensional grid (``x_list``) for derivatives at ``x0`` of order
0, 1, ..., up to ``order`` using a recursive formula. Order of accuracy
is at least ``len(x_list) - order``, if ``x_list`` is defined correctly.
Parameters
==========
order: int
Up to what derivative order weights should be calculated.
0 corresponds to interpolation.
x_list: sequence
Sequence of (unique) values for the independent variable.
It is useful (but not necessary) to order ``x_list`` from
nearest to furthest from ``x0``; see examples below.
x0: Number or Symbol
Root or value of the independent variable for which the finite
difference weights should be generated. Default is ``S.One``.
Returns
=======
list
A list of sublists, each corresponding to coefficients for
increasing derivative order, and each containing lists of
coefficients for increasing subsets of x_list.
Examples
========
>>> from sympy import finite_diff_weights, S
>>> res = finite_diff_weights(1, [-S(1)/2, S(1)/2, S(3)/2, S(5)/2], 0)
>>> res
[[[1, 0, 0, 0],
[1/2, 1/2, 0, 0],
[3/8, 3/4, -1/8, 0],
[5/16, 15/16, -5/16, 1/16]],
[[0, 0, 0, 0],
[-1, 1, 0, 0],
[-1, 1, 0, 0],
[-23/24, 7/8, 1/8, -1/24]]]
>>> res[0][-1] # FD weights for 0th derivative, using full x_list
[5/16, 15/16, -5/16, 1/16]
>>> res[1][-1] # FD weights for 1st derivative
[-23/24, 7/8, 1/8, -1/24]
>>> res[1][-2] # FD weights for 1st derivative, using x_list[:-1]
[-1, 1, 0, 0]
>>> res[1][-1][0] # FD weight for 1st deriv. for x_list[0]
-23/24
>>> res[1][-1][1] # FD weight for 1st deriv. for x_list[1], etc.
7/8
Each sublist contains the most accurate formula at the end.
    Note that in the above example ``res[1][1]`` is the same as ``res[1][2]``.
    Since ``res[1][2]`` has an order of accuracy of
    ``len(x_list[:3]) - order = 3 - 1 = 2``, the same is true for ``res[1][1]``!
>>> res = finite_diff_weights(1, [S(0), S(1), -S(1), S(2), -S(2)], 0)[1]
>>> res
[[0, 0, 0, 0, 0],
[-1, 1, 0, 0, 0],
[0, 1/2, -1/2, 0, 0],
[-1/2, 1, -1/3, -1/6, 0],
[0, 2/3, -2/3, -1/12, 1/12]]
>>> res[0] # no approximation possible, using x_list[0] only
[0, 0, 0, 0, 0]
>>> res[1] # classic forward step approximation
[-1, 1, 0, 0, 0]
>>> res[2] # classic centered approximation
[0, 1/2, -1/2, 0, 0]
>>> res[3:] # higher order approximations
[[-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]]
Let us compare this to a differently defined ``x_list``. Pay attention to
``foo[i][k]`` corresponding to the gridpoint defined by ``x_list[k]``.
>>> foo = finite_diff_weights(1, [-S(2), -S(1), S(0), S(1), S(2)], 0)[1]
>>> foo
[[0, 0, 0, 0, 0],
[-1, 1, 0, 0, 0],
[1/2, -2, 3/2, 0, 0],
[1/6, -1, 1/2, 1/3, 0],
[1/12, -2/3, 0, 2/3, -1/12]]
>>> foo[1] # not the same and of lower accuracy as res[1]!
[-1, 1, 0, 0, 0]
>>> foo[2] # classic double backward step approximation
[1/2, -2, 3/2, 0, 0]
>>> foo[4] # the same as res[4]
[1/12, -2/3, 0, 2/3, -1/12]
Note that, unless you plan on using approximations based on subsets of
``x_list``, the order of gridpoints does not matter.
The capability to generate weights at arbitrary points can be
used e.g. to minimize Runge's phenomenon by using Chebyshev nodes:
>>> from sympy import cos, symbols, pi, simplify
>>> N, (h, x) = 4, symbols('h x')
>>> x_list = [x+h*cos(i*pi/(N)) for i in range(N,-1,-1)] # chebyshev nodes
>>> print(x_list)
[-h + x, -sqrt(2)*h/2 + x, x, sqrt(2)*h/2 + x, h + x]
>>> mycoeffs = finite_diff_weights(1, x_list, 0)[1][4]
>>> [simplify(c) for c in mycoeffs] #doctest: +NORMALIZE_WHITESPACE
[(h**3/2 + h**2*x - 3*h*x**2 - 4*x**3)/h**4,
(-sqrt(2)*h**3 - 4*h**2*x + 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
(6*h**2*x - 8*x**3)/h**4,
(sqrt(2)*h**3 - 4*h**2*x - 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
(-h**3/2 + h**2*x + 3*h*x**2 - 4*x**3)/h**4]
Notes
=====
    If weights for a finite difference approximation of the 3rd order
    derivative are wanted, weights for the 0th, 1st and 2nd order are
    calculated "for free", as are formulae using subsets of ``x_list``.
    This is something one can take advantage of to save computational cost.
    Be aware that one should define ``x_list`` from nearest to furthest from
    ``x0``. If not, subsets of ``x_list`` will yield poorer approximations,
    which might not reach an order of accuracy of ``len(x_list) - order``.
See also
========
sympy.calculus.finite_diff.apply_finite_diff
References
==========
.. [1] Generation of Finite Difference Formulas on Arbitrarily Spaced
Grids, Bengt Fornberg; Mathematics of computation; 51; 184;
(1988); 699-706; doi:10.1090/S0025-5718-1988-0935077-0
"""
# The notation below closely corresponds to the one used in the paper.
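    # delta[m][n][nu] will hold the weight of the value at x_list[nu] in the
    # approximation of the m-th derivative at x0 using x_list[0], ..., x_list[n]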
order = S(order)
if not order.is_number:
raise ValueError("Cannot handle symbolic order.")
if order < 0:
raise ValueError("Negative derivative order illegal.")
if int(order) != order:
raise ValueError("Non-integer order illegal")
M = order
N = len(x_list) - 1
delta = [[[0 for nu in range(N+1)] for n in range(N+1)] for
m in range(M+1)]
delta[0][0][0] = S.One
c1 = S.One
for n in range(1, N+1):
c2 = S.One
for nu in range(0, n):
c3 = x_list[n]-x_list[nu]
c2 = c2 * c3
if n <= M:
delta[n][n-1][nu] = 0
for m in range(0, min(n, M)+1):
delta[m][n][nu] = (x_list[n]-x0)*delta[m][n-1][nu] -\
m*delta[m-1][n-1][nu]
delta[m][n][nu] /= c3
for m in range(0, min(n, M)+1):
delta[m][n][n] = c1/c2*(m*delta[m-1][n-1][n-1] -
(x_list[n-1]-x0)*delta[m][n-1][n-1])
c1 = c2
return delta
def apply_finite_diff(order, x_list, y_list, x0=S.Zero):
"""
Calculates the finite difference approximation of
the derivative of requested order at ``x0`` from points
provided in ``x_list`` and ``y_list``.
Parameters
==========
order: int
order of derivative to approximate. 0 corresponds to interpolation.
x_list: sequence
Sequence of (unique) values for the independent variable.
y_list: sequence
The function value at corresponding values for the independent
variable in x_list.
x0: Number or Symbol
At what value of the independent variable the derivative should be
evaluated. Defaults to 0.
Returns
=======
sympy.core.add.Add or sympy.core.numbers.Number
The finite difference expression approximating the requested
derivative order at ``x0``.
Examples
========
>>> from sympy import apply_finite_diff
>>> cube = lambda arg: (1.0*arg)**3
>>> xlist = range(-3,3+1)
>>> apply_finite_diff(2, xlist, map(cube, xlist), 2) - 12 # doctest: +SKIP
-3.55271367880050e-15
    We see that the example above only contains rounding errors.
apply_finite_diff can also be used on more abstract objects:
>>> from sympy import IndexedBase, Idx
>>> x, y = map(IndexedBase, 'xy')
>>> i = Idx('i')
>>> x_list, y_list = zip(*[(x[i+j], y[i+j]) for j in range(-1,2)])
>>> apply_finite_diff(1, x_list, y_list, x[i])
((x[i + 1] - x[i])/(-x[i - 1] + x[i]) - 1)*y[i]/(x[i + 1] - x[i]) -
(x[i + 1] - x[i])*y[i - 1]/((x[i + 1] - x[i - 1])*(-x[i - 1] + x[i])) +
(-x[i - 1] + x[i])*y[i + 1]/((x[i + 1] - x[i - 1])*(x[i + 1] - x[i]))
Notes
=====
Order = 0 corresponds to interpolation.
    Only supply as many points as you think make sense around ``x0`` when
    extracting the derivative (the function needs to be well behaved within
    that region). Also beware
of Runge's phenomenon.
See also
========
sympy.calculus.finite_diff.finite_diff_weights
References
==========
Fortran 90 implementation with Python interface for numerics: finitediff_
.. _finitediff: https://github.com/bjodah/finitediff
"""
# In the original paper the following holds for the notation:
# M = order
# N = len(x_list) - 1
N = len(x_list) - 1
if len(x_list) != len(y_list):
raise ValueError("x_list and y_list not equal in length.")
delta = finite_diff_weights(order, x_list, x0)
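    # delta[order][N][nu] is the weight of y_list[nu] when all N+1 points are used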
derivative = 0
for nu in range(0, len(x_list)):
derivative += delta[order][N][nu]*y_list[nu]
return derivative
def _as_finite_diff(derivative, points=1, x0=None, wrt=None):
"""
Returns an approximation of a derivative of a function in
the form of a finite difference formula. The expression is a
weighted sum of the function at a number of discrete values of
(one of) the independent variable(s).
Parameters
==========
derivative: a Derivative instance
points: sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. default: 1 (step-size 1)
x0: number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt: Symbol, optional
"with respect to" the variable for which the (partial)
        derivative is to be approximated. If not provided it
is required that the Derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol, as_finite_diff
>>> from sympy.utilities.exceptions import SymPyDeprecationWarning
>>> import warnings
>>> warnings.simplefilter("ignore", SymPyDeprecationWarning)
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> as_finite_diff(f(x).diff(x))
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and ``order + 1``
respectively. We can change the step size by passing a symbol
as a parameter:
>>> as_finite_diff(f(x).diff(x), h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a sequence:
>>> as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/((-h + E*h)*(h + E*h)) +
(-(-sqrt(2)*h + h)/(2*h) - (-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) +
(-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> as_finite_diff(d2fdxdy, wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.finite_diff_weights
"""
if derivative.is_Derivative:
pass
elif derivative.is_Atom:
return derivative
else:
return derivative.fromiter(
[_as_finite_diff(ar, points, x0, wrt) for ar
in derivative.args], **derivative.assumptions0)
if wrt is None:
old = None
for v in derivative.variables:
if old is v:
continue
derivative = _as_finite_diff(derivative, points, x0, v)
old = v
return derivative
order = derivative.variables.count(wrt)
if x0 is None:
x0 = wrt
if not iterable(points):
if getattr(points, 'is_Function', False) and wrt in points.args:
points = points.subs(wrt, x0)
# points is simply the step-size, let's make it a
# equidistant sequence centered around x0
if order % 2 == 0:
# even order => odd number of points, grid point included
points = [x0 + points*i for i
in range(-order//2, order//2 + 1)]
else:
# odd order => even number of points, half-way wrt grid point
points = [x0 + points*S(i)/2 for i
in range(-order, order + 1, 2)]
others = [wrt, 0]
for v in set(derivative.variables):
if v == wrt:
continue
others += [v, derivative.variables.count(v)]
if len(points) < order+1:
raise ValueError("Too few points for order %d" % order)
return apply_finite_diff(order, points, [
Derivative(derivative.expr.subs({wrt: x}), *others) for
x in points], x0)
as_finite_diff = deprecated(
useinstead="Derivative.as_finite_difference",
deprecated_since_version="1.1", issue=11410)(_as_finite_diff)
as_finite_diff.__doc__ = """
Deprecated function. Use Derivative.as_finite_difference instead.
"""
def differentiate_finite(expr, *symbols,
points=1, x0=None, wrt=None, evaluate=False):
r""" Differentiate expr and replace Derivatives with finite differences.
Parameters
==========
expr : expression
\*symbols : differentiate with respect to symbols
points: sequence, coefficient or undefined function, optional
see ``Derivative.as_finite_difference``
x0: number or Symbol, optional
see ``Derivative.as_finite_difference``
wrt: Symbol, optional
see ``Derivative.as_finite_difference``
Examples
========
>>> from sympy import sin, Function, differentiate_finite
>>> from sympy.abc import x, y, h
>>> f, g = Function('f'), Function('g')
>>> differentiate_finite(f(x)*g(x), x, points=[x-h, x+h])
-f(-h + x)*g(-h + x)/(2*h) + f(h + x)*g(h + x)/(2*h)
``differentiate_finite`` works on any expression, including the expressions
with embedded derivatives:
>>> differentiate_finite(f(x) + sin(x), x, 2)
-2*f(x) + f(x - 1) + f(x + 1) - 2*sin(x) + sin(x - 1) + sin(x + 1)
>>> differentiate_finite(f(x, y), x, y)
f(x - 1/2, y - 1/2) - f(x - 1/2, y + 1/2) - f(x + 1/2, y - 1/2) + f(x + 1/2, y + 1/2)
>>> differentiate_finite(f(x)*g(x).diff(x), x)
(-g(x) + g(x + 1))*f(x + 1/2) - (g(x) - g(x - 1))*f(x - 1/2)
    To make finite differences with a non-constant discretization step,
    use undefined functions:
>>> dx = Function('dx')
>>> differentiate_finite(f(x)*g(x).diff(x), points=dx(x))
-(-g(x - dx(x)/2 - dx(x - dx(x)/2)/2)/dx(x - dx(x)/2) +
g(x - dx(x)/2 + dx(x - dx(x)/2)/2)/dx(x - dx(x)/2))*f(x - dx(x)/2)/dx(x) +
(-g(x + dx(x)/2 - dx(x + dx(x)/2)/2)/dx(x + dx(x)/2) +
g(x + dx(x)/2 + dx(x + dx(x)/2)/2)/dx(x + dx(x)/2))*f(x + dx(x)/2)/dx(x)
"""
if any(term.is_Derivative for term in list(preorder_traversal(expr))):
evaluate = False
Dexpr = expr.diff(*symbols, evaluate=evaluate)
if evaluate:
SymPyDeprecationWarning(feature="``evaluate`` flag",
issue=17881,
deprecated_since_version="1.5").warn()
return Dexpr.replace(
lambda arg: arg.is_Derivative,
lambda arg: arg.as_finite_difference(points=points, x0=x0, wrt=wrt))
else:
DFexpr = Dexpr.as_finite_difference(points=points, x0=x0, wrt=wrt)
return DFexpr.replace(
lambda arg: isinstance(arg, Subs),
lambda arg: arg.expr.as_finite_difference(
points=points, x0=arg.point[0], wrt=arg.variables[0]))
|
3b0889cee9315aaddbea226986fcd104c2399e23f1018c5a5004208a7ad56d5a | from sympy.functions.elementary.piecewise import Piecewise
from sympy.polys.polytools import lcm_list
from sympy.series.limits import limit
from sympy.series.order import Order
from sympy.core import Add, Mul, Pow, S
from sympy.core.basic import Basic
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.function import diff, expand_mul
from sympy.core.kind import NumberKind
from sympy.core.mod import Mod
from sympy.core.numbers import _sympifyit, oo, zoo
from sympy.core.relational import is_le, is_lt, is_ge, is_gt, Relational
from sympy.core.symbol import Symbol, Dummy
from sympy.core.sympify import _sympify
from sympy.functions.elementary.complexes import Abs, im, re
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.trigonometric import (
TrigonometricFunction, sin, cos, csc, sec)
from sympy.logic.boolalg import And
from sympy.polys.polytools import degree
from sympy.sets.sets import (Interval, Intersection, FiniteSet, Union,
Complement)
from sympy.sets.fancysets import ImageSet
from sympy.simplify.simplify import simplify
from sympy.solvers.decompogen import compogen, decompogen
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
from sympy.utilities.iterables import iterable
from sympy.multipledispatch import dispatch
def continuous_domain(f, symbol, domain):
"""
Returns the intervals in the given domain for which the function
is continuous.
This method is limited by the ability to determine the various
singularities and discontinuities of the given function.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
symbol : :py:class:`~.Symbol`
The variable for which the intervals are to be determined.
domain : :py:class:`~.Interval`
        The domain over which the continuity of the function has to be checked.
Examples
========
>>> from sympy import Interval, Symbol, S, tan, log, pi, sqrt
>>> from sympy.calculus.util import continuous_domain
>>> x = Symbol('x')
>>> continuous_domain(1/x, x, S.Reals)
Union(Interval.open(-oo, 0), Interval.open(0, oo))
>>> continuous_domain(tan(x), x, Interval(0, pi))
Union(Interval.Ropen(0, pi/2), Interval.Lopen(pi/2, pi))
>>> continuous_domain(sqrt(x - 2), x, Interval(-5, 5))
Interval(2, 5)
>>> continuous_domain(log(2*x - 1), x, S.Reals)
Interval.open(1/2, oo)
Returns
=======
:py:class:`~.Interval`
Union of all intervals where the function is continuous.
Raises
======
NotImplementedError
If the method to determine continuity of such a function
has not yet been developed.
"""
from sympy.calculus.singularities import singularities
if domain.is_subset(S.Reals):
constrained_interval = domain
for atom in f.atoms(Pow):
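            # an exponent with an even denominator (e.g. 1/2) is an even root,
            # so a real result requires the base to be non-negative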
den = atom.exp.as_numer_denom()[1]
if den.is_even and den.is_nonzero:
constraint = solve_univariate_inequality(atom.base >= 0,
symbol).as_set()
constrained_interval = Intersection(constraint,
constrained_interval)
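        # the real logarithm requires a positive argument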
for atom in f.atoms(log):
constraint = solve_univariate_inequality(atom.args[0] > 0,
symbol).as_set()
constrained_interval = Intersection(constraint,
constrained_interval)
return constrained_interval - singularities(f, symbol, domain)
def function_range(f, symbol, domain):
"""
Finds the range of a function in a given domain.
This method is limited by the ability to determine the singularities and
determine limits.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
symbol : :py:class:`~.Symbol`
The variable for which the range of function is to be determined.
domain : :py:class:`~.Interval`
        The domain over which the range of the function is to be found.
Examples
========
>>> from sympy import Interval, Symbol, S, exp, log, pi, sqrt, sin, tan
>>> from sympy.calculus.util import function_range
>>> x = Symbol('x')
>>> function_range(sin(x), x, Interval(0, 2*pi))
Interval(-1, 1)
>>> function_range(tan(x), x, Interval(-pi/2, pi/2))
Interval(-oo, oo)
>>> function_range(1/x, x, S.Reals)
Union(Interval.open(-oo, 0), Interval.open(0, oo))
>>> function_range(exp(x), x, S.Reals)
Interval.open(0, oo)
>>> function_range(log(x), x, S.Reals)
Interval(-oo, oo)
>>> function_range(sqrt(x), x, Interval(-5, 9))
Interval(0, 3)
Returns
=======
:py:class:`~.Interval`
Union of all ranges for all intervals under domain where function is
continuous.
Raises
======
NotImplementedError
If any of the intervals, in the given domain, for which function
is continuous are not finite or real,
OR if the critical points of the function on the domain cannot be found.
"""
from sympy.solvers.solveset import solveset
if domain is S.EmptySet:
return S.EmptySet
period = periodicity(f, symbol)
if period == S.Zero:
# the expression is constant wrt symbol
return FiniteSet(f.expand())
if period is not None:
if isinstance(domain, Interval):
if (domain.inf - domain.sup).is_infinite:
domain = Interval(0, period)
elif isinstance(domain, Union):
for sub_dom in domain.args:
if isinstance(sub_dom, Interval) and \
((sub_dom.inf - sub_dom.sup).is_infinite):
domain = Interval(0, period)
intervals = continuous_domain(f, symbol, domain)
range_int = S.EmptySet
if isinstance(intervals,(Interval, FiniteSet)):
interval_iter = (intervals,)
elif isinstance(intervals, Union):
interval_iter = intervals.args
else:
raise NotImplementedError(filldedent('''
Unable to find range for the given domain.
'''))
for interval in interval_iter:
if isinstance(interval, FiniteSet):
for singleton in interval:
if singleton in domain:
range_int += FiniteSet(f.subs(symbol, singleton))
elif isinstance(interval, Interval):
vals = S.EmptySet
critical_points = S.EmptySet
critical_values = S.EmptySet
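            # candidate extrema: values/limits at the interval end points plus
            # the values of f at its critical points inside the interval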
bounds = ((interval.left_open, interval.inf, '+'),
(interval.right_open, interval.sup, '-'))
for is_open, limit_point, direction in bounds:
if is_open:
critical_values += FiniteSet(limit(f, symbol, limit_point, direction))
vals += critical_values
else:
vals += FiniteSet(f.subs(symbol, limit_point))
solution = solveset(f.diff(symbol), symbol, interval)
if not iterable(solution):
raise NotImplementedError(
'Unable to find critical points for {}'.format(f))
if isinstance(solution, ImageSet):
raise NotImplementedError(
'Infinite number of critical points for {}'.format(f))
critical_points += solution
for critical_point in critical_points:
vals += FiniteSet(f.subs(symbol, critical_point))
left_open, right_open = False, False
if critical_values is not S.EmptySet:
if critical_values.inf == vals.inf:
left_open = True
if critical_values.sup == vals.sup:
right_open = True
range_int += Interval(vals.inf, vals.sup, left_open, right_open)
else:
raise NotImplementedError(filldedent('''
Unable to find range for the given domain.
'''))
return range_int
def not_empty_in(finset_intersection, *syms):
"""
Finds the domain of the functions in ``finset_intersection`` in which the
``finite_set`` is not-empty
Parameters
==========
finset_intersection : Intersection of FiniteSet
The unevaluated intersection of FiniteSet containing
real-valued functions with Union of Sets
syms : Tuple of symbols
Symbol for which domain is to be found
Raises
======
NotImplementedError
The algorithms to find the non-emptiness of the given FiniteSet are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report it to the github issue tracker
(https://github.com/sympy/sympy/issues).
Examples
========
>>> from sympy import FiniteSet, Interval, not_empty_in, oo
>>> from sympy.abc import x
>>> not_empty_in(FiniteSet(x/2).intersect(Interval(0, 1)), x)
Interval(0, 2)
>>> not_empty_in(FiniteSet(x, x**2).intersect(Interval(1, 2)), x)
Union(Interval(1, 2), Interval(-sqrt(2), -1))
>>> not_empty_in(FiniteSet(x**2/(x + 2)).intersect(Interval(1, oo)), x)
Union(Interval.Lopen(-2, -1), Interval(2, oo))
"""
# TODO: handle piecewise defined functions
# TODO: handle transcendental functions
# TODO: handle multivariate functions
if len(syms) == 0:
raise ValueError("One or more symbols must be given in syms.")
if finset_intersection is S.EmptySet:
return S.EmptySet
if isinstance(finset_intersection, Union):
elm_in_sets = finset_intersection.args[0]
return Union(not_empty_in(finset_intersection.args[1], *syms),
elm_in_sets)
if isinstance(finset_intersection, FiniteSet):
finite_set = finset_intersection
_sets = S.Reals
else:
finite_set = finset_intersection.args[1]
_sets = finset_intersection.args[0]
if not isinstance(finite_set, FiniteSet):
raise ValueError('A FiniteSet must be given, not %s: %s' %
(type(finite_set), finite_set))
if len(syms) == 1:
symb = syms[0]
else:
raise NotImplementedError('more than one variables %s not handled' %
(syms,))
def elm_domain(expr, intrvl):
""" Finds the domain of an expression in any given interval """
from sympy.solvers.solveset import solveset
_start = intrvl.start
_end = intrvl.end
_singularities = solveset(expr.as_numer_denom()[1], symb,
domain=S.Reals)
if intrvl.right_open:
if _end is S.Infinity:
_domain1 = S.Reals
else:
_domain1 = solveset(expr < _end, symb, domain=S.Reals)
else:
_domain1 = solveset(expr <= _end, symb, domain=S.Reals)
if intrvl.left_open:
if _start is S.NegativeInfinity:
_domain2 = S.Reals
else:
_domain2 = solveset(expr > _start, symb, domain=S.Reals)
else:
_domain2 = solveset(expr >= _start, symb, domain=S.Reals)
# domain in the interval
expr_with_sing = Intersection(_domain1, _domain2)
expr_domain = Complement(expr_with_sing, _singularities)
return expr_domain
if isinstance(_sets, Interval):
return Union(*[elm_domain(element, _sets) for element in finite_set])
if isinstance(_sets, Union):
_domain = S.EmptySet
for intrvl in _sets.args:
_domain_element = Union(*[elm_domain(element, intrvl)
for element in finite_set])
_domain = Union(_domain, _domain_element)
return _domain
def periodicity(f, symbol, check=False):
"""
Tests the given function for periodicity in the given symbol.
Parameters
==========
f : :py:class:`~.Expr`.
The concerned function.
symbol : :py:class:`~.Symbol`
The variable for which the period is to be determined.
check : bool, optional
The flag to verify whether the value being returned is a period or not.
Returns
=======
period
The period of the function is returned.
``None`` is returned when the function is aperiodic or has a complex period.
The value of $0$ is returned as the period of a constant function.
Raises
======
NotImplementedError
The value of the period computed cannot be verified.
Notes
=====
Currently, we do not support functions with a complex period.
The period of functions having complex periodic values such
as ``exp``, ``sinh`` is evaluated to ``None``.
    The value returned might not be the "fundamental" period of the given
    function, i.e., it may not be the smallest period of the function.
The verification of the period through the ``check`` flag is not reliable
due to internal simplification of the given expression. Hence, it is set
to ``False`` by default.
Examples
========
>>> from sympy import periodicity, Symbol, sin, cos, tan, exp
>>> x = Symbol('x')
>>> f = sin(x) + sin(2*x) + sin(3*x)
>>> periodicity(f, x)
2*pi
>>> periodicity(sin(x)*cos(x), x)
pi
>>> periodicity(exp(tan(2*x) - 1), x)
pi/2
>>> periodicity(sin(4*x)**cos(2*x), x)
pi
>>> periodicity(exp(x), x)
"""
if symbol.kind is not NumberKind:
raise NotImplementedError("Cannot use symbol of kind %s" % symbol.kind)
temp = Dummy('x', real=True)
f = f.subs(symbol, temp)
symbol = temp
def _check(orig_f, period):
'''Return the checked period or raise an error.'''
new_f = orig_f.subs(symbol, symbol + period)
if new_f.equals(orig_f):
return period
else:
raise NotImplementedError(filldedent('''
The period of the given function cannot be verified.
When `%s` was replaced with `%s + %s` in `%s`, the result
was `%s` which was not recognized as being the same as
the original function.
So either the period was wrong or the two forms were
not recognized as being equal.
Set check=False to obtain the value.''' %
(symbol, symbol, period, orig_f, new_f)))
orig_f = f
period = None
if isinstance(f, Relational):
f = f.lhs - f.rhs
f = simplify(f)
if symbol not in f.free_symbols:
return S.Zero
if isinstance(f, TrigonometricFunction):
try:
period = f.period(symbol)
except NotImplementedError:
pass
if isinstance(f, Abs):
arg = f.args[0]
if isinstance(arg, (sec, csc, cos)):
            # all but tan and cot might have a
            # period that is half as large,
            # so recast as sin
arg = sin(arg.args[0])
period = periodicity(arg, symbol)
if period is not None and isinstance(arg, sin):
# the argument of Abs was a trigonometric other than
# cot or tan; test to see if the half-period
# is valid. Abs(arg) has behaviour equivalent to
# orig_f, so use that for test:
orig_f = Abs(arg)
try:
return _check(orig_f, period/2)
except NotImplementedError as err:
if check:
raise NotImplementedError(err)
# else let new orig_f and period be
# checked below
if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
f = Pow(S.Exp1, expand_mul(f.exp))
if im(f) != 0:
period_real = periodicity(re(f), symbol)
period_imag = periodicity(im(f), symbol)
if period_real is not None and period_imag is not None:
period = lcim([period_real, period_imag])
if f.is_Pow and f.base != S.Exp1:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if base_has_sym and not expo_has_sym:
period = periodicity(base, symbol)
elif expo_has_sym and not base_has_sym:
period = periodicity(expo, symbol)
else:
period = _periodicity(f.args, symbol)
elif f.is_Mul:
coeff, g = f.as_independent(symbol, as_Add=False)
if isinstance(g, TrigonometricFunction) or coeff is not S.One:
period = periodicity(g, symbol)
else:
period = _periodicity(g.args, symbol)
elif f.is_Add:
k, g = f.as_independent(symbol)
if k is not S.Zero:
return periodicity(g, symbol)
period = _periodicity(g.args, symbol)
elif isinstance(f, Mod):
a, n = f.args
if a == symbol:
period = n
elif isinstance(a, TrigonometricFunction):
period = periodicity(a, symbol)
        # check if 'a' is linear in 'symbol'
elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
symbol not in n.free_symbols):
period = Abs(n / a.diff(symbol))
elif isinstance(f, Piecewise):
pass # not handling Piecewise yet as the return type is not favorable
elif period is None:
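        # as a last resort, decompose f into a composition of simpler functions
        # and look for a periodic inner part of that composition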
g_s = decompogen(f, symbol)
num_of_gs = len(g_s)
if num_of_gs > 1:
for index, g in enumerate(reversed(g_s)):
start_index = num_of_gs - 1 - index
g = compogen(g_s[start_index:], symbol)
if g not in (orig_f, f): # Fix for issue 12620
period = periodicity(g, symbol)
if period is not None:
break
if period is not None:
if check:
return _check(orig_f, period)
return period
return None
def _periodicity(args, symbol):
"""
Helper for `periodicity` to find the period of a list of simpler
functions.
It uses the `lcim` method to find the least common period of
all the functions.
Parameters
==========
    args : Tuple of :py:class:`~.Expr`
        All the arguments (sub-expressions) of the function.
symbol : :py:class:`~.Symbol`
The symbol over which the function is to be evaluated.
Returns
=======
period
        The least common period over all the given arguments.
        ``None`` if at least one of the arguments is aperiodic.
"""
periods = []
for f in args:
period = periodicity(f, symbol)
if period is None:
return None
if period is not S.Zero:
periods.append(period)
if len(periods) > 1:
return lcim(periods)
if periods:
return periods[0]
def lcim(numbers):
"""Returns the least common integral multiple of a list of numbers.
The numbers can be rational or irrational or a mixture of both.
`None` is returned for incommensurable numbers.
Parameters
==========
numbers : list
Numbers (rational and/or irrational) for which lcim is to be found.
Returns
=======
number
lcim if it exists, otherwise ``None`` for incommensurable numbers.
Examples
========
>>> from sympy.calculus.util import lcim
>>> from sympy import S, pi
>>> lcim([S(1)/2, S(3)/4, S(5)/6])
15/2
>>> lcim([2*pi, 3*pi, pi, pi/2])
6*pi
>>> lcim([S(1), 2*pi])
"""
result = None
if all(num.is_irrational for num in numbers):
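        # e.g. [2*pi, 3*pi, pi/2]: write each number as coeff*term; if all share
        # the same irrational term, the lcim is lcm of the coefficients times it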
factorized_nums = list(map(lambda num: num.factor(), numbers))
factors_num = list(
map(lambda num: num.as_coeff_Mul(),
factorized_nums))
term = factors_num[0][1]
if all(factor == term for coeff, factor in factors_num):
common_term = term
coeffs = [coeff for coeff, factor in factors_num]
result = lcm_list(coeffs) * common_term
elif all(num.is_rational for num in numbers):
result = lcm_list(numbers)
else:
pass
return result
def is_convex(f, *syms, domain=S.Reals):
r"""Determines the convexity of the function passed in the argument.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
syms : Tuple of :py:class:`~.Symbol`
The variables with respect to which the convexity is to be determined.
domain : :py:class:`~.Interval`, optional
The domain over which the convexity of the function has to be checked.
If unspecified, S.Reals will be the default domain.
Returns
=======
bool
The method returns ``True`` if the function is convex otherwise it
returns ``False``.
Raises
======
NotImplementedError
The check for the convexity of multivariate functions is not implemented yet.
Notes
=====
To determine concavity of a function pass `-f` as the concerned function.
To determine logarithmic convexity of a function pass `\log(f)` as
concerned function.
    To determine logarithmic concavity of a function pass `-\log(f)` as
    concerned function.
Currently, convexity check of multivariate functions is not handled.
Examples
========
>>> from sympy import is_convex, symbols, exp, oo, Interval
>>> x = symbols('x')
>>> is_convex(exp(x), x)
True
>>> is_convex(x**3, x, domain = Interval(-1, oo))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Convex_function
.. [2] http://www.ifp.illinois.edu/~angelia/L3_convfunc.pdf
.. [3] https://en.wikipedia.org/wiki/Logarithmically_convex_function
.. [4] https://en.wikipedia.org/wiki/Logarithmically_concave_function
.. [5] https://en.wikipedia.org/wiki/Concave_function
"""
if len(syms) > 1:
raise NotImplementedError(
"The check for the convexity of multivariate functions is not implemented yet.")
f = _sympify(f)
var = syms[0]
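    # a twice-differentiable function is convex iff its second derivative is
    # non-negative on the domain, so look for a point where it is negative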
condition = f.diff(var, 2) < 0
if solve_univariate_inequality(condition, var, False, domain):
return False
return True
def stationary_points(f, symbol, domain=S.Reals):
"""
Returns the stationary points of a function (where derivative of the
function is 0) in the given domain.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
symbol : :py:class:`~.Symbol`
The variable for which the stationary points are to be determined.
domain : :py:class:`~.Interval`
The domain over which the stationary points have to be checked.
If unspecified, ``S.Reals`` will be the default domain.
Returns
=======
Set
A set of stationary points for the function. If there are no
        stationary points, an :py:class:`~.EmptySet` is returned.
Examples
========
>>> from sympy import Interval, Symbol, S, sin, pi, pprint, stationary_points
>>> x = Symbol('x')
>>> stationary_points(1/x, x, S.Reals)
EmptySet
>>> pprint(stationary_points(sin(x), x), use_unicode=False)
              pi                              3*pi
    {2*n*pi + -- | n in Integers} U {2*n*pi + ---- | n in Integers}
              2                                2
>>> stationary_points(sin(x),x, Interval(0, 4*pi))
{pi/2, 3*pi/2, 5*pi/2, 7*pi/2}
"""
from sympy.solvers.solveset import solveset
if domain is S.EmptySet:
return S.EmptySet
domain = continuous_domain(f, symbol, domain)
set = solveset(diff(f, symbol), symbol, domain)
return set
def maximum(f, symbol, domain=S.Reals):
"""
Returns the maximum value of a function in the given domain.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
symbol : :py:class:`~.Symbol`
        The variable for which the maximum value is to be determined.
    domain : :py:class:`~.Interval`
        The domain over which the maximum has to be checked.
If unspecified, then the global maximum is returned.
Returns
=======
number
Maximum value of the function in given domain.
Examples
========
>>> from sympy import Interval, Symbol, S, sin, cos, pi, maximum
>>> x = Symbol('x')
>>> f = -x**2 + 2*x + 5
>>> maximum(f, x, S.Reals)
6
>>> maximum(sin(x), x, Interval(-pi, pi/4))
sqrt(2)/2
>>> maximum(sin(x)*cos(x), x)
1/2
"""
if isinstance(symbol, Symbol):
if domain is S.EmptySet:
raise ValueError("Maximum value not defined for empty domain.")
return function_range(f, symbol, domain).sup
else:
raise ValueError("%s is not a valid symbol." % symbol)
def minimum(f, symbol, domain=S.Reals):
"""
Returns the minimum value of a function in the given domain.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
symbol : :py:class:`~.Symbol`
        The variable for which the minimum value is to be determined.
    domain : :py:class:`~.Interval`
        The domain over which the minimum has to be checked.
If unspecified, then the global minimum is returned.
Returns
=======
number
Minimum value of the function in the given domain.
Examples
========
>>> from sympy import Interval, Symbol, S, sin, cos, minimum
>>> x = Symbol('x')
>>> f = x**2 + 2*x + 5
>>> minimum(f, x, S.Reals)
4
>>> minimum(sin(x), x, Interval(2, 3))
sin(3)
>>> minimum(sin(x)*cos(x), x)
-1/2
"""
if isinstance(symbol, Symbol):
if domain is S.EmptySet:
raise ValueError("Minimum value not defined for empty domain.")
return function_range(f, symbol, domain).inf
else:
raise ValueError("%s is not a valid symbol." % symbol)
class AccumulationBounds(AtomicExpr):
r"""
# Note AccumulationBounds has an alias: AccumBounds
AccumulationBounds represent an interval `[a, b]`, which is always closed
at the ends. Here `a` and `b` can be any value from extended real numbers.
    The intended meaning of AccumulationBounds is to give an approximate
location of the accumulation points of a real function at a limit point.
Let `a` and `b` be reals such that `a \le b`.
`\left\langle a, b\right\rangle = \{x \in \mathbb{R} \mid a \le x \le b\}`
`\left\langle -\infty, b\right\rangle = \{x \in \mathbb{R} \mid x \le b\} \cup \{-\infty, \infty\}`
`\left\langle a, \infty \right\rangle = \{x \in \mathbb{R} \mid a \le x\} \cup \{-\infty, \infty\}`
`\left\langle -\infty, \infty \right\rangle = \mathbb{R} \cup \{-\infty, \infty\}`
``oo`` and ``-oo`` are added to the second and third definition respectively,
since if either ``-oo`` or ``oo`` is an argument, then the other one should
be included (though not as an end point). This is forced, since we have,
for example, ``1/AccumBounds(0, 1) = AccumBounds(1, oo)``, and the limit at
`0` is not one-sided. As `x` tends to `0-`, then `1/x \rightarrow -\infty`, so `-\infty`
should be interpreted as belonging to ``AccumBounds(1, oo)`` though it need
not appear explicitly.
In many cases it suffices to know that the limit set is bounded.
However, in some other cases more exact information could be useful.
For example, all accumulation values of `\cos(x) + 1` are non-negative.
(``AccumBounds(-1, 1) + 1 = AccumBounds(0, 2)``)
    An AccumulationBounds object is defined to be a real AccumulationBounds
    if its end points are finite reals.
Let `X`, `Y` be real AccumulationBounds, then their sum, difference,
product are defined to be the following sets:
`X + Y = \{ x+y \mid x \in X \cap y \in Y\}`
`X - Y = \{ x-y \mid x \in X \cap y \in Y\}`
`X \times Y = \{ x \times y \mid x \in X \cap y \in Y\}`
When an AccumBounds is raised to a negative power, if 0 is contained
between the bounds then an infinite range is returned, otherwise if an
endpoint is 0 then a semi-infinite range with consistent sign will be returned.
AccumBounds in expressions behave a lot like Intervals but the
semantics are not necessarily the same. Division (or exponentiation
to a negative integer power) could be handled with *intervals* by
returning a union of the results obtained after splitting the
bounds between negatives and positives, but that is not done with
AccumBounds. In addition, bounds are assumed to be independent of
each other; if the same bound is used in more than one place in an
expression, the result may not be the supremum or infimum of the
expression (see below). Finally, when a boundary is ``1``,
exponentiation to the power of ``oo`` yields ``oo``, neither
``1`` nor ``nan``.
Examples
========
>>> from sympy import AccumBounds, sin, exp, log, pi, E, S, oo
>>> from sympy.abc import x
>>> AccumBounds(0, 1) + AccumBounds(1, 2)
AccumBounds(1, 3)
>>> AccumBounds(0, 1) - AccumBounds(0, 2)
AccumBounds(-2, 1)
>>> AccumBounds(-2, 3)*AccumBounds(-1, 1)
AccumBounds(-3, 3)
>>> AccumBounds(1, 2)*AccumBounds(3, 5)
AccumBounds(3, 10)
The exponentiation of AccumulationBounds is defined
as follows:
If 0 does not belong to `X` or `n > 0` then
`X^n = \{ x^n \mid x \in X\}`
>>> AccumBounds(1, 4)**(S(1)/2)
AccumBounds(1, 2)
otherwise, an infinite or semi-infinite result is obtained:
>>> 1/AccumBounds(-1, 1)
AccumBounds(-oo, oo)
>>> 1/AccumBounds(0, 2)
AccumBounds(1/2, oo)
>>> 1/AccumBounds(-oo, 0)
AccumBounds(-oo, 0)
A boundary of 1 will always generate all nonnegatives:
>>> AccumBounds(1, 2)**oo
AccumBounds(0, oo)
>>> AccumBounds(0, 1)**oo
AccumBounds(0, oo)
If the exponent is itself an AccumulationBounds or is not an
integer then unevaluated results will be returned unless the base
values are positive:
>>> AccumBounds(2, 3)**AccumBounds(-1, 2)
AccumBounds(1/3, 9)
>>> AccumBounds(-2, 3)**AccumBounds(-1, 2)
AccumBounds(-2, 3)**AccumBounds(-1, 2)
>>> AccumBounds(-2, -1)**(S(1)/2)
sqrt(AccumBounds(-2, -1))
    Note: `\left\langle a, b\right\rangle^2` is not the same as `\left\langle a, b\right\rangle \times \left\langle a, b\right\rangle`
>>> AccumBounds(-1, 1)**2
AccumBounds(0, 1)
>>> AccumBounds(1, 3) < 4
True
>>> AccumBounds(1, 3) < -1
False
Some elementary functions can also take AccumulationBounds as input.
A function `f` evaluated for some real AccumulationBounds `\left\langle a, b \right\rangle`
is defined as `f(\left\langle a, b\right\rangle) = \{ f(x) \mid a \le x \le b \}`
>>> sin(AccumBounds(pi/6, pi/3))
AccumBounds(1/2, sqrt(3)/2)
>>> exp(AccumBounds(0, 1))
AccumBounds(1, E)
>>> log(AccumBounds(1, E))
AccumBounds(0, 1)
    A symbol in an expression can be substituted with an AccumulationBounds
    object, but this does not necessarily evaluate the AccumulationBounds for
    that expression.
    The same expression can evaluate to different values depending upon the
    form in which it is used for the substitution, since each instance of an
    AccumulationBounds is considered independent. For example:
>>> (x**2 + 2*x + 1).subs(x, AccumBounds(-1, 1))
AccumBounds(-1, 4)
>>> ((x + 1)**2).subs(x, AccumBounds(-1, 1))
AccumBounds(0, 4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Interval_arithmetic
.. [2] http://fab.cba.mit.edu/classes/S62.12/docs/Hickey_interval.pdf
Notes
=====
Do not use ``AccumulationBounds`` for floating point interval arithmetic
calculations, use ``mpmath.iv`` instead.
"""
is_extended_real = True
def __new__(cls, min, max):
min = _sympify(min)
max = _sympify(max)
# Only allow real intervals (use symbols with 'is_extended_real=True').
if not min.is_extended_real or not max.is_extended_real:
raise ValueError("Only real AccumulationBounds are supported")
if max == min:
return max
# Make sure that the created AccumBounds object will be valid.
if max.is_number and min.is_number:
bad = max.is_comparable and min.is_comparable and max < min
else:
bad = (max - min).is_extended_negative
if bad:
raise ValueError(
"Lower limit should be smaller than upper limit")
return Basic.__new__(cls, min, max)
# setting the operation priority
_op_priority = 11.0
def _eval_is_real(self):
if self.min.is_real and self.max.is_real:
return True
@property
def min(self):
"""
Returns the minimum possible value attained by AccumulationBounds
object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).min
1
"""
return self.args[0]
@property
def max(self):
"""
Returns the maximum possible value attained by AccumulationBounds
object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).max
3
"""
return self.args[1]
@property
def delta(self):
"""
Returns the difference of maximum possible value attained by
AccumulationBounds object and minimum possible value attained
by AccumulationBounds object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).delta
2
"""
return self.max - self.min
@property
def mid(self):
"""
Returns the mean of maximum possible value attained by
AccumulationBounds object and minimum possible value
attained by AccumulationBounds object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).mid
2
"""
return (self.min + self.max) / 2
@_sympifyit('other', NotImplemented)
def _eval_power(self, other):
return self.__pow__(other)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
return AccumBounds(
Add(self.min, other.min),
Add(self.max, other.max))
if other is S.Infinity and self.min is S.NegativeInfinity or \
other is S.NegativeInfinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif other.is_extended_real:
if self.min is S.NegativeInfinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif self.min is S.NegativeInfinity:
return AccumBounds(-oo, self.max + other)
elif self.max is S.Infinity:
return AccumBounds(self.min + other, oo)
else:
return AccumBounds(Add(self.min, other), Add(self.max, other))
return Add(self, other, evaluate=False)
return NotImplemented
__radd__ = __add__
def __neg__(self):
return AccumBounds(-self.max, -self.min)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
return AccumBounds(
Add(self.min, -other.max),
Add(self.max, -other.min))
if other is S.NegativeInfinity and self.min is S.NegativeInfinity or \
other is S.Infinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif other.is_extended_real:
if self.min is S.NegativeInfinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif self.min is S.NegativeInfinity:
return AccumBounds(-oo, self.max - other)
elif self.max is S.Infinity:
return AccumBounds(self.min - other, oo)
else:
return AccumBounds(
Add(self.min, -other),
Add(self.max, -other))
return Add(self, -other, evaluate=False)
return NotImplemented
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
return self.__neg__() + other
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if self.args == (-oo, oo):
return self
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
if other.args == (-oo, oo):
return other
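                # Multiply ``other`` by each endpoint of ``self`` and collect
                # all resulting bounds; the product range is the interval
                # spanned by the smallest and largest of those values.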
v = set()
for a in self.args:
vi = other*a
for i in vi.args or (vi,):
v.add(i)
return AccumBounds(Min(*v), Max(*v))
if other is S.Infinity:
if self.min.is_zero:
return AccumBounds(0, oo)
if self.max.is_zero:
return AccumBounds(-oo, 0)
if other is S.NegativeInfinity:
if self.min.is_zero:
return AccumBounds(-oo, 0)
if self.max.is_zero:
return AccumBounds(0, oo)
if other.is_extended_real:
if other.is_zero:
if self.max is S.Infinity:
return AccumBounds(0, oo)
if self.min is S.NegativeInfinity:
return AccumBounds(-oo, 0)
return S.Zero
if other.is_extended_positive:
return AccumBounds(
Mul(self.min, other),
Mul(self.max, other))
elif other.is_extended_negative:
return AccumBounds(
Mul(self.max, other),
Mul(self.min, other))
if isinstance(other, Order):
return other
return Mul(self, other, evaluate=False)
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
if other.min.is_positive or other.max.is_negative:
return self * AccumBounds(1/other.max, 1/other.min)
if (self.min.is_extended_nonpositive and self.max.is_extended_nonnegative and
other.min.is_extended_nonpositive and other.max.is_extended_nonnegative):
if self.min.is_zero and other.min.is_zero:
return AccumBounds(0, oo)
if self.max.is_zero and other.min.is_zero:
return AccumBounds(-oo, 0)
return AccumBounds(-oo, oo)
if self.max.is_extended_negative:
if other.min.is_extended_negative:
if other.max.is_zero:
return AccumBounds(self.max / other.min, oo)
if other.max.is_extended_positive:
# if we were dealing with intervals we would return
# Union(Interval(-oo, self.max/other.max),
# Interval(self.max/other.min, oo))
return AccumBounds(-oo, oo)
if other.min.is_zero and other.max.is_extended_positive:
return AccumBounds(-oo, self.max / other.max)
if self.min.is_extended_positive:
if other.min.is_extended_negative:
if other.max.is_zero:
return AccumBounds(-oo, self.min / other.min)
if other.max.is_extended_positive:
# if we were dealing with intervals we would return
# Union(Interval(-oo, self.min/other.min),
# Interval(self.min/other.max, oo))
return AccumBounds(-oo, oo)
if other.min.is_zero and other.max.is_extended_positive:
return AccumBounds(self.min / other.max, oo)
elif other.is_extended_real:
if other in (S.Infinity, S.NegativeInfinity):
if self == AccumBounds(-oo, oo):
return AccumBounds(-oo, oo)
if self.max is S.Infinity:
return AccumBounds(Min(0, other), Max(0, other))
if self.min is S.NegativeInfinity:
return AccumBounds(Min(0, -other), Max(0, -other))
if other.is_extended_positive:
return AccumBounds(self.min / other, self.max / other)
elif other.is_extended_negative:
return AccumBounds(self.max / other, self.min / other)
if (1 / other) is S.ComplexInfinity:
return Mul(self, 1 / other, evaluate=False)
else:
return Mul(self, 1 / other)
return NotImplemented
@_sympifyit('other', NotImplemented)
def __rtruediv__(self, other):
if isinstance(other, Expr):
if other.is_extended_real:
if other.is_zero:
return S.Zero
if (self.min.is_extended_nonpositive and self.max.is_extended_nonnegative):
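                    # ``self`` spans zero, so ``other/self`` is unbounded on at
                    # least one side; which endpoint is zero and the sign of
                    # ``other`` determine the direction.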
if self.min.is_zero:
if other.is_extended_positive:
return AccumBounds(Mul(other, 1 / self.max), oo)
if other.is_extended_negative:
return AccumBounds(-oo, Mul(other, 1 / self.max))
if self.max.is_zero:
if other.is_extended_positive:
return AccumBounds(-oo, Mul(other, 1 / self.min))
if other.is_extended_negative:
return AccumBounds(Mul(other, 1 / self.min), oo)
return AccumBounds(-oo, oo)
else:
return AccumBounds(Min(other / self.min, other / self.max),
Max(other / self.min, other / self.max))
return Mul(other, 1 / self, evaluate=False)
else:
return NotImplemented
@_sympifyit('other', NotImplemented)
def __pow__(self, other):
if isinstance(other, Expr):
if other is S.Infinity:
if self.min.is_extended_nonnegative:
if self.max < 1:
return S.Zero
if self.min > 1:
return S.Infinity
return AccumBounds(0, oo)
elif self.max.is_extended_negative:
if self.min > -1:
return S.Zero
if self.max < -1:
return zoo
return S.NaN
else:
if self.min > -1:
if self.max < 1:
return S.Zero
return AccumBounds(0, oo)
return AccumBounds(-oo, oo)
if other is S.NegativeInfinity:
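                # rewrite x**(-oo) as (1/x)**oo and reuse the handling above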
return (1/self)**oo
# generically true
if (self.max - self.min).is_nonnegative:
# well defined
if self.min.is_nonnegative:
# no 0 to worry about
if other.is_nonnegative:
# no infinity to worry about
return self.func(self.min**other, self.max**other)
if other.is_zero:
return S.One # x**0 = 1
if other.is_Integer or other.is_integer:
if self.min.is_extended_positive:
return AccumBounds(
Min(self.min**other, self.max**other),
Max(self.min**other, self.max**other))
elif self.max.is_extended_negative:
return AccumBounds(
Min(self.max**other, self.min**other),
Max(self.max**other, self.min**other))
if other % 2 == 0:
if other.is_extended_negative:
if self.min.is_zero:
return AccumBounds(self.max**other, oo)
if self.max.is_zero:
return AccumBounds(self.min**other, oo)
return AccumBounds(0, oo)
return AccumBounds(
S.Zero, Max(self.min**other, self.max**other))
elif other % 2 == 1:
if other.is_extended_negative:
if self.min.is_zero:
return AccumBounds(self.max**other, oo)
if self.max.is_zero:
return AccumBounds(-oo, self.min**other)
return AccumBounds(-oo, oo)
return AccumBounds(self.min**other, self.max**other)
# non-integer exponent
# 0**neg or neg**frac yields complex
if (other.is_number or other.is_rational) and (
self.min.is_extended_nonnegative or (
other.is_extended_nonnegative and
self.min.is_extended_nonnegative)):
num, den = other.as_numer_denom()
if num is S.One:
return AccumBounds(*[i**(1/den) for i in self.args])
elif den is not S.One: # e.g. if other is not Float
return (self**num)**(1/den) # ok for non-negative base
if isinstance(other, AccumBounds):
if (self.min.is_extended_positive or
self.min.is_extended_nonnegative and
other.min.is_extended_nonnegative):
p = [self**i for i in other.args]
if not any(i.is_Pow for i in p):
a = [j for i in p for j in i.args or (i,)]
try:
return self.func(min(a), max(a))
except TypeError: # can't sort
pass
return Pow(self, other, evaluate=False)
return NotImplemented
@_sympifyit('other', NotImplemented)
def __rpow__(self, other):
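        # other**AccumBounds: only evaluated for a nonnegative real base over a
        # non-degenerate range; every other case falls through to the
        # unevaluated Pow returned at the end.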
if other.is_real and other.is_extended_nonnegative and (
self.max - self.min).is_extended_positive:
if other is S.One:
return S.One
if other.is_extended_positive:
a, b = [other**i for i in self.args]
if min(a, b) != a:
a, b = b, a
return self.func(a, b)
if other.is_zero:
if self.min.is_zero:
return self.func(0, 1)
if self.min.is_extended_positive:
return S.Zero
return Pow(other, self, evaluate=False)
def __abs__(self):
if self.max.is_extended_negative:
return self.__neg__()
elif self.min.is_extended_negative:
return AccumBounds(S.Zero, Max(abs(self.min), self.max))
else:
return self
def __contains__(self, other):
"""
        Returns ``True`` if ``other`` is contained in ``self``, where ``other``
        belongs to the extended real numbers, ``False`` if it is not contained;
        otherwise a TypeError is raised.
Examples
========
>>> from sympy import AccumBounds, oo
>>> 1 in AccumBounds(-1, 3)
True
-oo and oo go together as limits (in AccumulationBounds).
>>> -oo in AccumBounds(1, oo)
True
>>> oo in AccumBounds(-oo, 0)
True
"""
other = _sympify(other)
if other in (S.Infinity, S.NegativeInfinity):
if self.min is S.NegativeInfinity or self.max is S.Infinity:
return True
return False
rv = And(self.min <= other, self.max >= other)
if rv not in (True, False):
raise TypeError("input failed to evaluate")
return rv
def intersection(self, other):
"""
Returns the intersection of 'self' and 'other'.
Here other can be an instance of :py:class:`~.FiniteSet` or AccumulationBounds.
Parameters
==========
other: AccumulationBounds
Another AccumulationBounds object with which the intersection
has to be computed.
Returns
=======
AccumulationBounds
Intersection of ``self`` and ``other``.
Examples
========
>>> from sympy import AccumBounds, FiniteSet
>>> AccumBounds(1, 3).intersection(AccumBounds(2, 4))
AccumBounds(2, 3)
>>> AccumBounds(1, 3).intersection(AccumBounds(4, 6))
EmptySet
>>> AccumBounds(1, 4).intersection(FiniteSet(1, 2, 5))
{1, 2}
"""
if not isinstance(other, (AccumBounds, FiniteSet)):
raise TypeError(
"Input must be AccumulationBounds or FiniteSet object")
if isinstance(other, FiniteSet):
fin_set = S.EmptySet
for i in other:
if i in self:
fin_set = fin_set + FiniteSet(i)
return fin_set
if self.max < other.min or self.min > other.max:
return S.EmptySet
if self.min <= other.min:
if self.max <= other.max:
return AccumBounds(other.min, self.max)
if self.max > other.max:
return other
if other.min <= self.min:
if other.max < self.max:
return AccumBounds(self.min, other.max)
if other.max > self.max:
return self
def union(self, other):
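        """
        Returns a single AccumulationBounds spanning ``self`` and ``other``
        when the two ranges overlap. (This docstring and example are an added
        illustration; as the comments below note, this is not a complete
        Union implementation.)
        Examples
        ========
        >>> from sympy import AccumBounds
        >>> AccumBounds(1, 3).union(AccumBounds(2, 4))
        AccumBounds(1, 4)
        """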
# TODO : Devise a better method for Union of AccumBounds
# this method is not actually correct and
# can be made better
if not isinstance(other, AccumBounds):
raise TypeError(
"Input must be AccumulationBounds or FiniteSet object")
if self.min <= other.min and self.max >= other.min:
return AccumBounds(self.min, Max(self.max, other.max))
if other.min <= self.min and other.max >= self.min:
return AccumBounds(other.min, Max(self.max, other.max))
@dispatch(AccumulationBounds, AccumulationBounds) # type: ignore # noqa:F811
def _eval_is_le(lhs, rhs): # noqa:F811
if is_le(lhs.max, rhs.min):
return True
if is_gt(lhs.min, rhs.max):
return False
@dispatch(AccumulationBounds, Basic) # type: ignore # noqa:F811
def _eval_is_le(lhs, rhs): # noqa: F811
"""
    Returns ``True`` if the range of values attained by the ``lhs``
    AccumulationBounds object lies entirely above the range of values
    attained by ``rhs``, where ``rhs`` may be an AccumulationBounds object
    or an extended real number, ``False`` if it lies entirely below, else
    an unevaluated :py:class:`~.Relational`.
Examples
========
>>> from sympy import AccumBounds, oo
>>> AccumBounds(1, 3) > AccumBounds(4, oo)
False
>>> AccumBounds(1, 4) > AccumBounds(3, 4)
AccumBounds(1, 4) > AccumBounds(3, 4)
>>> AccumBounds(1, oo) > -1
True
"""
if not rhs.is_extended_real:
raise TypeError(
"Invalid comparison of %s %s" %
(type(rhs), rhs))
elif rhs.is_comparable:
if is_le(lhs.max, rhs):
return True
if is_gt(lhs.min, rhs):
return False
@dispatch(AccumulationBounds, AccumulationBounds)
def _eval_is_ge(lhs, rhs): # noqa:F811
if is_ge(lhs.min, rhs.max):
return True
if is_lt(lhs.max, rhs.min):
return False
@dispatch(AccumulationBounds, Expr) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa: F811
"""
    Returns ``True`` if the range of values attained by the ``lhs``
    AccumulationBounds object lies entirely at or above ``rhs``, where
    ``rhs`` may be an AccumulationBounds object or an extended real
    number, ``False`` if it lies entirely below ``rhs``, else an
    unevaluated :py:class:`~.Relational`.
Examples
========
>>> from sympy import AccumBounds, oo
>>> AccumBounds(1, 3) >= AccumBounds(4, oo)
False
>>> AccumBounds(1, 4) >= AccumBounds(3, 4)
AccumBounds(1, 4) >= AccumBounds(3, 4)
>>> AccumBounds(1, oo) >= 1
True
"""
if not rhs.is_extended_real:
raise TypeError(
"Invalid comparison of %s %s" %
(type(rhs), rhs))
elif rhs.is_comparable:
if is_ge(lhs.min, rhs):
return True
if is_lt(lhs.max, rhs):
return False
@dispatch(Expr, AccumulationBounds) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa:F811
if not lhs.is_extended_real:
raise TypeError(
"Invalid comparison of %s %s" %
(type(lhs), lhs))
elif lhs.is_comparable:
if is_le(rhs.max, lhs):
return True
if is_gt(rhs.min, lhs):
return False
@dispatch(AccumulationBounds, AccumulationBounds) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa:F811
if is_ge(lhs.min, rhs.max):
return True
if is_lt(lhs.max, rhs.min):
return False
# setting an alias for AccumulationBounds
AccumBounds = AccumulationBounds
|
b2f878c4321af271a90de1f84de45be3d806f2688cbd7a42cc46ab6e8af9f041 | from sympy.tensor import Indexed
from sympy.core.containers import Tuple
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.integrals.integrals import Integral
class IndexedIntegral(Integral):
"""
Experimental class to test integration by indexed variables.
    Usage is analogous to ``Integral``; it simply adds awareness of
    integration over indices.
Contraction of non-identical index symbols referring to the same
``IndexedBase`` is not yet supported.
Examples
========
>>> from sympy.sandbox.indexed_integrals import IndexedIntegral
>>> from sympy import IndexedBase, symbols
>>> A = IndexedBase('A')
>>> i, j = symbols('i j', integer=True)
>>> ii = IndexedIntegral(A[i], A[i])
>>> ii
Integral(_A[i], _A[i])
>>> ii.doit()
A[i]**2/2
If the indices are different, indexed objects are considered to be
different variables:
>>> i2 = IndexedIntegral(A[j], A[i])
>>> i2
Integral(A[j], _A[i])
>>> i2.doit()
A[i]*A[j]
"""
def __new__(cls, function, *limits, **assumptions):
repl, limits = IndexedIntegral._indexed_process_limits(limits)
function = sympify(function)
function = function.xreplace(repl)
obj = Integral.__new__(cls, function, *limits, **assumptions)
obj._indexed_repl = repl
obj._indexed_reverse_repl = {val: key for key, val in repl.items()}
return obj
def doit(self):
res = super().doit()
return res.xreplace(self._indexed_reverse_repl)
@staticmethod
def _indexed_process_limits(limits):
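        # Replace each Indexed integration variable with a Dummy symbol so the
        # Integral machinery can treat it as an ordinary variable; the mapping
        # is returned so the caller can undo the substitution afterwards.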
repl = {}
newlimits = []
for i in limits:
if isinstance(i, (tuple, list, Tuple)):
v = i[0]
vrest = i[1:]
else:
v = i
vrest = ()
if isinstance(v, Indexed):
if v not in repl:
r = Dummy(str(v))
repl[v] = r
newlimits.append((r,)+vrest)
else:
newlimits.append(i)
return repl, newlimits
|
06b7ad56e3a3ab6324fce4724b985c3514dd2c67daaecc9903b3b6b05979f392 | from sympy.core.basic import Basic
new = Basic.__new__
def assoc(d, k, v):
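    """Return a copy of ``d`` with key ``k`` set to ``v``; ``d`` itself is
    left unmodified."""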
d = d.copy()
d[k] = v
return d
basic_fns = {'op': type,
'new': Basic.__new__,
'leaf': lambda x: not isinstance(x, Basic) or x.is_Atom,
'children': lambda x: x.args}
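# ``expr_fns`` differs from ``basic_fns`` only in how nodes are rebuilt: the
# operator is called directly (so the result may evaluate) instead of going
# through ``Basic.__new__``.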
expr_fns = assoc(basic_fns, 'new', lambda op, *args: op(*args))
|
b9fe9515d5e9b5940d0a4b3227be4685a483ca67aa64f9644afdeedce7a809ac | """
module for generating C, C++, Fortran77, Fortran90, Julia, Rust
and Octave/Matlab routines that evaluate SymPy expressions.
This module is work in progress.
Only the milestones with a '+' character in the list below have been completed.
--- How is sympy.utilities.codegen different from sympy.printing.ccode? ---
We considered the idea of extending the printing routines for SymPy functions
in such a way that they print complete compilable code, but this leads to a
few insurmountable issues that can only be tackled with a dedicated code
generator:
- For C, one needs both a code and a header file, while the printing routines
generate just one string. This code generator can be extended to support
.pyf files for f2py.
- SymPy functions are not concerned with programming-technical issues, such
as input, output and input-output arguments. Other examples are contiguous
or non-contiguous arrays, including headers of other libraries such as gsl
or others.
- It is highly interesting to evaluate several SymPy functions in one C
routine, eventually sharing common intermediate results with the help
of the cse routine. This is more than just printing.
- From the programming perspective, expressions with constants should be
evaluated in the code generator as much as possible. This is different
for printing.
--- Basic assumptions ---
* A generic Routine data structure describes the routine that must be
translated into C/Fortran/... code. This data structure covers all
features present in one or more of the supported languages.
* Descendants from the CodeGen class transform multiple Routine instances
into compilable code. Each derived class translates into a specific
language.
* In many cases, one wants a simple workflow. The friendly functions in the
  last part are a simple API on top of the Routine/CodeGen machinery. They are
  easier to use, but are less powerful; a short example is sketched below.
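As a quick illustration (a sketch added here for orientation), the friendly
``codegen`` function can be used like this:
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x
>>> [(c_name, c_code), (h_name, h_header)] = codegen(
...     ("f", x**2 + x), "C89", "test")
>>> print(c_name)
test.c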
--- Milestones ---
+ First working version with scalar input arguments, generating C code,
tests
+ Friendly functions that are easier to use than the rigorous
Routine/CodeGen workflow.
+ Integer and Real numbers as input and output
+ Output arguments
+ InputOutput arguments
+ Sort input/output arguments properly
+ Contiguous array arguments (numpy matrices)
+ Also generate .pyf code for f2py (in autowrap module)
+ Isolate constants and evaluate them beforehand in double precision
+ Fortran 90
+ Octave/Matlab
- Common Subexpression Elimination
- User defined comments in the generated code
- Optional extra include lines for libraries/objects that can eval special
functions
- Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ...
- Contiguous array arguments (SymPy matrices)
- Non-contiguous array arguments (SymPy matrices)
- ccode must raise an error when it encounters something that cannot be
translated into c. ccode(integrate(sin(x)/x, x)) does not make sense.
- Complex numbers as input and output
- A default complex datatype
- Include extra information in the header: date, user, hostname, sha1
hash, ...
- Fortran 77
- C++
- Python
- Julia
- Rust
- ...
"""
import os
import textwrap
from io import StringIO
from sympy import __version__ as sympy_version
from sympy.core import Symbol, S, Tuple, Equality, Function, Basic
from sympy.printing.c import c_code_printers
from sympy.printing.codeprinter import AssignmentError
from sympy.printing.fortran import FCodePrinter
from sympy.printing.julia import JuliaCodePrinter
from sympy.printing.octave import OctaveCodePrinter
from sympy.printing.rust import RustCodePrinter
from sympy.tensor import Idx, Indexed, IndexedBase
from sympy.matrices import (MatrixSymbol, ImmutableMatrix, MatrixBase,
MatrixExpr, MatrixSlice)
from sympy.utilities.iterables import is_sequence
__all__ = [
# description of routines
"Routine", "DataType", "default_datatypes", "get_default_datatype",
"Argument", "InputArgument", "OutputArgument", "Result",
# routines -> code
"CodeGen", "CCodeGen", "FCodeGen", "JuliaCodeGen", "OctaveCodeGen",
"RustCodeGen",
# friendly functions
"codegen", "make_routine",
]
#
# Description of routines
#
class Routine:
"""Generic description of evaluation routine for set of expressions.
A CodeGen class can translate instances of this class into code in a
particular language. The routine specification covers all the features
present in these languages. The CodeGen part must raise an exception
when certain features are not present in the target language. For
example, multiple return values are possible in Python, but not in C or
Fortran. Another example: Fortran and Python support complex numbers,
while C does not.
"""
def __init__(self, name, arguments, results, local_vars, global_vars):
"""Initialize a Routine instance.
Parameters
==========
name : string
Name of the routine.
arguments : list of Arguments
These are things that appear in arguments of a routine, often
appearing on the right-hand side of a function call. These are
commonly InputArguments but in some languages, they can also be
OutputArguments or InOutArguments (e.g., pass-by-reference in C
code).
results : list of Results
These are the return values of the routine, often appearing on
the left-hand side of a function call. The difference between
Results and OutputArguments and when you should use each is
language-specific.
local_vars : list of Results
These are variables that will be defined at the beginning of the
function.
global_vars : list of Symbols
Variables which will not be passed into the function.
"""
# extract all input symbols and all symbols appearing in an expression
input_symbols = set()
symbols = set()
for arg in arguments:
if isinstance(arg, OutputArgument):
symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed))
elif isinstance(arg, InputArgument):
input_symbols.add(arg.name)
elif isinstance(arg, InOutArgument):
input_symbols.add(arg.name)
symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed))
else:
raise ValueError("Unknown Routine argument: %s" % arg)
for r in results:
if not isinstance(r, Result):
raise ValueError("Unknown Routine result: %s" % r)
symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed))
local_symbols = set()
for r in local_vars:
if isinstance(r, Result):
symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed))
local_symbols.add(r.name)
else:
local_symbols.add(r)
symbols = {s.label if isinstance(s, Idx) else s for s in symbols}
# Check that all symbols in the expressions are covered by
# InputArguments/InOutArguments---subset because user could
# specify additional (unused) InputArguments or local_vars.
notcovered = symbols.difference(
input_symbols.union(local_symbols).union(global_vars))
if notcovered != set():
raise ValueError("Symbols needed for output are not in input " +
", ".join([str(x) for x in notcovered]))
self.name = name
self.arguments = arguments
self.results = results
self.local_vars = local_vars
self.global_vars = global_vars
def __str__(self):
return self.__class__.__name__ + "({name!r}, {arguments}, {results}, {local_vars}, {global_vars})".format(**self.__dict__)
__repr__ = __str__
@property
def variables(self):
"""Returns a set of all variables possibly used in the routine.
For routines with unnamed return values, the dummies that may or
may not be used will be included in the set.
"""
v = set(self.local_vars)
for arg in self.arguments:
v.add(arg.name)
for res in self.results:
v.add(res.result_var)
return v
@property
def result_variables(self):
"""Returns a list of OutputArgument, InOutArgument and Result.
        If return values are present, they are at the end of the list.
"""
args = [arg for arg in self.arguments if isinstance(
arg, (OutputArgument, InOutArgument))]
args.extend(self.results)
return args
class DataType:
"""Holds strings for a certain datatype in different languages."""
def __init__(self, cname, fname, pyname, jlname, octname, rsname):
self.cname = cname
self.fname = fname
self.pyname = pyname
self.jlname = jlname
self.octname = octname
self.rsname = rsname
default_datatypes = {
"int": DataType("int", "INTEGER*4", "int", "", "", "i32"),
"float": DataType("double", "REAL*8", "float", "", "", "f64"),
"complex": DataType("double", "COMPLEX*16", "complex", "", "", "float") #FIXME:
# complex is only supported in fortran, python, julia, and octave.
# So to not break c or rust code generation, we stick with double or
    # float, respectively (but actually should raise an exception for
# explicitly complex variables (x.is_complex==True))
}
COMPLEX_ALLOWED = False
def get_default_datatype(expr, complex_allowed=None):
"""Derives an appropriate datatype based on the expression."""
if complex_allowed is None:
complex_allowed = COMPLEX_ALLOWED
if complex_allowed:
final_dtype = "complex"
else:
final_dtype = "float"
if expr.is_integer:
return default_datatypes["int"]
elif expr.is_real:
return default_datatypes["float"]
elif isinstance(expr, MatrixBase):
#check all entries
dt = "int"
for element in expr:
if dt == "int" and not element.is_integer:
dt = "float"
if dt == "float" and not element.is_real:
return default_datatypes[final_dtype]
return default_datatypes[dt]
else:
return default_datatypes[final_dtype]
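# For example (illustration only): ``get_default_datatype(Symbol('n',
# integer=True))`` yields the "int" datatype, while a plain ``Symbol`` maps to
# "float" (or to "complex" when ``COMPLEX_ALLOWED`` is set).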
class Variable:
"""Represents a typed variable."""
def __init__(self, name, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol or MatrixSymbol
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
        dimensions : sequence containing tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
if not isinstance(name, (Symbol, MatrixSymbol)):
raise TypeError("The first argument must be a SymPy symbol.")
if datatype is None:
datatype = get_default_datatype(name)
elif not isinstance(datatype, DataType):
raise TypeError("The (optional) `datatype' argument must be an "
"instance of the DataType class.")
if dimensions and not isinstance(dimensions, (tuple, list)):
raise TypeError(
"The dimension argument must be a sequence of tuples")
self._name = name
self._datatype = {
'C': datatype.cname,
'FORTRAN': datatype.fname,
'JULIA': datatype.jlname,
'OCTAVE': datatype.octname,
'PYTHON': datatype.pyname,
'RUST': datatype.rsname,
}
self.dimensions = dimensions
self.precision = precision
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.name)
__repr__ = __str__
@property
def name(self):
return self._name
def get_datatype(self, language):
"""Returns the datatype string for the requested language.
Examples
========
>>> from sympy import Symbol
>>> from sympy.utilities.codegen import Variable
>>> x = Variable(Symbol('x'))
>>> x.get_datatype('c')
'double'
>>> x.get_datatype('fortran')
'REAL*8'
"""
try:
return self._datatype[language.upper()]
except KeyError:
raise CodeGenError("Has datatypes for languages: %s" %
", ".join(self._datatype))
class Argument(Variable):
"""An abstract Argument data structure: a name and a data type.
This structure is refined in the descendants below.
"""
pass
class InputArgument(Argument):
pass
class ResultBase:
"""Base class for all "outgoing" information from a routine.
    Objects of this class store a SymPy expression and a SymPy object
representing a result variable that will be used in the generated code
only if necessary.
"""
def __init__(self, expr, result_var):
self.expr = expr
self.result_var = result_var
def __str__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.expr,
self.result_var)
__repr__ = __str__
class OutputArgument(Argument, ResultBase):
"""OutputArgument are always initialized in the routine."""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol, MatrixSymbol
The name of this variable. When used for code generation, this
might appear, for example, in the prototype of function in the
argument list.
result_var : Symbol, Indexed
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y".
expr : object
The expression that should be output, typically a SymPy
expression.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
        dimensions : sequence containing tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.result_var, self.expr)
__repr__ = __str__
class InOutArgument(Argument, ResultBase):
"""InOutArgument are never initialized in the routine."""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
if not datatype:
datatype = get_default_datatype(expr)
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
__init__.__doc__ = OutputArgument.__init__.__doc__
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.expr,
self.result_var)
__repr__ = __str__
class Result(Variable, ResultBase):
"""An expression for a return value.
The name result is used to avoid conflicts with the reserved word
"return" in the Python language. It is also shorter than ReturnValue.
These may or may not need a name in the destination (e.g., "return(x*y)"
might return a value without ever naming it).
"""
def __init__(self, expr, name=None, result_var=None, datatype=None,
dimensions=None, precision=None):
"""Initialize a return value.
Parameters
==========
expr : SymPy expression
name : Symbol, MatrixSymbol, optional
The name of this return variable. When used for code generation,
this might appear, for example, in the prototype of function in a
list of return values. A dummy name is generated if omitted.
result_var : Symbol, Indexed, optional
Something that can be used to assign a value to this variable.
Typically the same as `name` but for Indexed this should be e.g.,
"y[i]" whereas `name` should be the Symbol "y". Defaults to
`name` if omitted.
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the expr argument.
        dimensions : sequence containing tuples, optional
If present, this variable is interpreted as an array,
where this sequence of tuples specifies (lower, upper)
bounds for each index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
# Basic because it is the base class for all types of expressions
if not isinstance(expr, (Basic, MatrixBase)):
raise TypeError("The first argument must be a SymPy expression.")
if name is None:
name = 'result_%d' % abs(hash(expr))
if datatype is None:
#try to infer data type from the expression
datatype = get_default_datatype(expr)
if isinstance(name, str):
if isinstance(expr, (MatrixBase, MatrixExpr)):
name = MatrixSymbol(name, *expr.shape)
else:
name = Symbol(name)
if result_var is None:
result_var = name
Variable.__init__(self, name, datatype=datatype,
dimensions=dimensions, precision=precision)
ResultBase.__init__(self, expr, result_var)
def __str__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.expr, self.name,
self.result_var)
__repr__ = __str__
#
# Transformation of routine objects into code
#
class CodeGen:
"""Abstract class for the code generators."""
printer = None # will be set to an instance of a CodePrinter subclass
def _indent_code(self, codelines):
return self.printer.indent_code(codelines)
def _printer_method_with_settings(self, method, settings=None, *args, **kwargs):
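        # Temporarily apply the given printer settings, call the requested
        # printer method, then restore the original settings.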
settings = settings or {}
ori = {k: self.printer._settings[k] for k in settings}
for k, v in settings.items():
self.printer._settings[k] = v
result = getattr(self.printer, method)(*args, **kwargs)
for k, v in ori.items():
self.printer._settings[k] = v
return result
def _get_symbol(self, s):
"""Returns the symbol as fcode prints it."""
if self.printer._settings['human']:
expr_str = self.printer.doprint(s)
else:
constants, not_supported, expr_str = self.printer.doprint(s)
if constants or not_supported:
raise ValueError("Failed to print %s" % str(s))
return expr_str.strip()
def __init__(self, project="project", cse=False):
"""Initialize a code generator.
Derived classes will offer more options that affect the generated
code.
"""
self.project = project
self.cse = cse
def routine(self, name, expr, argument_sequence=None, global_vars=None):
"""Creates an Routine object that is appropriate for this language.
This implementation is appropriate for at least C/Fortran. Subclasses
can override this if necessary.
Here, we assume at most one return value (the l-value) which must be
scalar. Additional outputs are OutputArguments (e.g., pointers on
right-hand-side or pass-by-reference). Matrices are always returned
via OutputArguments. If ``argument_sequence`` is None, arguments will
be ordered alphabetically, but with all InputArguments first, and then
OutputArgument and InOutArguments.
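        A minimal illustration (an added sketch, not part of the original
        docstring):
        >>> from sympy.utilities.codegen import C99CodeGen
        >>> from sympy.abc import x, y
        >>> r = C99CodeGen().routine("test", x*y)
        >>> [str(arg.name) for arg in r.arguments]
        ['x', 'y']
        >>> len(r.results)
        1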
"""
if self.cse:
from sympy.simplify.cse_main import cse
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
for e in expr:
if not e.is_Equality:
raise CodeGenError("Lists of expressions must all be Equalities. {} is not.".format(e))
# create a list of right hand sides and simplify them
rhs = [e.rhs for e in expr]
common, simplified = cse(rhs)
# pack the simplified expressions back up with their left hand sides
expr = [Equality(e.lhs, rhs) for e, rhs in zip(expr, simplified)]
else:
if isinstance(expr, Equality):
common, simplified = cse(expr.rhs) #, ignore=in_out_args)
expr = Equality(expr.lhs, simplified[0])
else:
common, simplified = cse(expr)
expr = simplified
local_vars = [Result(b,a) for a,b in common]
local_symbols = {a for a,_ in common}
local_expressions = Tuple(*[b for _,b in common])
else:
local_expressions = Tuple()
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
if self.cse:
if {i.label for i in expressions.atoms(Idx)} != set():
raise CodeGenError("CSE and Indexed expressions do not play well together yet")
else:
# local variables for indexed expressions
local_vars = {i.label for i in expressions.atoms(Idx)}
local_symbols = local_vars
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = (expressions.free_symbols | local_expressions.free_symbols) - local_symbols - global_vars
new_symbols = set()
new_symbols.update(symbols)
for symbol in symbols:
if isinstance(symbol, Idx):
new_symbols.remove(symbol)
new_symbols.update(symbol.args[1].free_symbols)
if isinstance(symbol, Indexed):
new_symbols.remove(symbol)
symbols = new_symbols
# Decide whether to use output argument or return value
return_val = []
output_args = []
for expr in expressions:
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
if isinstance(out_arg, Indexed):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg.base.label
elif isinstance(out_arg, Symbol):
dims = []
symbol = out_arg
elif isinstance(out_arg, MatrixSymbol):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg
else:
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
if expr.has(symbol):
output_args.append(
InOutArgument(symbol, out_arg, expr, dimensions=dims))
else:
output_args.append(
OutputArgument(symbol, out_arg, expr, dimensions=dims))
# remove duplicate arguments when they are not local variables
if symbol not in local_vars:
# avoid duplicate arguments
symbols.remove(symbol)
elif isinstance(expr, (ImmutableMatrix, MatrixSlice)):
# Create a "dummy" MatrixSymbol to use as the Output arg
out_arg = MatrixSymbol('out_%s' % abs(hash(expr)), *expr.shape)
dims = tuple([(S.Zero, dim - 1) for dim in out_arg.shape])
output_args.append(
OutputArgument(out_arg, out_arg, expr, dimensions=dims))
else:
return_val.append(Result(expr))
arg_list = []
# setup input argument list
# helper to get dimensions for data for array-like args
def dimensions(s):
return [(S.Zero, dim - 1) for dim in s.shape]
array_symbols = {}
for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol) | local_expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
if symbol in array_symbols:
array = array_symbols[symbol]
metadata = {'dimensions': dimensions(array)}
else:
metadata = {}
arg_list.append(InputArgument(symbol, **metadata))
output_args.sort(key=lambda x: str(x.name))
arg_list.extend(output_args)
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
if isinstance(symbol, (IndexedBase, MatrixSymbol)):
metadata = {'dimensions': dimensions(symbol)}
else:
metadata = {}
new_args.append(InputArgument(symbol, **metadata))
arg_list = new_args
return Routine(name, arg_list, return_val, local_vars, global_vars)
def write(self, routines, prefix, to_files=False, header=True, empty=True):
"""Writes all the source code files for the given routines.
The generated source is returned as a list of (filename, contents)
tuples, or is written to files (see below). Each filename consists
of the given prefix, appended with an appropriate extension.
Parameters
==========
routines : list
A list of Routine instances to be written
prefix : string
The prefix for the output files
to_files : bool, optional
When True, the output is written to files. Otherwise, a list
of (filename, contents) tuples is returned. [default: False]
header : bool, optional
When True, a header comment is included on top of each source
file. [default: True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default: True]
"""
if to_files:
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
with open(filename, "w") as f:
dump_fn(self, routines, f, prefix, header, empty)
else:
result = []
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
contents = StringIO()
dump_fn(self, routines, contents, prefix, header, empty)
result.append((filename, contents.getvalue()))
return result
def dump_code(self, routines, f, prefix, header=True, empty=True):
"""Write the code by calling language specific methods.
The generated file contains all the definitions of the routines in
low-level code and refers to the header file if appropriate.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
code_lines = self._preprocessor_statements(prefix)
for routine in routines:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_globals(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if header:
code_lines = ''.join(self._get_header() + [code_lines])
if code_lines:
f.write(code_lines)
class CodeGenError(Exception):
pass
class CodeGenArgumentListError(Exception):
@property
def missing_args(self):
return self.args[1]
header_comment = """Code generated with SymPy %(version)s
See http://www.sympy.org/ for more information.
This file is part of '%(project)s'
"""
class CCodeGen(CodeGen):
"""Generator for C code.
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.c and <prefix>.h respectively.
"""
code_extension = "c"
interface_extension = "h"
standard = 'c99'
def __init__(self, project="project", printer=None,
preprocessor_statements=None, cse=False):
super().__init__(project=project, cse=cse)
self.printer = printer or c_code_printers[self.standard.lower()]()
self.preprocessor_statements = preprocessor_statements
if preprocessor_statements is None:
self.preprocessor_statements = ['#include <math.h>']
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append(" *%s*\n" % line.center(76))
code_lines.append(" " + "*"*78 + "/\n")
return code_lines
def get_prototype(self, routine):
"""Returns a string for the function prototype of the routine.
        If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
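        A small illustration (an added sketch):
        >>> from sympy.utilities.codegen import C99CodeGen
        >>> from sympy.abc import x
        >>> gen = C99CodeGen()
        >>> gen.get_prototype(gen.routine("f", x**2))
        'double f(double x)'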
"""
if len(routine.results) > 1:
raise CodeGenError("C only supports a single or no return value.")
elif len(routine.results) == 1:
ctype = routine.results[0].get_datatype('C')
else:
ctype = "void"
type_args = []
for arg in routine.arguments:
name = self.printer.doprint(arg.name)
if arg.dimensions or isinstance(arg, ResultBase):
type_args.append((arg.get_datatype('C'), "*%s" % name))
else:
type_args.append((arg.get_datatype('C'), name))
arguments = ", ".join([ "%s %s" % t for t in type_args])
return "%s %s(%s)" % (ctype, routine.name, arguments)
def _preprocessor_statements(self, prefix):
code_lines = []
code_lines.append('#include "{}.h"'.format(os.path.basename(prefix)))
code_lines.extend(self.preprocessor_statements)
code_lines = ['{}\n'.format(l) for l in code_lines]
return code_lines
def _get_routine_opening(self, routine):
prototype = self.get_prototype(routine)
return ["%s {\n" % prototype]
def _declare_arguments(self, routine):
# arguments are declared in prototype
return []
def _declare_globals(self, routine):
# global variables are not explicitly declared within C functions
return []
def _declare_locals(self, routine):
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
code_lines = []
for result in routine.local_vars:
            # Local variables that are simple symbols, such as those used as
            # indices in for loops, are declared elsewhere.
if not isinstance(result, Result):
continue
if result.name != result.result_var:
                raise CodeGenError("Result variable and name should match: {}".format(result))
assign_to = result.name
t = result.get_datatype('c')
if isinstance(result.expr, (MatrixBase, MatrixExpr)):
dims = result.expr.shape
code_lines.append("{} {}[{}];\n".format(t, str(assign_to), dims[0]*dims[1]))
prefix = ""
else:
prefix = "const {} ".format(t)
constants, not_c, c_expr = self._printer_method_with_settings(
'doprint', dict(human=False, dereference=dereference),
result.expr, assign_to=assign_to)
for name, value in sorted(constants, key=str):
code_lines.append("double const %s = %s;\n" % (name, value))
code_lines.append("{}{}\n".format(prefix, c_expr))
return code_lines
def _call_printer(self, routine):
code_lines = []
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
return_val = None
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name + "_result"
t = result.get_datatype('c')
code_lines.append("{} {};\n".format(t, str(assign_to)))
return_val = assign_to
else:
assign_to = result.result_var
try:
constants, not_c, c_expr = self._printer_method_with_settings(
'doprint', dict(human=False, dereference=dereference),
result.expr, assign_to=assign_to)
except AssignmentError:
assign_to = result.result_var
code_lines.append(
"%s %s;\n" % (result.get_datatype('c'), str(assign_to)))
constants, not_c, c_expr = self._printer_method_with_settings(
'doprint', dict(human=False, dereference=dereference),
result.expr, assign_to=assign_to)
for name, value in sorted(constants, key=str):
code_lines.append("double const %s = %s;\n" % (name, value))
code_lines.append("%s\n" % c_expr)
if return_val:
code_lines.append(" return %s;\n" % return_val)
return code_lines
def _get_routine_ending(self, routine):
return ["}\n"]
def dump_c(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_c.extension = code_extension # type: ignore
dump_c.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the C header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to construct the include guards.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
guard_name = "%s__%s__H" % (self.project.replace(
" ", "_").upper(), prefix.replace("/", "_").upper())
# include guards
if empty:
print(file=f)
print("#ifndef %s" % guard_name, file=f)
print("#define %s" % guard_name, file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_prototype(routine)
print("%s;" % prototype, file=f)
        # end of include guards
if empty:
print(file=f)
print("#endif", file=f)
if empty:
print(file=f)
dump_h.extension = interface_extension # type: ignore
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_c, dump_h]
class C89CodeGen(CCodeGen):
standard = 'C89'
class C99CodeGen(CCodeGen):
standard = 'C99'
class FCodeGen(CodeGen):
"""Generator for Fortran 95 code
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.f90 and <prefix>.h respectively.
"""
code_extension = "f90"
interface_extension = "h"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or FCodePrinter()
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("!" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append("!*%s*\n" % line.center(76))
code_lines.append("!" + "*"*78 + '\n')
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the fortran routine."""
code_list = []
if len(routine.results) > 1:
raise CodeGenError(
"Fortran only supports a single or no return value.")
elif len(routine.results) == 1:
result = routine.results[0]
code_list.append(result.get_datatype('fortran'))
code_list.append("function")
else:
code_list.append("subroutine")
args = ", ".join("%s" % self._get_symbol(arg.name)
for arg in routine.arguments)
call_sig = "{}({})\n".format(routine.name, args)
# Fortran 95 requires all lines be less than 132 characters, so wrap
# this line before appending.
call_sig = ' &\n'.join(textwrap.wrap(call_sig,
width=60,
break_long_words=False)) + '\n'
code_list.append(call_sig)
code_list = [' '.join(code_list)]
code_list.append('implicit none\n')
return code_list
def _declare_arguments(self, routine):
# argument type declarations
code_list = []
array_list = []
scalar_list = []
for arg in routine.arguments:
if isinstance(arg, InputArgument):
typeinfo = "%s, intent(in)" % arg.get_datatype('fortran')
elif isinstance(arg, InOutArgument):
typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran')
elif isinstance(arg, OutputArgument):
typeinfo = "%s, intent(out)" % arg.get_datatype('fortran')
else:
raise CodeGenError("Unknown Argument type: %s" % type(arg))
fprint = self._get_symbol
if arg.dimensions:
# fortran arrays start at 1
dimstr = ", ".join(["%s:%s" % (
fprint(dim[0] + 1), fprint(dim[1] + 1))
for dim in arg.dimensions])
typeinfo += ", dimension(%s)" % dimstr
array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
else:
scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
# scalars first, because they can be used in array declarations
code_list.extend(scalar_list)
code_list.extend(array_list)
return code_list
def _declare_globals(self, routine):
# Global variables not explicitly declared within Fortran 90 functions.
# Note: a future F77 mode may need to generate "common" blocks.
return []
def _declare_locals(self, routine):
code_list = []
for var in sorted(routine.local_vars, key=str):
typeinfo = get_default_datatype(var)
code_list.append("%s :: %s\n" % (
typeinfo.fname, self._get_symbol(var)))
return code_list
def _get_routine_ending(self, routine):
"""Returns the closing statements of the fortran routine."""
if len(routine.results) == 1:
return ["end function\n"]
else:
return ["end subroutine\n"]
def get_interface(self, routine):
"""Returns a string for the function interface.
The routine should have a single result object, which can be None.
If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
"""
prototype = [ "interface\n" ]
prototype.extend(self._get_routine_opening(routine))
prototype.extend(self._declare_arguments(routine))
prototype.extend(self._get_routine_ending(routine))
prototype.append("end interface\n")
return "".join(prototype)
def _call_printer(self, routine):
declarations = []
code_lines = []
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name
elif isinstance(result, (OutputArgument, InOutArgument)):
assign_to = result.result_var
constants, not_fortran, f_expr = self._printer_method_with_settings(
'doprint', dict(human=False, source_format='free', standard=95),
result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
t = get_default_datatype(obj)
declarations.append(
"%s, parameter :: %s = %s\n" % (t.fname, obj, v))
for obj in sorted(not_fortran, key=str):
t = get_default_datatype(obj)
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("%s :: %s\n" % (t.fname, name))
code_lines.append("%s\n" % f_expr)
return declarations + code_lines
def _indent_code(self, codelines):
return self._printer_method_with_settings(
'indent_code', dict(human=False, source_format='free'), codelines)
def dump_f95(self, routines, f, prefix, header=True, empty=True):
# check that symbols are unique with ignorecase
for r in routines:
lowercase = {str(x).lower() for x in r.variables}
orig_case = {str(x) for x in r.variables}
if len(lowercase) < len(orig_case):
raise CodeGenError("Fortran ignores case. Got symbols: %s" %
(", ".join([str(var) for var in r.variables])))
self.dump_code(routines, f, prefix, header, empty)
dump_f95.extension = code_extension # type: ignore
dump_f95.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the interface to a header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_interface(routine)
f.write(prototype)
if empty:
print(file=f)
dump_h.extension = interface_extension # type: ignore
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_f95, dump_h]
class JuliaCodeGen(CodeGen):
"""Generator for Julia code.
The .write() method inherited from CodeGen will output a code file
<prefix>.jl.
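Examples
========
A minimal usage sketch via the module-level ``codegen`` function (the
generated Julia source is not reproduced here, hence the skips):
>>> from sympy.abc import x, y
>>> from sympy.utilities.codegen import codegen
>>> [(jl_name, jl_code)] = codegen(("f", x + y), "Julia", header=False)  # doctest: +SKIP
>>> jl_name  # doctest: +SKIP
'f.jl'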
"""
code_extension = "jl"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or JuliaCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Julia."""
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
old_symbols = expressions.free_symbols - local_vars - global_vars
symbols = set()
for s in old_symbols:
if isinstance(s, Idx):
symbols.update(s.args[1].free_symbols)
elif not isinstance(s, Indexed):
symbols.add(s)
# Julia supports multiple return values
return_vals = []
output_args = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
dims = tuple([ (S.One, dim) for dim in out_arg.shape])
symbol = out_arg.base.label
output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims))
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
output_args.sort(key=lambda x: str(x.name))
arg_list = list(output_args)
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
if line == '':
code_lines.append("#\n")
else:
code_lines.append("# %s\n" % line)
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the routine."""
code_list = []
code_list.append("function ")
# Inputs
args = []
for i, arg in enumerate(routine.arguments):
if isinstance(arg, OutputArgument):
raise CodeGenError("Julia: invalid argument of type %s" %
str(type(arg)))
if isinstance(arg, (InputArgument, InOutArgument)):
args.append("%s" % self._get_symbol(arg.name))
args = ", ".join(args)
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ "".join(code_list) ]
return code_list
def _declare_arguments(self, routine):
return []
def _declare_globals(self, routine):
return []
def _declare_locals(self, routine):
return []
def _get_routine_ending(self, routine):
outs = []
for result in routine.results:
if isinstance(result, Result):
# Note: name not result_var; want `y` not `y[i]` for Indexed
s = self._get_symbol(result.name)
else:
raise CodeGenError("unexpected object in Routine results")
outs.append(s)
return ["return " + ", ".join(outs) + "\nend\n"]
def _call_printer(self, routine):
declarations = []
code_lines = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, jl_expr = self._printer_method_with_settings(
'doprint', dict(human=False), result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
declarations.append(
"%s = %s\n" % (obj, v))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append(
"# unsupported: %s\n" % (name))
code_lines.append("%s\n" % (jl_expr))
return declarations + code_lines
def _indent_code(self, codelines):
# Note that indenting seems to happen twice, first
# statement-by-statement by JuliaPrinter then again here.
p = JuliaCodePrinter({'human': False})
return p.indent_code(codelines)
def dump_jl(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_jl.extension = code_extension # type: ignore
dump_jl.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_jl]
class OctaveCodeGen(CodeGen):
"""Generator for Octave code.
The .write() method inherited from CodeGen will output a code file
<prefix>.m.
Octave .m files usually contain one function. That function name should
match the filename (``prefix``). If you pass multiple ``name_expr`` pairs,
the latter ones are presumed to be private functions accessed by the
primary function.
You should only pass inputs to ``argument_sequence``: outputs are ordered
according to their order in ``name_expr``.
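Examples
========
For example, both routines below are written to a single ``f.m`` file, with
``g`` emitted as a private function accessed by ``f`` (the generated source
is not reproduced here, hence the skips):
>>> from sympy.abc import x, y
>>> from sympy.utilities.codegen import codegen
>>> [(m_name, m_code)] = codegen([("f", 2*x), ("g", x + y)], "Octave", header=False)  # doctest: +SKIP
>>> m_name  # doctest: +SKIP
'f.m'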
"""
code_extension = "m"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or OctaveCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Octave."""
# FIXME: this is probably general enough for other high-level
# languages; perhaps it's the C/Fortran one that is specialized!
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
old_symbols = expressions.free_symbols - local_vars - global_vars
symbols = set()
for s in old_symbols:
if isinstance(s, Idx):
symbols.update(s.args[1].free_symbols)
elif not isinstance(s, Indexed):
symbols.add(s)
# Octave supports multiple return values
return_vals = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
symbol = out_arg.base.label
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
arg_list = []
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
if line == '':
code_lines.append("%\n")
else:
code_lines.append("%% %s\n" % line)
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the routine."""
code_list = []
code_list.append("function ")
# Outputs
outs = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
# Note: name not result_var; want `y` not `y(i)` for Indexed
s = self._get_symbol(result.name)
else:
raise CodeGenError("unexpected object in Routine results")
outs.append(s)
if len(outs) > 1:
code_list.append("[" + (", ".join(outs)) + "]")
else:
code_list.append("".join(outs))
code_list.append(" = ")
# Inputs
args = []
for i, arg in enumerate(routine.arguments):
if isinstance(arg, (OutputArgument, InOutArgument)):
raise CodeGenError("Octave: invalid argument of type %s" %
str(type(arg)))
if isinstance(arg, InputArgument):
args.append("%s" % self._get_symbol(arg.name))
args = ", ".join(args)
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ "".join(code_list) ]
return code_list
def _declare_arguments(self, routine):
return []
def _declare_globals(self, routine):
if not routine.global_vars:
return []
s = " ".join(sorted([self._get_symbol(g) for g in routine.global_vars]))
return ["global " + s + "\n"]
def _declare_locals(self, routine):
return []
def _get_routine_ending(self, routine):
return ["end\n"]
def _call_printer(self, routine):
declarations = []
code_lines = []
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, oct_expr = self._printer_method_with_settings(
'doprint', dict(human=False), result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
declarations.append(
" %s = %s; %% constant\n" % (obj, v))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append(
" %% unsupported: %s\n" % (name))
code_lines.append("%s\n" % (oct_expr))
return declarations + code_lines
def _indent_code(self, codelines):
return self._printer_method_with_settings(
'indent_code', dict(human=False), codelines)
def dump_m(self, routines, f, prefix, header=True, empty=True, inline=True):
# Note: this used to call self.dump_code(), but we need more control over the header
code_lines = self._preprocessor_statements(prefix)
for i, routine in enumerate(routines):
if i > 0:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
if i == 0:
if routine.name != prefix:
raise ValueError('Octave function name should match prefix')
if header:
code_lines.append("%" + prefix.upper() +
" Autogenerated by SymPy\n")
code_lines.append(''.join(self._get_header()))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_globals(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if code_lines:
f.write(code_lines)
dump_m.extension = code_extension # type: ignore
dump_m.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_m]
class RustCodeGen(CodeGen):
"""Generator for Rust code.
The .write() method inherited from CodeGen will output a code file
<prefix>.rs
"""
code_extension = "rs"
def __init__(self, project="project", printer=None):
super().__init__(project=project)
self.printer = printer or RustCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Rust."""
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = expressions.free_symbols - local_vars - global_vars - expressions.atoms(Indexed)
# Rust supports multiple return values
return_vals = []
output_args = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
dims = tuple([ (S.One, dim) for dim in out_arg.shape])
symbol = out_arg.base.label
output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims))
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
output_args.sort(key=lambda x: str(x.name))
arg_list = list(output_args)
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/*\n")
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append((" *%s" % line.center(76)).rstrip() + "\n")
code_lines.append(" */\n")
return code_lines
def get_prototype(self, routine):
"""Returns a string for the function prototype of the routine.
If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
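Examples
========
A small sketch of the expected shape of the prototype; the exact type
names come from the default datatype table, so the doctest is skipped:
>>> from sympy.abc import x, y
>>> from sympy.utilities.codegen import RustCodeGen
>>> gen = RustCodeGen()
>>> gen.get_prototype(gen.routine("f", x + y, None, None))  # doctest: +SKIP
'fn f(x: f64, y: f64) -> f64'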
"""
results = [i.get_datatype('Rust') for i in routine.results]
if len(results) == 1:
rstype = " -> " + results[0]
elif len(routine.results) > 1:
rstype = " -> (" + ", ".join(results) + ")"
else:
rstype = ""
type_args = []
for arg in routine.arguments:
name = self.printer.doprint(arg.name)
if arg.dimensions or isinstance(arg, ResultBase):
type_args.append(("*%s" % name, arg.get_datatype('Rust')))
else:
type_args.append((name, arg.get_datatype('Rust')))
arguments = ", ".join([ "%s: %s" % t for t in type_args])
return "fn %s(%s)%s" % (routine.name, arguments, rstype)
def _preprocessor_statements(self, prefix):
code_lines = []
# code_lines.append("use std::f64::consts::*;\n")
return code_lines
def _get_routine_opening(self, routine):
prototype = self.get_prototype(routine)
return ["%s {\n" % prototype]
def _declare_arguments(self, routine):
# arguments are declared in prototype
return []
def _declare_globals(self, routine):
# global variables are not explicitly declared within Rust functions
return []
def _declare_locals(self, routine):
# loop variables are declared in loop statement
return []
def _call_printer(self, routine):
code_lines = []
declarations = []
returns = []
# Compose a list of symbols to be dereferenced in the function
# body. These are the arguments that were passed by a reference
# pointer, excluding arrays.
dereference = []
for arg in routine.arguments:
if isinstance(arg, ResultBase) and not arg.dimensions:
dereference.append(arg.name)
for i, result in enumerate(routine.results):
if isinstance(result, Result):
assign_to = result.result_var
returns.append(str(result.result_var))
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, rs_expr = self._printer_method_with_settings(
'doprint', dict(human=False), result.expr, assign_to=assign_to)
for name, value in sorted(constants, key=str):
declarations.append("const %s: f64 = %s;\n" % (name, value))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("// unsupported: %s\n" % (name))
code_lines.append("let %s\n" % rs_expr);
if len(returns) > 1:
returns = ['(' + ', '.join(returns) + ')']
returns.append('\n')
return declarations + code_lines + returns
def _get_routine_ending(self, routine):
return ["}\n"]
def dump_rs(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_rs.extension = code_extension # type: ignore
dump_rs.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_rs]
def get_code_generator(language, project=None, standard=None, printer=None):
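"""Return a CodeGen subclass instance for the requested ``language``.
A brief sketch of the dispatch performed by the lookup table below:
>>> from sympy.utilities.codegen import get_code_generator
>>> type(get_code_generator('C', standard='c99')).__name__
'C99CodeGen'
>>> type(get_code_generator('Octave')).__name__
'OctaveCodeGen'
"""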
if language == 'C':
if standard is None:
pass
elif standard.lower() == 'c89':
language = 'C89'
elif standard.lower() == 'c99':
language = 'C99'
CodeGenClass = {"C": CCodeGen, "C89": C89CodeGen, "C99": C99CodeGen,
"F95": FCodeGen, "JULIA": JuliaCodeGen,
"OCTAVE": OctaveCodeGen,
"RUST": RustCodeGen}.get(language.upper())
if CodeGenClass is None:
raise ValueError("Language '%s' is not supported." % language)
return CodeGenClass(project, printer)
#
# Friendly functions
#
def codegen(name_expr, language=None, prefix=None, project="project",
to_files=False, header=True, empty=True, argument_sequence=None,
global_vars=None, standard=None, code_gen=None, printer=None):
"""Generate source code for expressions in a given language.
Parameters
==========
name_expr : tuple, or list of tuples
A single (name, expression) tuple or a list of (name, expression)
tuples. Each tuple corresponds to a routine. If the expression is
an equality (an instance of class Equality) the left hand side is
considered an output argument. If expression is an iterable, then
the routine will have multiple outputs.
language : string, optional
A string that indicates the source code language. This is case
insensitive. Currently, 'C', 'F95', 'Julia', 'Octave', and 'Rust' are
supported. 'Octave' generates code compatible with both Octave and
Matlab. Can be omitted when ``code_gen`` is provided.
prefix : string, optional
A prefix for the names of the files that contain the source code.
Language-dependent suffixes will be appended. If omitted, the name
of the first name_expr tuple is used.
project : string, optional
A project name, used for making unique preprocessor instructions.
[default: "project"]
to_files : bool, optional
When True, the code will be written to one or more files with the
given prefix, otherwise strings with the names and contents of
these files are returned. [default: False]
header : bool, optional
When True, a header is written on top of each source file.
[default: True]
empty : bool, optional
When True, empty lines are used to structure the code.
[default: True]
argument_sequence : iterable, optional
Sequence of arguments for the routine in a preferred order. A
CodeGenError is raised if required arguments are missing.
Redundant arguments are used without warning. If omitted,
arguments will be ordered alphabetically, but with all input
arguments first, and then output or in-out arguments.
global_vars : iterable, optional
Sequence of global variables used by the routine. Variables
listed here will not show up as function arguments.
standard : string, optional
Only used when ``language`` is 'C': one of 'c89' or 'c99' (case
insensitive), selecting the C standard of the generated code.
code_gen : CodeGen instance
An instance of a CodeGen subclass. Overrides ``language``.
Examples
========
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(c_name, c_code), (h_name, c_header)] = codegen(
... ("f", x+y*z), "C89", "test", header=False, empty=False)
>>> print(c_name)
test.c
>>> print(c_code)
#include "test.h"
#include <math.h>
double f(double x, double y, double z) {
double f_result;
f_result = x + y*z;
return f_result;
}
<BLANKLINE>
>>> print(h_name)
test.h
>>> print(c_header)
#ifndef PROJECT__TEST__H
#define PROJECT__TEST__H
double f(double x, double y, double z);
#endif
<BLANKLINE>
Another example using Equality objects to give named outputs. Here the
filename (prefix) is taken from the first (name, expr) pair.
>>> from sympy.abc import f, g
>>> from sympy import Eq
>>> [(c_name, c_code), (h_name, c_header)] = codegen(
... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])],
... "C99", header=False, empty=False)
>>> print(c_name)
myfcn.c
>>> print(c_code)
#include "myfcn.h"
#include <math.h>
double myfcn(double x, double y) {
double myfcn_result;
myfcn_result = x + y;
return myfcn_result;
}
void fcn2(double x, double y, double *f, double *g) {
(*f) = 2*x;
(*g) = y;
}
<BLANKLINE>
If the generated function(s) will be part of a larger project where various
global variables have been defined, the 'global_vars' option can be used
to remove the specified variables from the function signature
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(f_name, f_code), header] = codegen(
... ("f", x+y*z), "F95", header=False, empty=False,
... argument_sequence=(x, y), global_vars=(z,))
>>> print(f_code)
REAL*8 function f(x, y)
implicit none
REAL*8, intent(in) :: x
REAL*8, intent(in) :: y
f = x + y*z
end function
<BLANKLINE>
"""
# Initialize the code generator.
if language is None:
if code_gen is None:
raise ValueError("Need either language or code_gen")
else:
if code_gen is not None:
raise ValueError("You cannot specify both language and code_gen.")
code_gen = get_code_generator(language, project, standard, printer)
if isinstance(name_expr[0], str):
# single tuple is given, turn it into a singleton list with a tuple.
name_expr = [name_expr]
if prefix is None:
prefix = name_expr[0][0]
# Construct Routines appropriate for this code_gen from (name, expr) pairs.
routines = []
for name, expr in name_expr:
routines.append(code_gen.routine(name, expr, argument_sequence,
global_vars))
# Write the code.
return code_gen.write(routines, prefix, to_files, header, empty)
def make_routine(name, expr, argument_sequence=None,
global_vars=None, language="F95"):
"""A factory that makes an appropriate Routine from an expression.
Parameters
==========
name : string
The name of this routine in the generated code.
expr : expression or list/tuple of expressions
A SymPy expression that the Routine instance will represent. If
given a list or tuple of expressions, the routine will be
considered to have multiple return values and/or output arguments.
argument_sequence : list or tuple, optional
List arguments for the routine in a preferred order. If omitted,
the results are language dependent, for example, alphabetical order
or in the same order as the given expressions.
global_vars : iterable, optional
Sequence of global variables used by the routine. Variables
listed here will not show up as function arguments.
language : string, optional
Specify a target language. The Routine itself should be
language-agnostic but the precise way one is created, error
checking, etc. depend on the language. [default: "F95"].
Notes
=====
A decision about whether to use output arguments or return values is made
depending on both the language and the particular mathematical expressions.
For an expression of type Equality, the left hand side is typically made
into an OutputArgument (or perhaps an InOutArgument if appropriate).
Otherwise, typically, the calculated expression is made a return value of
the routine.
Examples
========
>>> from sympy.utilities.codegen import make_routine
>>> from sympy.abc import x, y, f, g
>>> from sympy import Eq
>>> r = make_routine('test', [Eq(f, 2*x), Eq(g, x + y)])
>>> [arg.result_var for arg in r.results]
[]
>>> [arg.name for arg in r.arguments]
[x, y, f, g]
>>> [arg.name for arg in r.result_variables]
[f, g]
>>> r.local_vars
set()
Another more complicated example with a mixture of specified and
automatically-assigned names. Also has Matrix output.
>>> from sympy import Matrix
>>> r = make_routine('fcn', [x*y, Eq(f, 1), Eq(g, x + g), Matrix([[x, 2]])])
>>> [arg.result_var for arg in r.results] # doctest: +SKIP
[result_5397460570204848505]
>>> [arg.expr for arg in r.results]
[x*y]
>>> [arg.name for arg in r.arguments] # doctest: +SKIP
[x, y, f, g, out_8598435338387848786]
We can examine the various arguments more closely:
>>> from sympy.utilities.codegen import (InputArgument, OutputArgument,
... InOutArgument)
>>> [a.name for a in r.arguments if isinstance(a, InputArgument)]
[x, y]
>>> [a.name for a in r.arguments if isinstance(a, OutputArgument)] # doctest: +SKIP
[f, out_8598435338387848786]
>>> [a.expr for a in r.arguments if isinstance(a, OutputArgument)]
[1, Matrix([[x, 2]])]
>>> [a.name for a in r.arguments if isinstance(a, InOutArgument)]
[g]
>>> [a.expr for a in r.arguments if isinstance(a, InOutArgument)]
[g + x]
"""
# initialize a new code generator
code_gen = get_code_generator(language)
return code_gen.routine(name, expr, argument_sequence, global_vars)
|
6afdd108118a37ac56e929e142fe1163b4aded027c3859665e9b902f568b680f | """Module for compiling codegen output, and wrap the binary for use in
python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented.) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, e.g.,
>>> from sympy.abc import x,y
>>> expr = (x - y)**25
>>> flat = expr.expand()
>>> binary_callable = autowrap(flat)
>>> binary_callable(2, 3)
-1.0
Although a SymPy user might primarily be interested in working with
mathematical expressions and not in the details of wrapping tools
needed to evaluate such expressions efficiently in numerical form,
the user cannot do so without some understanding of the
limits in the target language. For example, the expanded expression
contains large coefficients which result in loss of precision when
computing the expression:
>>> binary_callable(3, 2)
0.0
>>> binary_callable(4, 5), binary_callable(5, 4)
(-22925376.0, 25165824.0)
Wrapping the unexpanded expression gives the expected behavior:
>>> e = autowrap(expr)
>>> e(4, 5), e(5, 4)
(-1.0, 1.0)
The callable returned from autowrap() is a binary Python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic Python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
do not need the binaries for another project.
"""
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError, check_output
from string import Template
from warnings import warn
from sympy.core.cache import cacheit
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument,
InputArgument, CodeGenArgumentListError,
Result, ResultBase, C99CodeGen)
from sympy.utilities.iterables import iterable
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'),
'modules': ('numpy',)}
class CodeWrapError(Exception):
pass
class CodeWrapper:
"""Base Class for code wrappers"""
_filename = "wrapped_code"
_module_basename = "wrapper_module"
_module_counter = 0
@property
def filename(self):
return "%s_%s" % (self._filename, CodeWrapper._module_counter)
@property
def module_name(self):
return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)
def __init__(self, generator, filepath=None, flags=[], verbose=False):
"""
generator -- the code generator to use
"""
self.generator = generator
self.filepath = filepath
self.flags = flags
self.quiet = not verbose
@property
def include_header(self):
return bool(self.filepath)
@property
def include_empty(self):
return bool(self.filepath)
def _generate_code(self, main_routine, routines):
routines.append(main_routine)
self.generator.write(
routines, self.filename, True, self.include_header,
self.include_empty)
def wrap_code(self, routine, helpers=None):
helpers = helpers or []
if self.filepath:
workdir = os.path.abspath(self.filepath)
else:
workdir = tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routine, helpers)
self._prepare_files(routine)
self._process_files(routine)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, routine.name)
def _process_files(self, routine):
command = self.command
command.extend(self.flags)
try:
retoutput = check_output(command, stderr=STDOUT)
except CalledProcessError as e:
raise CodeWrapError(
"Error while executing command: %s. Command output is:\n%s" % (
" ".join(command), e.output.decode('utf-8')))
if not self.quiet:
print(retoutput)
class DummyWrapper(CodeWrapper):
"""Class used for testing independent of backends """
template = """# dummy module for testing of SymPy
def %(name)s():
return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""
def _prepare_files(self, routine):
return
def _generate_code(self, routine, helpers):
with open('%s.py' % self.module_name, 'w') as f:
printed = ", ".join(
[str(res.expr) for res in routine.result_variables])
# convert OutputArguments to return value like f2py
args = filter(lambda x: not isinstance(
x, OutputArgument), routine.arguments)
retvals = []
for val in routine.result_variables:
if isinstance(val, Result):
retvals.append('nameless')
else:
retvals.append(val.result_var)
print(DummyWrapper.template % {
'name': routine.name,
'expr': printed,
'args': ", ".join([str(a.name) for a in args]),
'retvals': ", ".join([str(val) for val in retvals])
}, end="", file=f)
def _process_files(self, routine):
return
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
"""Wrapper that uses Cython"""
setup_template = """\
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cy_opts = {cythonize_options}
{np_import}
ext_mods = [Extension(
{ext_args},
include_dirs={include_dirs},
library_dirs={library_dirs},
libraries={libraries},
extra_compile_args={extra_compile_args},
extra_link_args={extra_link_args}
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
"""
pyx_imports = (
"import numpy as np\n"
"cimport numpy as np\n\n")
pyx_header = (
"cdef extern from '{header_file}.h':\n"
" {prototype}\n\n")
pyx_func = (
"def {name}_c({arg_string}):\n"
"\n"
"{declarations}"
"{body}")
std_compile_flag = '-std=c99'
def __init__(self, *args, **kwargs):
"""Instantiates a Cython code wrapper.
The following optional parameters get passed to ``distutils.Extension``
for building the Python extension module. Read its documentation to
learn more.
Parameters
==========
include_dirs : [list of strings]
A list of directories to search for C/C++ header files (in Unix
form for portability).
library_dirs : [list of strings]
A list of directories to search for C/C++ libraries at link time.
libraries : [list of strings]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [list of strings]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could be
anything. Note that the attribute ``std_compile_flag`` will be
appended to this list.
extra_link_args : [list of strings]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create
a new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
cythonize_options : [dictionary]
Keyword arguments passed on to cythonize.
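Examples
========
These options are normally passed through the ``autowrap`` front end; the
paths below are placeholders and a C toolchain is assumed, hence the skip:
>>> from sympy.abc import x
>>> from sympy.utilities.autowrap import autowrap
>>> f = autowrap(x**2 + 1, backend='cython',
...              include_dirs=['/opt/include'], libraries=['m'])  # doctest: +SKIP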
"""
self._include_dirs = kwargs.pop('include_dirs', [])
self._library_dirs = kwargs.pop('library_dirs', [])
self._libraries = kwargs.pop('libraries', [])
self._extra_compile_args = kwargs.pop('extra_compile_args', [])
self._extra_compile_args.append(self.std_compile_flag)
self._extra_link_args = kwargs.pop('extra_link_args', [])
self._cythonize_options = kwargs.pop('cythonize_options', {})
self._need_numpy = False
super().__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine, build_dir=os.curdir):
# NOTE : build_dir is used for testing purposes.
pyxfilename = self.module_name + '.pyx'
codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
# pyx
with open(os.path.join(build_dir, pyxfilename), 'w') as f:
self.dump_pyx([routine], f, self.filename)
# setup.py
ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
if self._need_numpy:
np_import = 'import numpy as np\n'
self._include_dirs.append('np.get_include()')
else:
np_import = ''
with open(os.path.join(build_dir, 'setup.py'), 'w') as f:
includes = str(self._include_dirs).replace("'np.get_include()'",
'np.get_include()')
f.write(self.setup_template.format(
ext_args=", ".join(ext_args),
np_import=np_import,
include_dirs=includes,
library_dirs=self._library_dirs,
libraries=self._libraries,
extra_compile_args=self._extra_compile_args,
extra_link_args=self._extra_link_args,
cythonize_options=self._cythonize_options
))
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name + '_c')
def dump_pyx(self, routines, f, prefix):
"""Write a Cython file with Python wrappers
This file contains all the definitions of the routines in c code and
refers to the header file.
Parameters
==========
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
"""
headers = []
functions = []
for routine in routines:
prototype = self.generator.get_prototype(routine)
# C Function Header Import
headers.append(self.pyx_header.format(header_file=prefix,
prototype=prototype))
# Partition the C function arguments into categories
py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
# Function prototype
name = routine.name
arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
# Local Declarations
local_decs = []
for arg, val in py_inf.items():
proto = self._prototype_arg(arg)
mat, ind = [self._string_var(v) for v in val]
local_decs.append(" cdef {} = {}.shape[{}]".format(proto, mat, ind))
local_decs.extend([" cdef {}".format(self._declare_arg(a)) for a in py_loc])
declarations = "\n".join(local_decs)
if declarations:
declarations = declarations + "\n"
# Function Body
args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
rets = ", ".join([self._string_var(r.name) for r in py_rets])
if routine.results:
body = ' return %s(%s)' % (routine.name, args_c)
if rets:
body = body + ', ' + rets
else:
body = ' %s(%s)\n' % (routine.name, args_c)
body = body + ' return ' + rets
functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
declarations=declarations, body=body))
# Write text to file
if self._need_numpy:
# Only import numpy if required
f.write(self.pyx_imports)
f.write('\n'.join(headers))
f.write('\n'.join(functions))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_args = []
py_returns = []
py_locals = []
py_inferred = {}
for arg in args:
if isinstance(arg, OutputArgument):
py_returns.append(arg)
py_locals.append(arg)
elif isinstance(arg, InOutArgument):
py_returns.append(arg)
py_args.append(arg)
else:
py_args.append(arg)
# Find arguments that are array dimensions. These can be inferred
# locally in the Cython code.
if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
dims = [d[1] + 1 for d in arg.dimensions]
sym_dims = [(i, d) for (i, d) in enumerate(dims) if
isinstance(d, Symbol)]
for (i, d) in sym_dims:
py_inferred[d] = (arg.name, i)
for arg in args:
if arg.name in py_inferred:
py_inferred[arg] = py_inferred.pop(arg.name)
# Filter inferred arguments from py_args
py_args = [a for a in py_args if a not in py_inferred]
return py_returns, py_args, py_locals, py_inferred
def _prototype_arg(self, arg):
mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
np_types = {'double': 'np.double_t',
'int': 'np.int_t'}
t = arg.get_datatype('c')
if arg.dimensions:
self._need_numpy = True
ndim = len(arg.dimensions)
mtype = np_types[t]
return mat_dec.format(mtype=mtype, ndim=ndim, name=self._string_var(arg.name))
else:
return "%s %s" % (t, self._string_var(arg.name))
def _declare_arg(self, arg):
proto = self._prototype_arg(arg)
if arg.dimensions:
shape = '(' + ','.join(self._string_var(i[1] + 1) for i in arg.dimensions) + ')'
return proto + " = np.empty({shape})".format(shape=shape)
else:
return proto + " = 0"
def _call_arg(self, arg):
if arg.dimensions:
t = arg.get_datatype('c')
return "<{}*> {}.data".format(t, self._string_var(arg.name))
elif isinstance(arg, ResultBase):
return "&{}".format(self._string_var(arg.name))
else:
return self._string_var(arg.name)
def _string_var(self, var):
printer = self.generator.printer.doprint
return printer(var)
class F2PyCodeWrapper(CodeWrapper):
"""Wrapper that uses f2py"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the f2py '
'backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super().__init__(*args, **kwargs)
@property
def command(self):
filename = self.filename + '.' + self.generator.code_extension
args = ['-c', '-m', self.module_name, filename]
command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args
return command
def _prepare_files(self, routine):
pass
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
# Here we define a lookup of backends -> tuples of languages. For now, each
# tuple is of length 1, but if a backend supports more than one language,
# the most preferable language is listed first.
_lang_lookup = {'CYTHON': ('C99', 'C89', 'C'),
'F2PY': ('F95',),
'NUMPY': ('C99', 'C89', 'C'),
'DUMMY': ('F95',)} # Dummy here just for testing
def _infer_language(backend):
"""For a given backend, return the top choice of language"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
return langs[0]
def _validate_backend_language(backend, language):
"""Throws error if backend and language are incompatible"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
if language.upper() not in langs:
raise ValueError(("Backend {} and language {} are "
"incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(expr, language=None, backend='f2py', tempdir=None, args=None,
flags=None, verbose=False, helpers=None, code_gen=None, **kwargs):
"""Generates Python callable binaries based on the math expression.
Parameters
==========
expr
The SymPy expression that should be wrapped as a binary routine.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'f2py' [default],
or 'cython'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
args : iterable, optional
An ordered iterable of symbols. Specifies the argument sequence for the
function.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : 3-tuple or iterable of 3-tuples, optional
Used to define auxiliary expressions needed for the main expr. If the
main expression needs to call a specialized function it should be
passed in via ``helpers``. Autowrap will then make sure that the
compiled main expression can link to the helper routine. Items should
be 3-tuples with (<function_name>, <sympy_expression>,
<argument_tuple>). It is mandatory to supply an argument sequence to
helper routines.
code_gen : CodeGen instance
An instance of a CodeGen subclass. Overrides ``language``.
include_dirs : [string]
A list of directories to search for C/C++ header files (in Unix form
for portability).
library_dirs : [string]
A list of directories to search for C/C++ libraries at link time.
libraries : [string]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and compilers
where "command line" makes sense, this is typically a list of
command-line arguments, but for other platforms it could be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create a
new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.autowrap import autowrap
>>> expr = ((x - y + z)**(13)).expand()
>>> binary_func = autowrap(expr)
>>> binary_func(1, 4, 2)
-1.0
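A sketch of the ``helpers`` mechanism (names are placeholders and a working
toolchain is assumed, hence the skips). The helper expression must appear
literally inside the main expression so that autowrap can substitute a call
to the compiled helper routine:
>>> g = autowrap((x + y)**2 + 3*(x + y),
...              helpers=[('xy_sum', x + y, (x, y))])  # doctest: +SKIP
>>> g(1, 2)  # doctest: +SKIP
18.0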
"""
if language:
if not isinstance(language, type):
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
# two cases 1) helpers is an iterable of 3-tuples and 2) helpers is a
# 3-tuple
if iterable(helpers) and len(helpers) != 0 and iterable(helpers[0]):
helpers = helpers if helpers else ()
else:
helpers = [helpers] if helpers else ()
args = list(args) if iterable(args, exclude=set) else args
if code_gen is None:
code_gen = get_code_generator(language, "autowrap")
CodeWrapperClass = {
'F2PY': F2PyCodeWrapper,
'CYTHON': CythonCodeWrapper,
'DUMMY': DummyWrapper
}[backend.upper()]
code_wrapper = CodeWrapperClass(code_gen, tempdir, flags if flags else (),
verbose, **kwargs)
helps = []
for name_h, expr_h, args_h in helpers:
helps.append(code_gen.routine(name_h, expr_h, args_h))
for name_h, expr_h, args_h in helpers:
if expr.has(expr_h):
name_h = binary_function(name_h, expr_h, backend='dummy')
expr = expr.subs(expr_h, name_h(*args_h))
try:
routine = code_gen.routine('autofunc', expr, args)
except CodeGenArgumentListError as e:
# if all missing arguments are for pure output, we simply attach them
# at the end and try again, because the wrappers will silently convert
# them to return values anyway.
new_args = []
for missing in e.missing_args:
if not isinstance(missing, OutputArgument):
raise
new_args.append(missing.name)
routine = code_gen.routine('autofunc', expr, args + new_args)
return code_wrapper.wrap_code(routine, helpers=helps)
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
"""Returns a SymPy function with expr as binary implementation
This is a convenience function that automates the steps needed to
autowrap the SymPy expression and attaching it to a Function object
with implemented_function().
Parameters
==========
symfunc : SymPy Function
The function to bind the callable to.
expr : SymPy Expression
The expression used to generate the function.
kwargs : dict
Any kwargs accepted by autowrap.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.utilities.autowrap import binary_function
>>> expr = ((x - y)**(25)).expand()
>>> f = binary_function('f', expr)
>>> type(f)
<class 'sympy.core.function.UndefinedFunction'>
>>> 2*f(x, y)
2*f(x, y)
>>> f(x, y).evalf(2, subs={x: 1, y: 2})
-1.0
"""
binary = autowrap(expr, **kwargs)
return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_outcalls = Template("*((double *)out${outnum}) = ${funcname}(${call_args});")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
${outcalls}
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
_ufunc_setup = Template("""\
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('',
parent_package,
top_path)
config.add_extension('${module}', sources=['${module}.c', '${filename}.c'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)""")
class UfuncifyCodeWrapper(CodeWrapper):
"""Wrapper for Ufuncify"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the numpy'
' backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super().__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def wrap_code(self, routines, helpers=None):
# This routine overrides CodeWrapper because we can't assume funcname == routines[0].name
# Therefore we have to break the CodeWrapper private API.
# There isn't an obvious way to extend multi-expr support to
# the other autowrap backends, so we limit this change to ufuncify.
helpers = helpers if helpers is not None else []
# We just need a consistent name
funcname = 'wrapped_' + str(id(routines) + id(helpers))
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routines, helpers)
self._prepare_files(routines, funcname)
self._process_files(routines)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, funcname)
def _generate_code(self, main_routines, helper_routines):
all_routines = main_routines + helper_routines
self.generator.write(
all_routines, self.filename, True, self.include_header,
self.include_empty)
def _prepare_files(self, routines, funcname):
# C
codefilename = self.module_name + '.c'
with open(codefilename, 'w') as f:
self.dump_c(routines, f, self.filename, funcname=funcname)
# setup.py
with open('setup.py', 'w') as f:
self.dump_setup(f)
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def dump_setup(self, f):
setup = _ufunc_setup.substitute(module=self.module_name,
filename=self.filename)
f.write(setup)
def dump_c(self, routines, f, prefix, funcname=None):
"""Write a C file with Python wrappers
This file contains all the definitions of the routines in c code.
Parameters
==========
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to name the imported module.
funcname
Name of the main function to be returned.
"""
if funcname is None:
if len(routines) == 1:
funcname = routines[0].name
else:
msg = 'funcname must be specified for multiple output routines'
raise ValueError(msg)
functions = []
function_creation = []
ufunc_init = []
module = self.module_name
include_file = "\"{}.h\"".format(prefix)
top = _ufunc_top.substitute(include_file=include_file, module=module)
name = funcname
# Partition the C function arguments into categories
# Here we assume all routines accept the same arguments
r_index = 0
py_in, _ = self._partition_args(routines[0].arguments)
n_in = len(py_in)
n_out = len(routines)
# Declare Args
form = "char *{0}{1} = args[{2}];"
arg_decs = [form.format('in', i, i) for i in range(n_in)]
arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_args = '\n '.join(arg_decs)
# Declare Steps
form = "npy_intp {0}{1}_step = steps[{2}];"
step_decs = [form.format('in', i, i) for i in range(n_in)]
step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_steps = '\n '.join(step_decs)
# Call Args
form = "*(double *)in{0}"
call_args = ', '.join([form.format(a) for a in range(n_in)])
# Step Increments
form = "{0}{1} += {0}{1}_step;"
step_incs = [form.format('in', i) for i in range(n_in)]
step_incs.extend([form.format('out', i) for i in range(n_out)])
step_increments = '\n '.join(step_incs)
# Types
n_types = n_in + n_out
types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
# Docstring
docstring = '"Created in SymPy with Ufuncify"'
# Function Creation
function_creation.append("PyObject *ufunc{};".format(r_index))
# Ufunc initialization
init_form = _ufunc_init_form.substitute(module=module,
funcname=name,
docstring=docstring,
n_in=n_in, n_out=n_out,
ind=r_index)
ufunc_init.append(init_form)
outcalls = [_ufunc_outcalls.substitute(
outnum=i, call_args=call_args, funcname=routines[i].name) for i in
range(n_out)]
body = _ufunc_body.substitute(module=module, funcname=name,
declare_args=declare_args,
declare_steps=declare_steps,
call_args=call_args,
step_increments=step_increments,
n_types=n_types, types=types,
outcalls='\n '.join(outcalls))
functions.append(body)
body = '\n\n'.join(functions)
ufunc_init = '\n '.join(ufunc_init)
function_creation = '\n '.join(function_creation)
bottom = _ufunc_bottom.substitute(module=module,
ufunc_init=ufunc_init,
function_creation=function_creation)
text = [top, body, bottom]
f.write('\n\n'.join(text))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_in = []
py_out = []
for arg in args:
if isinstance(arg, OutputArgument):
py_out.append(arg)
elif isinstance(arg, InOutArgument):
raise ValueError("Ufuncify doesn't support InOutArguments")
else:
py_in.append(arg)
return py_in, py_out
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
flags=None, verbose=False, helpers=None, **kwargs):
"""Generates a binary function that supports broadcasting on numpy arrays.
Parameters
==========
args : iterable
Either a Symbol or an iterable of symbols. Specifies the argument
sequence for the function.
expr
A SymPy expression that defines the element wise operation.
language : string, optional
If supplied (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'numpy' [default],
'cython', or 'f2py'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in
the specified path.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can
be helpful for debugging.
helpers : iterable, optional
Used to define auxiliary expressions needed for the main expr. If
the main expression needs to call a specialized function it should
be put in the ``helpers`` iterable. Autowrap will then make sure
that the compiled main expression can link to the helper routine.
Items should be tuples with (<function_name>, <sympy_expression>,
<arguments>). It is mandatory to supply an argument sequence to
helper routines.
kwargs : dict
These kwargs will be passed to autowrap if the `f2py` or `cython`
backend is used and ignored if the `numpy` backend is used.
Notes
=====
The default backend ('numpy') will create actual instances of
``numpy.ufunc``. These support n-dimensional broadcasting and implicit type
conversion. Use of the other backends will result in a "ufunc-like"
function, which requires equal length 1-dimensional arrays for all
arguments, and will not perform any type conversions.
References
==========
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
Examples
========
>>> from sympy.utilities.autowrap import ufuncify
>>> from sympy.abc import x, y
>>> import numpy as np
>>> f = ufuncify((x, y), y + x**2)
>>> type(f)
<class 'numpy.ufunc'>
>>> f([1, 2, 3], 2)
array([ 3., 6., 11.])
>>> f(np.arange(5), 3)
array([ 3., 4., 7., 12., 19.])
For the 'f2py' and 'cython' backends, inputs are required to be equal length
1-dimensional arrays. The 'f2py' backend will perform type conversion, but
the Cython backend will error if the inputs are not of the expected type.
>>> f_fortran = ufuncify((x, y), y + x**2, backend='f2py')
>>> f_fortran(1, 2)
array([ 3.])
>>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]))
array([ 2., 6., 12.])
>>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
>>> f_cython(1, 2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
>>> f_cython(np.array([1.0]), np.array([2.0]))
array([ 3.])
"""
if isinstance(args, Symbol):
args = (args,)
else:
args = tuple(args)
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
if backend.upper() == 'NUMPY':
# maxargs is set by numpy compile-time constant NPY_MAXARGS
# If a future version of numpy modifies or removes this restriction
# this variable should be changed or removed
maxargs = 32
helps = []
for name, expr, args in helpers:
helps.append(make_routine(name, expr, args))
code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"), tempdir,
flags, verbose)
if not isinstance(expr, (list, tuple)):
expr = [expr]
if len(expr) == 0:
raise ValueError('Expression iterable has zero length')
if len(expr) + len(args) > maxargs:
msg = ('Cannot create ufunc with more than {0} total arguments: '
'got {1} in, {2} out')
raise ValueError(msg.format(maxargs, len(args), len(expr)))
routines = [make_routine('autofunc{}'.format(idx), exprx, args) for
idx, exprx in enumerate(expr)]
return code_wrapper.wrap_code(routines, helpers=helps)
else:
# Dummies are used for all added expressions to prevent name clashes
# within the original expression.
y = IndexedBase(Dummy('y'))
m = Dummy('m', integer=True)
i = Idx(Dummy('i', integer=True), m)
f_dummy = Dummy('f')
f = implemented_function('%s_%d' % (f_dummy.name, f_dummy.dummy_index), Lambda(args, expr))
# For each of the args create an indexed version.
indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
# Order the arguments (out, args, dim)
args = [y] + indexed_args + [m]
args_with_indices = [a[i] for a in indexed_args]
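# Conceptually (a sketch of the generated routine, not literal code), the
# wrapped function computes, element by element,
#   y[i] = f(arg0[i], arg1[i], ...)   for i in range(m)
# and autowrap compiles that indexed equation with the chosen backend.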
return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
tempdir, args, flags, verbose, helpers, **kwargs)
|
754fe07e4bf36d118637c8b714f68a7983e3938ba26fcf4a89116120f6b33903 | """
This module adds several functions for interactive source code inspection.
"""
from sympy.utilities.decorator import deprecated
import inspect
@deprecated(useinstead="?? in IPython/Jupyter or inspect.getsource", issue=14905, deprecated_since_version="1.3")
def source(object):
"""
Prints the source code of a given object.
"""
print('In file: %s' % inspect.getsourcefile(object))
print(inspect.getsource(object))
def get_class(lookup_view):
"""
Convert a string version of a class name to the object.
For example, get_class('sympy.core.Basic') will return
class Basic located in module sympy.core
"""
if isinstance(lookup_view, str):
mod_name, func_name = get_mod_func(lookup_view)
if func_name != '':
lookup_view = getattr(
__import__(mod_name, {}, {}, ['*']), func_name)
if not callable(lookup_view):
raise AttributeError(
"'%s.%s' is not a callable." % (mod_name, func_name))
return lookup_view
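# Illustrative sketch (assumption, not part of the original module): the
# lookup above is essentially a thin wrapper around __import__ plus getattr:
#
#   Basic = get_class('sympy.core.basic.Basic')
#   # roughly: getattr(__import__('sympy.core.basic', {}, {}, ['*']), 'Basic')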
def get_mod_func(callback):
"""
Splits the string path to a class into a string path to the module
and the name of the class.
Examples
========
>>> from sympy.utilities.source import get_mod_func
>>> get_mod_func('sympy.core.basic.Basic')
('sympy.core.basic', 'Basic')
"""
dot = callback.rfind('.')
if dot == -1:
return callback, ''
return callback[:dot], callback[dot + 1:]
|
f848bc1b8456f90b9b84c9b0bbd4d000f751df0962127251faf335783900b475 | """This module contains some general purpose utilities that are used across
SymPy.
"""
from .iterables import (flatten, group, take, subsets,
variations, numbered_symbols, cartes, capture, dict_merge,
prefixes, postfixes, sift, topological_sort, unflatten,
has_dups, has_variety, reshape, rotations)
from .misc import filldedent
from .lambdify import lambdify
from .source import source
from .decorator import threaded, xthreaded, public, memoize_property
from .timeutils import timed
__all__ = [
'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols',
'cartes', 'capture', 'dict_merge', 'prefixes', 'postfixes', 'sift',
'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape',
'rotations',
'filldedent',
'lambdify',
'source',
'threaded', 'xthreaded', 'public', 'memoize_property',
'timed',
]
|
dedafc0d94b11e3e08fa5931a6038b1dc498a470656891d189d1ec26fb018c25 | """
This module provides convenient functions to transform SymPy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from typing import Any, Dict as tDict, Iterable, Union as tUnion, TYPE_CHECKING
import builtins
import inspect
import keyword
import textwrap
import linecache
# Required despite static analysis claiming it is not used
from sympy.external import import_module # noqa:F401
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import (is_sequence, iterable,
NotIterable, flatten)
from sympy.utilities.misc import filldedent
if TYPE_CHECKING:
import sympy.core.expr
__doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
MATH_DEFAULT = {} # type: tDict[str, Any]
MPMATH_DEFAULT = {} # type: tDict[str, Any]
NUMPY_DEFAULT = {"I": 1j} # type: tDict[str, Any]
SCIPY_DEFAULT = {"I": 1j} # type: tDict[str, Any]
CUPY_DEFAULT = {"I": 1j} # type: tDict[str, Any]
TENSORFLOW_DEFAULT = {} # type: tDict[str, Any]
SYMPY_DEFAULT = {} # type: tDict[str, Any]
NUMEXPR_DEFAULT = {} # type: tDict[str, Any]
# These are the namespaces the lambda functions will use.
# These are separate from the names above because they are modified
# throughout this file, whereas the defaults should remain unmodified.
MATH = MATH_DEFAULT.copy()
MPMATH = MPMATH_DEFAULT.copy()
NUMPY = NUMPY_DEFAULT.copy()
SCIPY = SCIPY_DEFAULT.copy()
CUPY = CUPY_DEFAULT.copy()
TENSORFLOW = TENSORFLOW_DEFAULT.copy()
SYMPY = SYMPY_DEFAULT.copy()
NUMEXPR = NUMEXPR_DEFAULT.copy()
# Mappings between SymPy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
# NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses
# of Function to automatically evalf.
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableDenseMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci",
"RisingFactorial": "rf",
"FallingFactorial": "ff",
"betainc_regularized": "betainc",
}
NUMPY_TRANSLATIONS = {
"Heaviside": "heaviside",
} # type: tDict[str, str]
SCIPY_TRANSLATIONS = {} # type: tDict[str, str]
CUPY_TRANSLATIONS = {} # type: tDict[str, str]
TENSORFLOW_TRANSLATIONS = {} # type: tDict[str, str]
NUMEXPR_TRANSLATIONS = {} # type: tDict[str, str]
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)),
"scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import numpy; import scipy; from scipy import *; from scipy.special import *",)),
"cupy": (CUPY, CUPY_DEFAULT, CUPY_TRANSLATIONS, ("import cupy",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
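# Illustrative sketch (assumption): after ``_import("math")`` runs, the
# translation {"ceiling": "ceil"} means the MATH namespace exposes the SymPy
# name under the stdlib implementation, roughly:
#
#   import math
#   namespace = {}
#   exec("from math import *", {}, namespace)
#   namespace["ceiling"] = namespace["ceil"]   # i.e. math.ceil
#   assert namespace["ceiling"] is math.ceil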
def _import(module, reload=False):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "scipy", "cupy", "sympy", "tensorflow" or "numexpr".
These dictionaries map names of Python functions to their equivalent in
other modules.
"""
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module cannot be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"Cannot import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
# For computing the modulus of a SymPy expression we use the builtin abs
# function, instead of the previously used fabs function for all
# translation modules. This is because the fabs function in the math
# module does not accept complex valued arguments. (see issue 9474). The
# only exception, where we don't use the builtin abs function is the
# mpmath translation module, because mpmath.fabs returns mpf objects in
# contrast to abs().
if 'Abs' not in namespace:
namespace['Abs'] = abs
# Used for dynamically generated filenames that are inserted into the
# linecache.
_lambdify_generated_counter = 1
@doctest_depends_on(modules=('numpy', 'scipy', 'tensorflow',), python_version=(3,))
def lambdify(args: tUnion[Iterable, 'sympy.core.expr.Expr'], expr: 'sympy.core.expr.Expr', modules=None, printer=None, use_imps=True,
dummify=False, cse=False):
"""Convert a SymPy expression into a function that allows for fast
numeric evaluation.
.. warning::
This function uses ``exec``, and thus shouldn't be used on
unsanitized input.
.. versionchanged:: 1.7.0
Passing a set for the *args* parameter is deprecated as sets are
unordered. Use an ordered iterable such as a list or tuple.
Explanation
===========
For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an
equivalent NumPy function that numerically evaluates it:
>>> from sympy import sin, cos, symbols, lambdify
>>> import numpy as np
>>> x = symbols('x')
>>> expr = sin(x) + cos(x)
>>> expr
sin(x) + cos(x)
>>> f = lambdify(x, expr, 'numpy')
>>> a = np.array([1, 2])
>>> f(a)
[1.38177329 0.49315059]
The primary purpose of this function is to provide a bridge from SymPy
expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath,
and tensorflow. In general, SymPy functions do not work with objects from
other libraries, such as NumPy arrays, and functions from numeric
libraries like NumPy or mpmath do not work on SymPy expressions.
``lambdify`` bridges the two by converting a SymPy expression to an
equivalent numeric function.
The basic workflow with ``lambdify`` is to first create a SymPy expression
representing whatever mathematical function you wish to evaluate. This
should be done using only SymPy functions and expressions. Then, use
``lambdify`` to convert this to an equivalent function for numerical
evaluation. For instance, above we created ``expr`` using the SymPy symbol
``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an
equivalent NumPy function ``f``, and called it on a NumPy array ``a``.
Parameters
==========
args : List[Symbol]
A variable or a list of variables whose nesting represents the
nesting of the arguments that will be passed to the function.
Variables can be symbols, undefined functions, or matrix symbols.
>>> from sympy import Eq
>>> from sympy.abc import x, y, z
The list of variables should match the structure of how the
arguments will be passed to the function. Simply enclose the
parameters as they will be passed in a list.
To call a function like ``f(x)`` then ``[x]``
should be the first argument to ``lambdify``; for this
case a single ``x`` can also be used:
>>> f = lambdify(x, x + 1)
>>> f(1)
2
>>> f = lambdify([x], x + 1)
>>> f(1)
2
To call a function like ``f(x, y)`` then ``[x, y]`` will
be the first argument of the ``lambdify``:
>>> f = lambdify([x, y], x + y)
>>> f(1, 1)
2
To call a function with a single 3-element tuple like
``f((x, y, z))`` then ``[(x, y, z)]`` will be the first
argument of the ``lambdify``:
>>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2))
>>> f((3, 4, 5))
True
If two args will be passed and the first is a scalar but
the second is a tuple with two arguments then the items
in the list should match that structure:
>>> f = lambdify([x, (y, z)], x + y + z)
>>> f(1, (2, 3))
6
expr : Expr
An expression, list of expressions, or matrix to be evaluated.
Lists may be nested.
If the expression is a list, the output will also be a list.
>>> f = lambdify(x, [x, [x + 1, x + 2]])
>>> f(1)
[1, [2, 3]]
If it is a matrix, an array will be returned (for the NumPy module).
>>> from sympy import Matrix
>>> f = lambdify(x, Matrix([x, x + 1]))
>>> f(1)
[[1]
[2]]
Note that the argument order here (variables then expression) is used
to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works
(roughly) like ``lambda x: expr``
(see :ref:`lambdify-how-it-works` below).
modules : str, optional
Specifies the numeric library to use.
If not specified, *modules* defaults to:
- ``["scipy", "numpy"]`` if SciPy is installed
- ``["numpy"]`` if only NumPy is installed
- ``["math", "mpmath", "sympy"]`` if neither is installed.
That is, SymPy functions are replaced as far as possible by
either ``scipy`` or ``numpy`` functions if available, and Python's
standard library ``math``, or ``mpmath`` functions otherwise.
*modules* can be one of the following types:
- The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``,
``"scipy"``, ``"sympy"``, or ``"tensorflow"``. This uses the
corresponding printer and namespace mapping for that module.
- A module (e.g., ``math``). This uses the global namespace of the
module. If the module is one of the above known modules, it will
also use the corresponding printer and namespace mapping
(i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``).
- A dictionary that maps names of SymPy functions to arbitrary
functions
(e.g., ``{'sin': custom_sin}``).
- A list that contains a mix of the arguments above, with higher
priority given to entries appearing first
(e.g., to use the NumPy module but override the ``sin`` function
with a custom version, you can use
``[{'sin': custom_sin}, 'numpy']``).
dummify : bool, optional
Whether or not the variables in the provided expression that are not
valid Python identifiers are substituted with dummy symbols.
This allows for undefined functions like ``Function('f')(t)`` to be
supplied as arguments. By default, the variables are only dummified
if they are not valid Python identifiers.
Set ``dummify=True`` to replace all arguments with dummy symbols
(if ``args`` is not a string) - for example, to ensure that the
arguments do not redefine any built-in names.
cse : bool, or callable, optional
Large expressions can be computed more efficiently when
common subexpressions are identified and precomputed before
being used multiple times. Finding the subexpressions will make
creation of the 'lambdify' function slower, however.
When ``True``, ``sympy.simplify.cse`` is used; when ``False`` (the
default), no common subexpression elimination is performed.
Alternatively, the user may pass a callable matching the ``cse``
signature.
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
``lambdify`` can be used to translate SymPy expressions into mpmath
functions. This may be preferable to using ``evalf`` (which uses mpmath on
the backend) in some cases.
>>> f = lambdify(x, sin(x), 'mpmath')
>>> f(1)
0.8414709848078965
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
The ``flatten`` function can be used to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in ``expr`` can also carry their own numerical
implementations, in a callable attached to the ``_imp_`` attribute. This
can be used with undefined functions using the ``implemented_function``
factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
Usage with Tensorflow:
>>> import tensorflow as tf
>>> from sympy import Max, sin, lambdify
>>> from sympy.abc import x
>>> f = Max(x, sin(x))
>>> func = lambdify(x, f, 'tensorflow')
In TensorFlow v2, eager execution is enabled by default.
If you want results that are compatible across TensorFlow v1 and v2,
as in this tutorial, run this line.
>>> tf.compat.v1.enable_eager_execution()
With eager execution enabled, you can get the result immediately,
just as with numpy.
If you pass TensorFlow objects, you may get an ``EagerTensor``
object instead of a plain value.
>>> result = func(tf.constant(1.0))
>>> print(result)
tf.Tensor(1.0, shape=(), dtype=float32)
>>> print(result.__class__)
<class 'tensorflow.python.framework.ops.EagerTensor'>
You can use ``.numpy()`` to get the numpy value of the tensor.
>>> result.numpy()
1.0
>>> var = tf.Variable(2.0)
>>> result = func(var) # also works for tf.Variable and tf.Placeholder
>>> result.numpy()
2.0
And it works with any shape array.
>>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> result = func(tensor)
>>> result.numpy()
[[1. 2.]
[3. 4.]]
Notes
=====
- For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
``implemented_function`` and user defined subclasses of Function. If
specified, numexpr may be the only option in modules. The official list
of numexpr functions can be found at:
https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions
- In previous versions of SymPy, ``lambdify`` replaced ``Matrix`` with
``numpy.matrix`` by default. As of SymPy 1.0 ``numpy.array`` is the
default. To get the old default behavior you must pass in
``[{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']`` to the
``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
[[1]
[2]]
- In the above examples, the generated functions can accept scalar
values or numpy arrays as arguments. However, in some cases
the generated function relies on the input being a numpy array:
>>> from sympy import Piecewise
>>> from sympy.testing.pytest import ignore_warnings
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
>>> with ignore_warnings(RuntimeWarning):
... f(numpy.array([-1, 0, 1, 2]))
[-1. 0. 1. 0.5]
>>> f(0)
Traceback (most recent call last):
...
ZeroDivisionError: division by zero
In such cases, the input should be wrapped in a numpy array:
>>> with ignore_warnings(RuntimeWarning):
... float(f(numpy.array([0])))
0.0
Or if numpy functionality is not required another module can be used:
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
>>> f(0)
0
.. _lambdify-how-it-works:
How it works
============
When using this function, it helps a great deal to have an idea of what it
is doing. At its core, lambdify is nothing more than a namespace
translation, on top of a special printer that makes some corner cases work
properly.
To understand lambdify, first we must properly understand how Python
namespaces work. Say we had two files. One called ``sin_cos_sympy.py``,
with
.. code:: python
# sin_cos_sympy.py
from sympy.functions.elementary.trigonometric import (cos, sin)
def sin_cos(x):
return sin(x) + cos(x)
and one called ``sin_cos_numpy.py`` with
.. code:: python
# sin_cos_numpy.py
from numpy import sin, cos
def sin_cos(x):
return sin(x) + cos(x)
The two files define an identical function ``sin_cos``. However, in the
first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and
``cos``. In the second, they are defined as the NumPy versions.
If we were to import the first file and use the ``sin_cos`` function, we
would get something like
>>> from sin_cos_sympy import sin_cos # doctest: +SKIP
>>> sin_cos(1) # doctest: +SKIP
cos(1) + sin(1)
On the other hand, if we imported ``sin_cos`` from the second file, we
would get
>>> from sin_cos_numpy import sin_cos # doctest: +SKIP
>>> sin_cos(1) # doctest: +SKIP
1.38177329068
In the first case we got a symbolic output, because it used the symbolic
``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric
result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions
from NumPy. But notice that the versions of ``sin`` and ``cos`` that were
used was not inherent to the ``sin_cos`` function definition. Both
``sin_cos`` definitions are exactly the same. Rather, it was based on the
names defined at the module where the ``sin_cos`` function was defined.
The key point here is that when a function in Python references a name that
is not defined in the function, that name is looked up in the "global"
namespace of the module where that function is defined.
Now, in Python, we can emulate this behavior without actually writing a
file to disk using the ``exec`` function. ``exec`` takes a string
containing a block of Python code, and a dictionary that should contain
the global variables of the module. It then executes the code "in" that
dictionary, as if it were the module globals. The following is equivalent
to the ``sin_cos`` defined in ``sin_cos_sympy.py``:
>>> import sympy
>>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos}
>>> exec('''
... def sin_cos(x):
... return sin(x) + cos(x)
... ''', module_dictionary)
>>> sin_cos = module_dictionary['sin_cos']
>>> sin_cos(1)
cos(1) + sin(1)
and similarly with ``sin_cos_numpy``:
>>> import numpy
>>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos}
>>> exec('''
... def sin_cos(x):
... return sin(x) + cos(x)
... ''', module_dictionary)
>>> sin_cos = module_dictionary['sin_cos']
>>> sin_cos(1)
1.38177329068
So now we can get an idea of how ``lambdify`` works. The name "lambdify"
comes from the fact that we can think of something like ``lambdify(x,
sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where
``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why
the symbols argument is first in ``lambdify``, as opposed to most SymPy
functions where it comes after the expression: to better mimic the
``lambda`` keyword.
``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and
1. Converts it to a string
2. Creates a module globals dictionary based on the modules that are
passed in (by default, it uses the NumPy module)
3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the
list of variables separated by commas, and ``{expr}`` is the string
created in step 1., then ``exec``s that string with the module globals
namespace and returns ``func``.
In fact, functions returned by ``lambdify`` support inspection. So you can
see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you
are using IPython or the Jupyter notebook.
>>> f = lambdify(x, sin(x) + cos(x))
>>> import inspect
>>> print(inspect.getsource(f))
def _lambdifygenerated(x):
return sin(x) + cos(x)
This shows us the source code of the function, but not the namespace it
was defined in. We can inspect that by looking at the ``__globals__``
attribute of ``f``:
>>> f.__globals__['sin']
<ufunc 'sin'>
>>> f.__globals__['cos']
<ufunc 'cos'>
>>> f.__globals__['sin'] is numpy.sin
True
This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be
``numpy.sin`` and ``numpy.cos``.
Note that there are some convenience layers in each of these steps, but at
the core, this is how ``lambdify`` works. Step 1 is done using the
``LambdaPrinter`` printers defined in the printing module (see
:mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions
to define how they should be converted to a string for different modules.
You can change which printer ``lambdify`` uses by passing a custom printer
in to the ``printer`` argument.
Step 2 is augmented by certain translations. There are default
translations for each module, but you can provide your own by passing a
list to the ``modules`` argument. For instance,
>>> def mysin(x):
... print('taking the sin of', x)
... return numpy.sin(x)
...
>>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy'])
>>> f(1)
taking the sin of 1
0.8414709848078965
The globals dictionary is generated from the list by merging the
dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The
merging is done so that earlier items take precedence, which is why
``mysin`` is used above instead of ``numpy.sin``.
If you want to modify the way ``lambdify`` works for a given function, it
is usually easiest to do so by modifying the globals dictionary as such.
In more complicated cases, it may be necessary to create and pass in a
custom printer.
Finally, step 3 is augmented with certain convenience operations, such as
the addition of a docstring.
Understanding how ``lambdify`` works can make it easier to avoid certain
gotchas when using it. For instance, a common mistake is to create a
lambdified function for one module (say, NumPy), and pass it objects from
another (say, a SymPy expression).
For instance, say we create
>>> from sympy.abc import x
>>> f = lambdify(x, x + 1, 'numpy')
Now if we pass in a NumPy array, we get that array plus 1
>>> import numpy
>>> a = numpy.array([1, 2])
>>> f(a)
[2 3]
But what happens if you make the mistake of passing in a SymPy expression
instead of a NumPy array:
>>> f(x + 1)
x + 2
This worked, but it was only by accident. Now take a different lambdified
function:
>>> from sympy import sin
>>> g = lambdify(x, x + sin(x), 'numpy')
This works as expected on NumPy arrays:
>>> g(a)
[1.84147098 2.90929743]
But if we try to pass in a SymPy expression, it fails
>>> try:
... g(x + 1)
... # NumPy release after 1.17 raises TypeError instead of
... # AttributeError
... except (AttributeError, TypeError):
... raise AttributeError() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError:
Now, let's look at what happened. The reason this fails is that ``g``
calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not
know how to operate on a SymPy object. **As a general rule, NumPy
functions do not know how to operate on SymPy expressions, and SymPy
functions do not know how to operate on NumPy arrays. This is why lambdify
exists: to provide a bridge between SymPy and NumPy.**
However, why is it that ``f`` did work? That's because ``f`` doesn't call
any functions, it only adds 1. So the resulting function that is created,
``def _lambdifygenerated(x): return x + 1`` does not depend on the globals
namespace it is defined in. Thus it works, but only by accident. A future
version of ``lambdify`` may remove this behavior.
Be aware that certain implementation details described here may change in
future versions of SymPy. The API of passing in custom modules and
printers will not change, but the details of how a lambda function is
created may change. However, the basic idea will remain the same, and
understanding it will be helpful to understanding the behavior of
lambdify.
**In general: you should create lambdified functions for one module (say,
NumPy), and only pass it input types that are compatible with that module
(say, NumPy arrays).** Remember that by default, if the ``module``
argument is not provided, ``lambdify`` creates functions using the NumPy
and SciPy namespaces.
"""
from sympy.core.symbol import Symbol
from sympy.core.expr import Expr
# If the user hasn't specified any modules, use what is available.
if modules is None:
try:
_import("scipy")
except ImportError:
try:
_import("numpy")
except ImportError:
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
else:
modules = ["numpy"]
else:
modules = ["numpy", "scipy"]
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {} # type: tDict[str, Any]
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
#Try if you can extract symbols from the expression.
#Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if printer is None:
if _module_present('mpmath', namespaces):
from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore
elif _module_present('scipy', namespaces):
from sympy.printing.numpy import SciPyPrinter as Printer # type: ignore
elif _module_present('numpy', namespaces):
from sympy.printing.numpy import NumPyPrinter as Printer # type: ignore
elif _module_present('cupy', namespaces):
from sympy.printing.numpy import CuPyPrinter as Printer # type: ignore
elif _module_present('numexpr', namespaces):
from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore
elif _module_present('tensorflow', namespaces):
from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore
elif _module_present('sympy', namespaces):
from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore
else:
from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore
user_functions = {}
for m in namespaces[::-1]:
if isinstance(m, dict):
for k in m:
user_functions[k] = k
printer = Printer({'fully_qualified_modules': False, 'inline': True,
'allow_unknown_functions': True,
'user_functions': user_functions})
if isinstance(args, set):
SymPyDeprecationWarning(
feature="The list of arguments is a `set`. This leads to unpredictable results",
useinstead=": Convert set into list or tuple",
issue=20013,
deprecated_since_version="1.6.3"
).warn()
# Get the names of the args, for creating a docstring
iterable_args: Iterable = (args,) if isinstance(args, Expr) else args
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore
for n, var in enumerate(iterable_args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create the function definition code and execute it
funcname = '_lambdifygenerated'
if _module_present('tensorflow', namespaces):
funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) # type: _EvaluatorPrinter
else:
funcprinter = _EvaluatorPrinter(printer, dummify)
if cse == True:
from sympy.simplify.cse_main import cse as _cse
cses, _expr = _cse(expr, list=False)
elif callable(cse):
cses, _expr = cse(expr)
else:
cses, _expr = (), expr
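# ``cses`` is an iterable of (symbol, sub-expression) pairs that the
# generated function assigns before the final return statement, and
# ``_expr`` is the expression rewritten in terms of those symbols.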
funcstr = funcprinter.doprint(funcname, iterable_args, _expr, cses=cses)
# Collect the module imports from the code printers.
imp_mod_lines = []
for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
for k in keys:
if k not in namespace:
ln = "from %s import %s" % (mod, k)
try:
exec(ln, {}, namespace)
except ImportError:
# Tensorflow 2.0 has issues with importing a specific
# function from its submodule.
# https://github.com/tensorflow/tensorflow/issues/33022
ln = "%s = %s.%s" % (k, mod, k)
exec(ln, {}, namespace)
imp_mod_lines.append(ln)
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
funclocals = {} # type: tDict[str, Any]
global _lambdify_generated_counter
filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
_lambdify_generated_counter += 1
c = compile(funcstr, filename, 'exec')
exec(c, namespace, funclocals)
# mtime has to be None or else linecache.checkcache will remove it
linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore
func = funclocals[funcname]
# Apply the docstring
sig = "func({})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = (
"Created with lambdify. Signature:\n\n"
"{sig}\n\n"
"Expression:\n\n"
"{expr}\n\n"
"Source code:\n\n"
"{src}\n\n"
"Imported modules:\n\n"
"{imp_mods}"
).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def _recursive_to_string(doprint, arg):
"""Functions in lambdify accept both SymPy types and non-SymPy types such as python
lists and tuples. This method ensures that we only call the doprint method of the
printer with SymPy types (so that the printer safely can use SymPy-methods)."""
from sympy.matrices.common import MatrixOperations
from sympy.core.basic import Basic
if isinstance(arg, (Basic, MatrixOperations)):
return doprint(arg)
elif iterable(arg):
if isinstance(arg, list):
left, right = "[]"
elif isinstance(arg, tuple):
left, right = "()"
else:
raise NotImplementedError("unhandled type: %s, %s" % (type(arg), arg))
return left +', '.join(_recursive_to_string(doprint, e) for e in arg) + right
elif isinstance(arg, str):
return arg
else:
return doprint(arg)
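# Illustrative sketch (assuming x and y are Symbols): nested containers are
# rendered recursively, e.g.
#   _recursive_to_string(str, [x, (y, 1)])   # -> '[x, (y, 1)]'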
def lambdastr(args, expr, printer=None, dummify=None):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(_0,_1[0],_1[1])'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy.core.basic import Basic
from sympy.core.function import (Derivative, Function)
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
# replace these with Dummy symbols
if isinstance(args, (Function, Symbol, Derivative)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
expr = sympify(expr)
# dict/tuple are sympified to Basic
if isinstance(expr, Basic):
expr = expr.xreplace(dummies_dict)
# list is not sympified to Basic
elif isinstance(expr, list):
expr = [sub_expr(a, dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector, NotIterable))
def flat_indexes(iterable):
n = 0
for el in iterable:
if isiter(el):
for ndeep in flat_indexes(el):
yield (n,) + ndeep
else:
yield (n,)
n += 1
if dummify is None:
dummify = any(isinstance(a, Basic) and
a.atoms(Function, Derivative) for a in (
args if isiter(args) else [args]))
if isiter(args) and any(isiter(i) for i in args):
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
indexed_args = ','.join([
dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]])
for ind in flat_indexes(args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args)
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = _recursive_to_string(lambdarepr, expr)
return "lambda %s: (%s)" % (args, expr)
class _EvaluatorPrinter:
def __init__(self, printer=None, dummify=False):
self._dummify = dummify
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import LambdaPrinter
if printer is None:
printer = LambdaPrinter()
if inspect.isfunction(printer):
self._exprrepr = printer
else:
if inspect.isclass(printer):
printer = printer()
self._exprrepr = printer.doprint
#if hasattr(printer, '_print_Symbol'):
# symbolrepr = printer._print_Symbol
#if hasattr(printer, '_print_Dummy'):
# dummyrepr = printer._print_Dummy
# Used to print the generated function arguments in a standard way
self._argrepr = LambdaPrinter().doprint
def doprint(self, funcname, args, expr, *, cses=()):
"""
Returns the function definition code as a string.
"""
from sympy.core.symbol import Dummy
funcbody = []
if not iterable(args):
args = [args]
argstrs, expr = self._preprocess(args, expr)
# Generate argument unpacking and final argument list
funcargs = []
unpackings = []
for argstr in argstrs:
if iterable(argstr):
funcargs.append(self._argrepr(Dummy()))
unpackings.extend(self._print_unpacking(argstr, funcargs[-1]))
else:
funcargs.append(argstr)
funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs))
# Wrap input arguments before unpacking
funcbody.extend(self._print_funcargwrapping(funcargs))
funcbody.extend(unpackings)
for s, e in cses:
if e is None:
funcbody.append('del {}'.format(s))
else:
funcbody.append('{} = {}'.format(s, self._exprrepr(e)))
str_expr = _recursive_to_string(self._exprrepr, expr)
if '\n' in str_expr:
str_expr = '({})'.format(str_expr)
funcbody.append('return {}'.format(str_expr))
funclines = [funcsig]
funclines.extend([' ' + line for line in funcbody])
return '\n'.join(funclines) + '\n'
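# Illustrative sketch (assumption): for funcname='_lambdifygenerated',
# args=[x] and expr=x + 1 (no cses), doprint returns roughly
#   def _lambdifygenerated(x):
#       return x + 1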
@classmethod
def _is_safe_ident(cls, ident):
return isinstance(ident, str) and ident.isidentifier() \
and not keyword.iskeyword(ident)
def _preprocess(self, args, expr):
"""Preprocess args, expr to replace arguments that do not map
to valid Python identifiers.
Returns string form of args, and updated expr.
"""
from sympy.core.basic import Basic
from sympy.core.sorting import ordered
from sympy.core.function import (Derivative, Function)
from sympy.core.symbol import Dummy, uniquely_named_symbol
from sympy.matrices import DeferredVector
from sympy.core.expr import Expr
# Args of type Dummy can cause name collisions with args
# of type Symbol. Force dummify of everything in this
# situation.
dummify = self._dummify or any(
isinstance(arg, Dummy) for arg in flatten(args))
argstrs = [None]*len(args)
for arg, i in reversed(list(ordered(zip(args, range(len(args)))))):
if iterable(arg):
s, expr = self._preprocess(arg, expr)
elif isinstance(arg, DeferredVector):
s = str(arg)
elif isinstance(arg, Basic) and arg.is_symbol:
s = self._argrepr(arg)
if dummify or not self._is_safe_ident(s):
dummy = Dummy()
if isinstance(expr, Expr):
dummy = uniquely_named_symbol(
dummy.name, expr, modify=lambda s: '_' + s)
s = self._argrepr(dummy)
expr = self._subexpr(expr, {arg: dummy})
elif dummify or isinstance(arg, (Function, Derivative)):
dummy = Dummy()
s = self._argrepr(dummy)
expr = self._subexpr(expr, {arg: dummy})
else:
s = str(arg)
argstrs[i] = s
return argstrs, expr
def _subexpr(self, expr, dummies_dict):
from sympy.matrices import DeferredVector
from sympy.core.sympify import sympify
expr = sympify(expr)
xreplace = getattr(expr, 'xreplace', None)
if xreplace is not None:
expr = xreplace(dummies_dict)
else:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()]
v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [self._subexpr(sympify(a), dummies_dict) for a in expr]
return expr
def _print_funcargwrapping(self, args):
"""Generate argument wrapping code.
args is the argument list of the generated function (strings).
Return value is a list of lines of code that will be inserted at
the beginning of the function definition.
"""
return []
def _print_unpacking(self, unpackto, arg):
"""Generate argument unpacking code.
arg is the function argument to be unpacked (a string), and
unpackto is a list or nested lists of the variable names (strings) to
unpack to.
"""
def unpack_lhs(lvalues):
return '[{}]'.format(', '.join(
unpack_lhs(val) if iterable(val) else val for val in lvalues))
return ['{} = {}'.format(unpack_lhs(unpackto), arg)]
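# Illustrative sketch (hypothetical names): unpacking the nested argument
# list ['x', ['y', 'z']] from a generated argument '_Dummy_1' produces the
# single line
#   [x, [y, z]] = _Dummy_1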
class _TensorflowEvaluatorPrinter(_EvaluatorPrinter):
def _print_unpacking(self, lvalues, rvalue):
"""Generate argument unpacking code.
This method is used when the input value is not iterable,
but can be indexed (see issue #14655).
"""
def flat_indexes(elems):
n = 0
for el in elems:
if iterable(el):
for ndeep in flat_indexes(el):
yield (n,) + ndeep
else:
yield (n,)
n += 1
indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind)))
for ind in flat_indexes(lvalues))
return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)]
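# Illustrative sketch (hypothetical names): for the same nested lvalues
# ['x', ['y', 'z']] and rvalue '_Dummy_1', the indexed form avoids iterating
# over the argument and instead produces
#   [x, y, z] = [_Dummy_1[0], _Dummy_1[1][0], _Dummy_1[1][1]]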
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as ``expr``. Examples
include SymPy expressions, as well as tuples, lists and dicts that may
contain SymPy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within ``expr`` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# SymPy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If ``symfunc`` is an Undefined function, create a new function
with the same name and the implemented function attached.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> f = implemented_function('f', lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
kwargs = {}
if isinstance(symfunc, UndefinedFunction):
kwargs = symfunc._kwargs
symfunc = symfunc.__name__
if isinstance(symfunc, str):
# Keyword arguments to UndefinedFunction are added as attributes to
# the created class.
symfunc = UndefinedFunction(
symfunc, _imp_=staticmethod(implementation), **kwargs)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError(filldedent('''
symfunc should be either a string or
an UndefinedFunction instance.'''))
return symfunc
|
8edfa128bd318719c324323d0b6217b47fd081862d7eaf5f5fd909dc4ee26a78 | """
pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
return open(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as StringIO).
"""
import sys
import os
from io import StringIO
def get_resource(identifier, pkgname=__name__):
"""
Acquire a readable object for a given package name and identifier.
An IOError will be raised if the resource cannot be found.
For example::
mydata = get_resource('mypkgdata.jpg').read()
Note that the package name must be fully qualified, if given, such
that it would be found in sys.modules.
In some cases, ``get_resource`` will return a real file object. In that
case, it may be useful to use its name attribute to get the path
rather than use it as a file-like object. For example, you may
be handing data off to a C API.
"""
mod = sys.modules[pkgname]
fn = getattr(mod, '__file__', None)
if fn is None:
raise OSError("%r has no __file__!" % pkgname)
path = os.path.join(os.path.dirname(fn), identifier)
loader = getattr(mod, '__loader__', None)
if loader is not None:
try:
data = loader.get_data(path)
except (OSError, AttributeError):
pass
else:
return StringIO(data.decode('utf-8'))
return open(os.path.normpath(path), 'rb')
|
5f0660333f2e489088357f20bd743a21449b2265c9364030b36cb93e25452ed4 | """
Algorithms and classes to support enumerative combinatorics.
Currently just multiset partitions, but more could be added.
Terminology (following Knuth, algorithm 7.1.2.5M TAOCP)
*multiset* aaabbcccc has a *partition* aaabc | bccc
The submultisets, aaabc and bccc of the partition are called
*parts*, or sometimes *vectors*. (Knuth notes that multiset
partitions can be thought of as partitions of vectors of integers,
where the ith element of the vector gives the multiplicity of
element i.)
The values a, b and c are *components* of the multiset. These
correspond to elements of a set, but in a multiset can be present
with a multiplicity greater than 1.
The algorithm deserves some explanation.
Think of the part aaabc from the multiset above. If we impose an
ordering on the components of the multiset, we can represent a part
with a vector, in which the value of the first element of the vector
corresponds to the multiplicity of the first component in that
part. Thus, aaabc can be represented by the vector [3, 1, 1]. We
can also define an ordering on parts, based on the lexicographic
ordering of the vector (leftmost vector element, i.e., the element
with the smallest component number, is the most significant), so
that [3, 1, 1] > [3, 1, 0] and [3, 1, 1] > [2, 1, 4]. The ordering
on parts can be extended to an ordering on partitions: First, sort
the parts in each partition, left-to-right in decreasing order. Then
partition A is greater than partition B if A's leftmost/greatest
part is greater than B's leftmost part. If the leftmost parts are
equal, compare the second parts, and so on.
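For example (an illustrative case, with components ordered a, b, c for
the multiset aaabbcccc, i.e. multiplicities [3, 2, 4]): the partition
aaabc | bccc is stored, with parts sorted in decreasing order, as
[[3, 1, 1], [0, 1, 3]], and it is greater than the partition
[[2, 2, 2], [1, 0, 2]] because the leading parts satisfy
[3, 1, 1] > [2, 2, 2].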
In this ordering, the greatest partition of a given multiset has only
one part. The least partition is the one in which the components
are spread out, one per part.
The enumeration algorithms in this file yield the partitions of the
argument multiset in decreasing order. The main data structure is a
stack of parts, corresponding to the current partition. An
important invariant is that the parts on the stack are themselves in
decreasing order. This data structure is decremented to find the
next smaller partition. Most often, decrementing the partition will
only involve adjustments to the smallest parts at the top of the
stack, much as adjacent integers *usually* differ only in their last
few digits.
Knuth's algorithm uses two main operations on parts:
Decrement - change the part so that it is smaller in the
(vector) lexicographic order, but reduced by the smallest amount possible.
For example, if the multiset has vector [5,
3, 1], and the bottom/greatest part is [4, 2, 1], this part would
decrement to [4, 2, 0], while [4, 0, 0] would decrement to [3, 3,
1]. A singleton part is never decremented -- [1, 0, 0] is not
decremented to [0, 3, 1]. Instead, the decrement operator needs
to fail for this case. In Knuth's pseudocode, the decrement
operator is step m5.
Spread unallocated multiplicity - Once a part has been decremented,
it cannot be the rightmost part in the partition. There is some
multiplicity that has not been allocated, and new parts must be
created above it in the stack to use up this multiplicity. To
maintain the invariant that the parts on the stack are in
decreasing order, these new parts must be less than or equal to
the decremented part.
For example, if the multiset is [5, 3, 1], and its most
significant part has just been decremented to [5, 3, 0], the
spread operation will add a new part so that the stack becomes
[[5, 3, 0], [0, 0, 1]]. If the most significant part (for the
same multiset) has been decremented to [2, 0, 0] the stack becomes
[[2, 0, 0], [2, 0, 0], [1, 3, 1]]. In the pseudocode, the spread
operation for one part is step m2. The complete spread operation
is a loop of steps m2 and m3.
In order to facilitate the spread operation, Knuth stores, for each
component of each part, not just the multiplicity of that component
in the part, but also the total multiplicity available for this
component in this part or any lesser part above it on the stack.
One added twist is that Knuth does not represent the part vectors as
arrays. Instead, he uses a sparse representation, in which a
component of a part is represented as a component number (c), plus
the multiplicity of the component in that part (v) as well as the
total multiplicity available for that component (u). This saves
time that would be spent skipping over zeros.
"""
class PartComponent:
"""Internal class used in support of the multiset partitions
enumerators and the associated visitor functions.
Represents one component of one part of the current partition.
A stack of these, plus an auxiliary frame array, f, represents a
partition of the multiset.
Knuth's pseudocode makes c, u, and v separate arrays.
"""
__slots__ = ('c', 'u', 'v')
def __init__(self):
self.c = 0 # Component number
self.u = 0 # The as yet unpartitioned amount in component c
# *before* it is allocated by this triple
self.v = 0 # Amount of c component in the current part
# (v<=u). An invariant of the representation is
# that the next higher triple for this component
# (if there is one) will have a value of u-v in
# its u attribute.
def __repr__(self):
"for debug/algorithm animation purposes"
return 'c:%d u:%d v:%d' % (self.c, self.u, self.v)
def __eq__(self, other):
"""Define value oriented equality, which is useful for testers"""
return (isinstance(other, self.__class__) and
self.c == other.c and
self.u == other.u and
self.v == other.v)
def __ne__(self, other):
"""Defined for consistency with __eq__"""
return not self == other
# This function tries to be a faithful implementation of algorithm
# 7.1.2.5M in Volume 4A, Combinatorial Algorithms, Part 1, of The Art
# of Computer Programming, by Donald Knuth. This includes using
# (mostly) the same variable names, etc. This makes for rather
# low-level Python.
# Changes from Knuth's pseudocode include
# - use PartComponent struct/object instead of 3 arrays
# - make the function a generator
# - map (with some difficulty) the GOTOs to Python control structures.
# - Knuth uses 1-based numbering for components, this code is 0-based
# - renamed variable l to lpart.
# - flag variable x takes on values True/False instead of 1/0
#
def multiset_partitions_taocp(multiplicities):
"""Enumerates partitions of a multiset.
Parameters
==========
multiplicities
list of integer multiplicities of the components of the multiset.
Yields
======
state
Internal data structure which encodes a particular partition.
This output is then usually processed by a visitor function
which combines the information from this data structure with
the components themselves to produce an actual partition.
Unless they wish to create their own visitor function, users will
have little need to look inside this data structure. But, for
reference, it is a 3-element list with components:
f
is a frame array, which is used to divide pstack into parts.
lpart
points to the base of the topmost part.
pstack
is an array of PartComponent objects.
The ``state`` output offers a peek into the internal data
structures of the enumeration function. The client should
treat this as read-only; any modification of the data
structure will cause unpredictable (and almost certainly
incorrect) results. Also, the components of ``state`` are
modified in place at each iteration. Hence, the visitor must
be called at each loop iteration. Accumulating the ``state``
instances and processing them later will not work.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import multiset_partitions_taocp
>>> # variables components and multiplicities represent the multiset 'abb'
>>> components = 'ab'
>>> multiplicities = [1, 2]
>>> states = multiset_partitions_taocp(multiplicities)
>>> list(list_visitor(state, components) for state in states)
[[['a', 'b', 'b']],
[['a', 'b'], ['b']],
[['a'], ['b', 'b']],
[['a'], ['b'], ['b']]]
See Also
========
sympy.utilities.iterables.multiset_partitions: Takes a multiset
as input and directly yields multiset partitions. It
dispatches to a number of functions, including this one, for
implementation. Most users will find it more convenient to
use than multiset_partitions_taocp.
"""
# Important variables.
# m is the number of components, i.e., number of distinct elements
m = len(multiplicities)
# n is the cardinality, total number of elements whether or not distinct
n = sum(multiplicities)
# The main data structure, f segments pstack into parts. See
# list_visitor() for example code indicating how this internal
# state corresponds to a partition.
# Note: allocation of space for stack is conservative. Knuth's
# exercise 7.2.1.5.68 gives some indication of how to tighten this
# bound, but this is not implemented.
pstack = [PartComponent() for i in range(n * m + 1)]
f = [0] * (n + 1)
# Step M1 in Knuth (Initialize)
# Initial state - entire multiset in one part.
for j in range(m):
ps = pstack[j]
ps.c = j
ps.u = multiplicities[j]
ps.v = multiplicities[j]
# Other variables
f[0] = 0
a = 0
lpart = 0
f[1] = m
b = m # in general, current stack frame is from a to b - 1
while True:
while True:
# Step M2 (Subtract v from u)
j = a
k = b
x = False
while j < b:
pstack[k].u = pstack[j].u - pstack[j].v
if pstack[k].u == 0:
x = True
elif not x:
pstack[k].c = pstack[j].c
pstack[k].v = min(pstack[j].v, pstack[k].u)
x = pstack[k].u < pstack[j].v
k = k + 1
else: # x is True
pstack[k].c = pstack[j].c
pstack[k].v = pstack[k].u
k = k + 1
j = j + 1
# Note: x is True iff v has changed
# Step M3 (Push if nonzero.)
if k > b:
a = b
b = k
lpart = lpart + 1
f[lpart + 1] = b
# Return to M2
else:
break # Continue to M4
# M4 Visit a partition
state = [f, lpart, pstack]
yield state
# M5 (Decrease v)
while True:
j = b-1
while (pstack[j].v == 0):
j = j - 1
if j == a and pstack[j].v == 1:
# M6 (Backtrack)
if lpart == 0:
return
lpart = lpart - 1
b = a
a = f[lpart]
# Return to M5
else:
pstack[j].v = pstack[j].v - 1
for k in range(j + 1, b):
pstack[k].v = pstack[k].u
break # GOTO M2
# --------------- Visitor functions for multiset partitions ---------------
# A visitor takes the partition state generated by
# multiset_partitions_taocp or other enumerator, and produces useful
# output (such as the actual partition).
def factoring_visitor(state, primes):
"""Use with multiset_partitions_taocp to enumerate the ways a
number can be expressed as a product of factors. For this usage,
the exponents of the prime factors of a number are arguments to
the partition enumerator, while the corresponding prime factors
are input here.
Examples
========
To enumerate the factorings of a number we can think of the elements of the
partition as being the prime factors and the multiplicities as being their
exponents.
>>> from sympy.utilities.enumerative import factoring_visitor
>>> from sympy.utilities.enumerative import multiset_partitions_taocp
>>> from sympy import factorint
>>> primes, multiplicities = zip(*factorint(24).items())
>>> primes
(2, 3)
>>> multiplicities
(3, 1)
>>> states = multiset_partitions_taocp(multiplicities)
>>> list(factoring_visitor(state, primes) for state in states)
[[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], [6, 2, 2], [2, 2, 2, 3]]
"""
f, lpart, pstack = state
factoring = []
for i in range(lpart + 1):
factor = 1
for ps in pstack[f[i]: f[i + 1]]:
if ps.v > 0:
factor *= primes[ps.c] ** ps.v
factoring.append(factor)
return factoring
def list_visitor(state, components):
"""Return a list of lists to represent the partition.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import multiset_partitions_taocp
>>> states = multiset_partitions_taocp([1, 2, 1])
>>> s = next(states)
>>> list_visitor(s, 'abc') # for multiset 'a b b c'
[['a', 'b', 'b', 'c']]
>>> s = next(states)
    >>> list_visitor(s, [1, 2, 3]) # for multiset '1 2 2 3'
[[1, 2, 2], [3]]
"""
f, lpart, pstack = state
partition = []
for i in range(lpart+1):
part = []
for ps in pstack[f[i]:f[i+1]]:
if ps.v > 0:
part.extend([components[ps.c]] * ps.v)
partition.append(part)
return partition
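# A custom visitor follows the same pattern as list_visitor: walk the
# segments of pstack delimited by f and read off the nonzero v values.
# A minimal sketch (hypothetical helper, not part of this module's API)
# that returns only the sizes of the parts, e.g. [3, 1] for the
# partition [['a', 'b', 'b'], ['c']]:
#
#     def sizes_visitor(state):
#         f, lpart, pstack = state
#         return [sum(ps.v for ps in pstack[f[i]:f[i + 1]])
#                 for i in range(lpart + 1)]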
class MultisetPartitionTraverser():
"""
Has methods to ``enumerate`` and ``count`` the partitions of a multiset.
This implements a refactored and extended version of Knuth's algorithm
    7.1.2.5M [AOCP]_.
The enumeration methods of this class are generators and return
data structures which can be interpreted by the same visitor
functions used for the output of ``multiset_partitions_taocp``.
Examples
========
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> m.count_partitions([4,4,4,2])
127750
>>> m.count_partitions([3,3,3])
686
See Also
========
multiset_partitions_taocp
sympy.utilities.iterables.multiset_partitions
References
==========
    .. [AOCP] Algorithm 7.1.2.5M in Volume 4A, Combinatorial Algorithms,
Part 1, of The Art of Computer Programming, by Donald Knuth.
.. [Factorisatio] On a Problem of Oppenheim concerning
"Factorisatio Numerorum" E. R. Canfield, Paul Erdos, Carl
Pomerance, JOURNAL OF NUMBER THEORY, Vol. 17, No. 1. August
1983. See section 7 for a description of an algorithm
similar to Knuth's.
.. [Yorgey] Generating Multiset Partitions, Brent Yorgey, The
Monad.Reader, Issue 8, September 2007.
"""
def __init__(self):
self.debug = False
# TRACING variables. These are useful for gathering
# statistics on the algorithm itself, but have no particular
# benefit to a user of the code.
self.k1 = 0
self.k2 = 0
self.p1 = 0
self.pstack = None
self.f = None
self.lpart = 0
self.discarded = 0
# dp_stack is list of lists of (part_key, start_count) pairs
self.dp_stack = []
        # dp_map is a map part_key -> count, where count represents the
        # number of multiset partitions which are descendants of a part
        # with this key, **or any of its decrements**
# Thus, when we find a part in the map, we add its count
# value to the running total, cut off the enumeration, and
# backtrack
if not hasattr(self, 'dp_map'):
self.dp_map = {}
def db_trace(self, msg):
"""Useful for understanding/debugging the algorithms. Not
generally activated in end-user code."""
if self.debug:
# XXX: animation_visitor is undefined... Clearly this does not
# work and was not tested. Previous code in comments below.
raise RuntimeError
#letters = 'abcdefghijklmnopqrstuvwxyz'
#state = [self.f, self.lpart, self.pstack]
#print("DBG:", msg,
# ["".join(part) for part in list_visitor(state, letters)],
# animation_visitor(state))
#
# Helper methods for enumeration
#
def _initialize_enumeration(self, multiplicities):
"""Allocates and initializes the partition stack.
This is called from the enumeration/counting routines, so
there is no need to call it separately."""
num_components = len(multiplicities)
# cardinality is the total number of elements, whether or not distinct
cardinality = sum(multiplicities)
# pstack is the partition stack, which is segmented by
# f into parts.
self.pstack = [PartComponent() for i in
range(num_components * cardinality + 1)]
self.f = [0] * (cardinality + 1)
# Initial state - entire multiset in one part.
for j in range(num_components):
ps = self.pstack[j]
ps.c = j
ps.u = multiplicities[j]
ps.v = multiplicities[j]
self.f[0] = 0
self.f[1] = num_components
self.lpart = 0
# The decrement_part() method corresponds to step M5 in Knuth's
# algorithm. This is the base version for enum_all(). Modified
# versions of this method are needed if we want to restrict
# sizes of the partitions produced.
def decrement_part(self, part):
"""Decrements part (a subrange of pstack), if possible, returning
True iff the part was successfully decremented.
If you think of the v values in the part as a multi-digit
integer (least significant digit on the right) this is
basically decrementing that integer, but with the extra
constraint that the leftmost digit cannot be decremented to 0.
Parameters
==========
part
The part, represented as a list of PartComponent objects,
which is to be decremented.
"""
plen = len(part)
for j in range(plen - 1, -1, -1):
            if (j == 0 and part[j].v > 1) or (j > 0 and part[j].v > 0):
# found val to decrement
part[j].v -= 1
# Reset trailing parts back to maximum
for k in range(j + 1, plen):
part[k].v = part[k].u
return True
return False
# Version to allow number of parts to be bounded from above.
# Corresponds to (a modified) step M5.
def decrement_part_small(self, part, ub):
"""Decrements part (a subrange of pstack), if possible, returning
True iff the part was successfully decremented.
Parameters
==========
part
part to be decremented (topmost part on the stack)
ub
the maximum number of parts allowed in a partition
returned by the calling traversal.
Notes
=====
The goal of this modification of the ordinary decrement method
is to fail (meaning that the subtree rooted at this part is to
be skipped) when it can be proved that this part can only have
child partitions which are larger than allowed by ``ub``. If a
decision is made to fail, it must be accurate, otherwise the
enumeration will miss some partitions. But, it is OK not to
capture all the possible failures -- if a part is passed that
shouldn't be, the resulting too-large partitions are filtered
by the enumeration one level up. However, as is usual in
constrained enumerations, failing early is advantageous.
The tests used by this method catch the most common cases,
although this implementation is by no means the last word on
this problem. The tests include:
1) ``lpart`` must be less than ``ub`` by at least 2. This is because
once a part has been decremented, the partition
will gain at least one child in the spread step.
2) If the leading component of the part is about to be
decremented, check for how many parts will be added in
order to use up the unallocated multiplicity in that
leading component, and fail if this number is greater than
allowed by ``ub``. (See code for the exact expression.) This
test is given in the answer to Knuth's problem 7.2.1.5.69.
3) If there is *exactly* enough room to expand the leading
component by the above test, check the next component (if
it exists) once decrementing has finished. If this has
``v == 0``, this next component will push the expansion over the
limit by 1, so fail.
"""
if self.lpart >= ub - 1:
self.p1 += 1 # increment to keep track of usefulness of tests
return False
plen = len(part)
for j in range(plen - 1, -1, -1):
# Knuth's mod, (answer to problem 7.2.1.5.69)
if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u:
self.k1 += 1
return False
            if (j == 0 and part[j].v > 1) or (j > 0 and part[j].v > 0):
# found val to decrement
part[j].v -= 1
# Reset trailing parts back to maximum
for k in range(j + 1, plen):
part[k].v = part[k].u
# Have now decremented part, but are we doomed to
# failure when it is expanded? Check one oddball case
# that turns out to be surprisingly common - exactly
# enough room to expand the leading component, but no
# room for the second component, which has v=0.
if (plen > 1 and part[1].v == 0 and
(part[0].u - part[0].v) ==
((ub - self.lpart - 1) * part[0].v)):
self.k2 += 1
self.db_trace("Decrement fails test 3")
return False
return True
return False
def decrement_part_large(self, part, amt, lb):
"""Decrements part, while respecting size constraint.
A part can have no children which are of sufficient size (as
indicated by ``lb``) unless that part has sufficient
unallocated multiplicity. When enforcing the size constraint,
this method will decrement the part (if necessary) by an
amount needed to ensure sufficient unallocated multiplicity.
Returns True iff the part was successfully decremented.
Parameters
==========
part
part to be decremented (topmost part on the stack)
amt
Can only take values 0 or 1. A value of 1 means that the
part must be decremented, and then the size constraint is
enforced. A value of 0 means just to enforce the ``lb``
size constraint.
lb
The partitions produced by the calling enumeration must
have more parts than this value.
"""
if amt == 1:
            # In this case we always need to decrement, *before*
# enforcing the "sufficient unallocated multiplicity"
# constraint. Easiest for this is just to call the
# regular decrement method.
if not self.decrement_part(part):
return False
# Next, perform any needed additional decrementing to respect
# "sufficient unallocated multiplicity" (or fail if this is
# not possible).
min_unalloc = lb - self.lpart
if min_unalloc <= 0:
return True
total_mult = sum(pc.u for pc in part)
total_alloc = sum(pc.v for pc in part)
if total_mult <= min_unalloc:
return False
deficit = min_unalloc - (total_mult - total_alloc)
if deficit <= 0:
return True
for i in range(len(part) - 1, -1, -1):
if i == 0:
if part[0].v > deficit:
part[0].v -= deficit
return True
else:
return False # This shouldn't happen, due to above check
else:
if part[i].v >= deficit:
part[i].v -= deficit
return True
else:
deficit -= part[i].v
part[i].v = 0
def decrement_part_range(self, part, lb, ub):
"""Decrements part (a subrange of pstack), if possible, returning
True iff the part was successfully decremented.
Parameters
==========
part
part to be decremented (topmost part on the stack)
ub
the maximum number of parts allowed in a partition
returned by the calling traversal.
lb
The partitions produced by the calling enumeration must
have more parts than this value.
Notes
=====
Combines the constraints of _small and _large decrement
methods. If returns success, part has been decremented at
least once, but perhaps by quite a bit more if needed to meet
the lb constraint.
"""
# Constraint in the range case is just enforcing both the
# constraints from _small and _large cases. Note the 0 as the
# second argument to the _large call -- this is the signal to
        # decrement only as needed for constraint enforcement. The
# short circuiting and left-to-right order of the 'and'
# operator is important for this to work correctly.
return self.decrement_part_small(part, ub) and \
self.decrement_part_large(part, 0, lb)
def spread_part_multiplicity(self):
"""Returns True if a new part has been created, and
adjusts pstack, f and lpart as needed.
Notes
=====
Spreads unallocated multiplicity from the current top part
into a new part created above the current on the stack. This
new part is constrained to be less than or equal to the old in
terms of the part ordering.
This call does nothing (and returns False) if the current top
part has no unallocated multiplicity.
"""
j = self.f[self.lpart] # base of current top part
k = self.f[self.lpart + 1] # ub of current; potential base of next
base = k # save for later comparison
        changed = False  # Set to True when the new part (so far) is
                         # strictly less than (as opposed to less than
                         # or equal to) the old.
for j in range(self.f[self.lpart], self.f[self.lpart + 1]):
self.pstack[k].u = self.pstack[j].u - self.pstack[j].v
if self.pstack[k].u == 0:
changed = True
else:
self.pstack[k].c = self.pstack[j].c
if changed: # Put all available multiplicity in this part
self.pstack[k].v = self.pstack[k].u
else: # Still maintaining ordering constraint
if self.pstack[k].u < self.pstack[j].v:
self.pstack[k].v = self.pstack[k].u
changed = True
else:
self.pstack[k].v = self.pstack[j].v
k = k + 1
if k > base:
# Adjust for the new part on stack
self.lpart = self.lpart + 1
self.f[self.lpart + 1] = k
return True
return False
def top_part(self):
"""Return current top part on the stack, as a slice of pstack.
"""
return self.pstack[self.f[self.lpart]:self.f[self.lpart + 1]]
# Same interface and functionality as multiset_partitions_taocp(),
# but some might find this refactored version easier to follow.
def enum_all(self, multiplicities):
"""Enumerate the partitions of a multiset.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_all([2,2])
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a', 'b', 'b']],
[['a', 'a', 'b'], ['b']],
[['a', 'a'], ['b', 'b']],
[['a', 'a'], ['b'], ['b']],
[['a', 'b', 'b'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a', 'b'], ['a'], ['b']],
[['a'], ['a'], ['b', 'b']],
[['a'], ['a'], ['b'], ['b']]]
See Also
========
multiset_partitions_taocp():
which provides the same result as this method, but is
about twice as fast. Hence, enum_all is primarily useful
for testing. Also see the function for a discussion of
states and visitors.
"""
self._initialize_enumeration(multiplicities)
while True:
while self.spread_part_multiplicity():
pass
# M4 Visit a partition
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part(self.top_part()):
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
def enum_small(self, multiplicities, ub):
"""Enumerate multiset partitions with no more than ``ub`` parts.
Equivalent to enum_range(multiplicities, 0, ub)
Parameters
==========
multiplicities
list of multiplicities of the components of the multiset.
ub
Maximum number of parts
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_small([2,2], 2)
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a', 'b', 'b']],
[['a', 'a', 'b'], ['b']],
[['a', 'a'], ['b', 'b']],
[['a', 'b', 'b'], ['a']],
[['a', 'b'], ['a', 'b']]]
The implementation is based, in part, on the answer given to
exercise 69, in Knuth [AOCP]_.
See Also
========
enum_all, enum_large, enum_range
"""
# Keep track of iterations which do not yield a partition.
# Clearly, we would like to keep this number small.
self.discarded = 0
if ub <= 0:
return
self._initialize_enumeration(multiplicities)
while True:
good_partition = True
while self.spread_part_multiplicity():
self.db_trace("spread 1")
if self.lpart >= ub:
self.discarded += 1
good_partition = False
self.db_trace(" Discarding")
self.lpart = ub - 2
break
# M4 Visit a partition
if good_partition:
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part_small(self.top_part(), ub):
self.db_trace("Failed decrement, going to backtrack")
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
self.db_trace("Backtracked to")
self.db_trace("decrement ok, about to expand")
def enum_large(self, multiplicities, lb):
"""Enumerate the partitions of a multiset with lb < num(parts)
Equivalent to enum_range(multiplicities, lb, sum(multiplicities))
Parameters
==========
multiplicities
list of multiplicities of the components of the multiset.
lb
Number of parts in the partition must be greater than
this lower bound.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_large([2,2], 2)
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a'], ['b'], ['b']],
[['a', 'b'], ['a'], ['b']],
[['a'], ['a'], ['b', 'b']],
[['a'], ['a'], ['b'], ['b']]]
See Also
========
enum_all, enum_small, enum_range
"""
self.discarded = 0
if lb >= sum(multiplicities):
return
self._initialize_enumeration(multiplicities)
self.decrement_part_large(self.top_part(), 0, lb)
while True:
good_partition = True
while self.spread_part_multiplicity():
if not self.decrement_part_large(self.top_part(), 0, lb):
# Failure here should be rare/impossible
self.discarded += 1
good_partition = False
break
# M4 Visit a partition
if good_partition:
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part_large(self.top_part(), 1, lb):
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
def enum_range(self, multiplicities, lb, ub):
"""Enumerate the partitions of a multiset with
``lb < num(parts) <= ub``.
In particular, if partitions with exactly ``k`` parts are
desired, call with ``(multiplicities, k - 1, k)``. This
method generalizes enum_all, enum_small, and enum_large.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_range([2,2], 1, 2)
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a', 'b'], ['b']],
[['a', 'a'], ['b', 'b']],
[['a', 'b', 'b'], ['a']],
[['a', 'b'], ['a', 'b']]]
"""
# combine the constraints of the _large and _small
# enumerations.
self.discarded = 0
if ub <= 0 or lb >= sum(multiplicities):
return
self._initialize_enumeration(multiplicities)
self.decrement_part_large(self.top_part(), 0, lb)
while True:
good_partition = True
while self.spread_part_multiplicity():
self.db_trace("spread 1")
if not self.decrement_part_large(self.top_part(), 0, lb):
# Failure here - possible in range case?
self.db_trace(" Discarding (large cons)")
self.discarded += 1
good_partition = False
break
elif self.lpart >= ub:
self.discarded += 1
good_partition = False
self.db_trace(" Discarding small cons")
self.lpart = ub - 2
break
# M4 Visit a partition
if good_partition:
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part_range(self.top_part(), lb, ub):
self.db_trace("Failed decrement, going to backtrack")
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
self.db_trace("Backtracked to")
self.db_trace("decrement ok, about to expand")
def count_partitions_slow(self, multiplicities):
"""Returns the number of partitions of a multiset whose elements
have the multiplicities given in ``multiplicities``.
Primarily for comparison purposes. It follows the same path as
enumerate, and counts, rather than generates, the partitions.
See Also
========
count_partitions
Has the same calling interface, but is much faster.
"""
# number of partitions so far in the enumeration
self.pcount = 0
self._initialize_enumeration(multiplicities)
while True:
while self.spread_part_multiplicity():
pass
# M4 Visit (count) a partition
self.pcount += 1
# M5 (Decrease v)
while not self.decrement_part(self.top_part()):
# M6 (Backtrack)
if self.lpart == 0:
return self.pcount
self.lpart -= 1
def count_partitions(self, multiplicities):
"""Returns the number of partitions of a multiset whose components
have the multiplicities given in ``multiplicities``.
For larger counts, this method is much faster than calling one
of the enumerators and counting the result. Uses dynamic
programming to cut down on the number of nodes actually
explored. The dictionary used in order to accelerate the
counting process is stored in the ``MultisetPartitionTraverser``
object and persists across calls. If the user does not
expect to call ``count_partitions`` for any additional
multisets, the object should be cleared to save memory. On
the other hand, the cache built up from one count run can
significantly speed up subsequent calls to ``count_partitions``,
so it may be advantageous not to clear the object.
Examples
========
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> m.count_partitions([9,8,2])
288716
>>> m.count_partitions([2,2])
9
>>> del m
Notes
=====
If one looks at the workings of Knuth's algorithm M [AOCP]_, it
can be viewed as a traversal of a binary tree of parts. A
part has (up to) two children, the left child resulting from
the spread operation, and the right child from the decrement
        operation. The ordinary enumeration of multiset partitions is
        an in-order traversal of this tree, with the partitions
        corresponding to paths from the root to the leaves. The
mapping from paths to partitions is a little complicated,
since the partition would contain only those parts which are
leaves or the parents of a spread link, not those which are
parents of a decrement link.
For counting purposes, it is sufficient to count leaves, and
this can be done with a recursive in-order traversal. The
number of leaves of a subtree rooted at a particular part is a
function only of that part itself, so memoizing has the
potential to speed up the counting dramatically.
This method follows a computational approach which is similar
to the hypothetical memoized recursive function, but with two
differences:
1) This method is iterative, borrowing its structure from the
other enumerations and maintaining an explicit stack of
parts which are in the process of being counted. (There
may be multisets which can be counted reasonably quickly by
this implementation, but which would overflow the default
Python recursion limit with a recursive implementation.)
2) Instead of using the part data structure directly, a more
compact key is constructed. This saves space, but more
importantly coalesces some parts which would remain
separate with physical keys.
Unlike the enumeration functions, there is currently no _range
version of count_partitions. If someone wants to stretch
their brain, it should be possible to construct one by
memoizing with a histogram of counts rather than a single
count, and combining the histograms.
"""
# number of partitions so far in the enumeration
self.pcount = 0
# dp_stack is list of lists of (part_key, start_count) pairs
self.dp_stack = []
self._initialize_enumeration(multiplicities)
pkey = part_key(self.top_part())
self.dp_stack.append([(pkey, 0), ])
while True:
while self.spread_part_multiplicity():
pkey = part_key(self.top_part())
if pkey in self.dp_map:
# Already have a cached value for the count of the
# subtree rooted at this part. Add it to the
# running counter, and break out of the spread
                    # loop. The -1 below compensates for the leaf
                    # that this code path would otherwise find and
                    # which the increment below would count.
self.pcount += (self.dp_map[pkey] - 1)
self.lpart -= 1
break
else:
self.dp_stack.append([(pkey, self.pcount), ])
# M4 count a leaf partition
self.pcount += 1
# M5 (Decrease v)
while not self.decrement_part(self.top_part()):
# M6 (Backtrack)
for key, oldcount in self.dp_stack.pop():
self.dp_map[key] = self.pcount - oldcount
if self.lpart == 0:
return self.pcount
self.lpart -= 1
# At this point have successfully decremented the part on
# the stack and it does not appear in the cache. It needs
# to be added to the list at the top of dp_stack
pkey = part_key(self.top_part())
self.dp_stack[-1].append((pkey, self.pcount),)
def part_key(part):
"""Helper for MultisetPartitionTraverser.count_partitions that
creates a key for ``part``, that only includes information which can
affect the count for that part. (Any irrelevant information just
reduces the effectiveness of dynamic programming.)
Notes
=====
    This function is a candidate for future exploration. There
are likely symmetries that can be exploited to coalesce some
``part_key`` values, and thereby save space and improve
performance.
"""
# The component number is irrelevant for counting partitions, so
# leave it out of the memo key.
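    # Illustration (hypothetical part): [(c=0, u=3, v=2), (c=2, u=1, v=1)]
    # maps to the key (3, 2, 1, 1); parts that differ only in which
    # components they mention therefore share a key and a cached count.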
rval = []
for ps in part:
rval.append(ps.u)
rval.append(ps.v)
return tuple(rval)