from __future__ import print_function, division

from sympy import Number
from sympy.core import Mul, Basic, sympify, S
from sympy.core.compatibility import range
from sympy.functions import adjoint
from sympy.strategies import (rm_id, unpack, typed, flatten, exhaust,
        do_one, new)
from sympy.matrices.matrices import MatrixBase

from .inverse import Inverse
from .matexpr import \
    MatrixExpr, ShapeError, Identity, ZeroMatrix, GenericIdentity
from .matpow import MatPow
from .transpose import transpose
from .permutation import PermutationMatrix


# XXX: MatMul should perhaps not subclass directly from Mul
class MatMul(MatrixExpr, Mul):
    """
    A product of matrix expressions

    Examples
    ========

    >>> from sympy import MatMul, MatrixSymbol
    >>> A = MatrixSymbol('A', 5, 4)
    >>> B = MatrixSymbol('B', 4, 3)
    >>> C = MatrixSymbol('C', 3, 6)
    >>> MatMul(A, B, C)
    A*B*C
    """
    is_MatMul = True

    identity = GenericIdentity()

    def __new__(cls, *args, **kwargs):
        check = kwargs.get('check', True)

        if not args:
            return cls.identity

        # This must be removed aggressively in the constructor to avoid
        # TypeErrors from GenericIdentity().shape
        args = filter(lambda i: cls.identity != i, args)
        args = list(map(sympify, args))
        obj = Basic.__new__(cls, *args)
        factor, matrices = obj.as_coeff_matrices()
        if check:
            validate(*matrices)
        if not matrices:
            # Should it be
            #
            # return Basic.__neq__(cls, factor, GenericIdentity()) ?
            return factor
        return obj

    @property
    def shape(self):
        matrices = [arg for arg in self.args if arg.is_Matrix]
        return (matrices[0].rows, matrices[-1].cols)

    def _entry(self, i, j, expand=True, **kwargs):
        from sympy import Dummy, Sum, Mul, ImmutableMatrix, Integer

        coeff, matrices = self.as_coeff_matrices()

        if len(matrices) == 1:  # situation like 2*X, matmul is just X
            return coeff * matrices[0][i, j]

        indices = [None]*(len(matrices) + 1)
        ind_ranges = [None]*(len(matrices) - 1)
        indices[0] = i
        indices[-1] = j

        def f():
            counter = 1
            while True:
                yield Dummy("i_%i" % counter)
                counter += 1

        dummy_generator = kwargs.get("dummy_generator", f())

        for i in range(1, len(matrices)):
            indices[i] = next(dummy_generator)

        for i, arg in enumerate(matrices[:-1]):
            ind_ranges[i] = arg.shape[1] - 1
        matrices = [arg._entry(indices[i], indices[i+1],
            dummy_generator=dummy_generator) for i, arg in enumerate(matrices)]
        expr_in_sum = Mul.fromiter(matrices)
        if any(v.has(ImmutableMatrix) for v in matrices):
            expand = True
        result = coeff*Sum(
                expr_in_sum,
                *zip(indices[1:-1], [0]*len(ind_ranges), ind_ranges)
            )

        # Don't waste time in result.doit() if the sum bounds are symbolic
        if not any(isinstance(v, (Integer, int)) for v in ind_ranges):
            expand = False
        return result.doit() if expand else result

    def as_coeff_matrices(self):
        scalars = [x for x in self.args if not x.is_Matrix]
        matrices = [x for x in self.args if x.is_Matrix]
        coeff = Mul(*scalars)
        if coeff.is_commutative is False:
            raise NotImplementedError("noncommutative scalars in MatMul are not supported.")

        return coeff, matrices

    def as_coeff_mmul(self):
        coeff, matrices = self.as_coeff_matrices()
        return coeff, MatMul(*matrices)

    def _eval_transpose(self):
        """Transposition of matrix multiplication.

        Notes
        =====

        The following rules are applied.

        Transposition for matrix multiplied with another matrix:
        `\\left(A B\\right)^{T} = B^{T} A^{T}`

        Transposition for matrix multiplied with scalar:
        `\\left(c A\\right)^{T} = c A^{T}`

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Transpose
        """
        coeff, matrices = self.as_coeff_matrices()
        return MatMul(
            coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()

    def _eval_adjoint(self):
        return MatMul(*[adjoint(arg) for arg in self.args[::-1]]).doit()

    def _eval_trace(self):
        factor, mmul = self.as_coeff_mmul()
        if factor != 1:
            from .trace import trace
            return factor * trace(mmul.doit())
        else:
            raise NotImplementedError("Can't simplify any further")

    def _eval_determinant(self):
        from sympy.matrices.expressions.determinant import Determinant
        factor, matrices = self.as_coeff_matrices()
        square_matrices = only_squares(*matrices)
        return factor**self.rows * Mul(*list(map(Determinant, square_matrices)))

    def _eval_inverse(self):
        try:
            return MatMul(*[
                arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
                    for arg in self.args[::-1]]).doit()
        except ShapeError:
            return Inverse(self)

    def doit(self, **kwargs):
        deep = kwargs.get('deep', True)
        if deep:
            args = [arg.doit(**kwargs) for arg in self.args]
        else:
            args = self.args

        # treat scalar*MatrixSymbol or scalar*MatPow separately
        expr = canonicalize(MatMul(*args))
        return expr

    # Needed for partial compatibility with Mul
    def args_cnc(self, **kwargs):
        coeff_c = [x for x in self.args if x.is_commutative]
        coeff_nc = [x for x in self.args if not x.is_commutative]
        return [coeff_c, coeff_nc]

    def _eval_derivative_matrix_lines(self, x):
        from .transpose import Transpose
        with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
        lines = []
        for ind in with_x_ind:
            left_args = self.args[:ind]
            right_args = self.args[ind+1:]

            if right_args:
                right_mat = MatMul.fromiter(right_args)
            else:
                right_mat = Identity(self.shape[1])
            if left_args:
                left_rev = MatMul.fromiter([Transpose(i).doit() if i.is_Matrix else i
                                            for i in reversed(left_args)])
            else:
                left_rev = Identity(self.shape[0])

            d = self.args[ind]._eval_derivative_matrix_lines(x)
            for i in d:
                i.append_first(left_rev)
                i.append_second(right_mat)
                lines.append(i)

        return lines


def validate(*matrices):
    """ Checks for valid shapes for args of MatMul """
    for i in range(len(matrices)-1):
        A, B = matrices[i:i+2]
        if A.cols != B.rows:
            raise ShapeError("Matrices %s and %s are not aligned"%(A, B))


# Rules
def newmul(*args):
    if args[0] == 1:
        args = args[1:]
    return new(MatMul, *args)


def any_zeros(mul):
    if any([arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
                       for arg in mul.args]):
        matrices = [arg for arg in mul.args if arg.is_Matrix]
        return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
    return mul


def merge_explicit(matmul):
    """ Merge explicit MatrixBase arguments

    >>> from sympy import MatrixSymbol, eye, Matrix, MatMul, pprint
    >>> from sympy.matrices.expressions.matmul import merge_explicit
    >>> A = MatrixSymbol('A', 2, 2)
    >>> B = Matrix([[1, 1], [1, 1]])
    >>> C = Matrix([[1, 2], [3, 4]])
    >>> X = MatMul(A, B, C)
    >>> pprint(X)
      [1  1] [1  2]
    A*[    ]*[    ]
      [1  1] [3  4]
    >>> pprint(merge_explicit(X))
      [4  6]
    A*[    ]
      [4  6]

    >>> X = MatMul(B, A, C)
    >>> pprint(X)
    [1  1]   [1  2]
    [    ]*A*[    ]
    [1  1]   [3  4]
    >>> pprint(merge_explicit(X))
    [1  1]   [1  2]
    [    ]*A*[    ]
    [1  1]   [3  4]
    """
    if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
        return matmul
    newargs = []
    last = matmul.args[0]
    for arg in matmul.args[1:]:
        if isinstance(arg, (MatrixBase, Number)) and isinstance(last, (MatrixBase, Number)):
            last = last * arg
        else:
            newargs.append(last)
            last = arg
    newargs.append(last)

    return MatMul(*newargs)


def remove_ids(mul):
    """ Remove Identities from a MatMul

    This is a modified version of sympy.strategies.rm_id.
    This is necessary because MatMul may contain both MatrixExprs and Exprs
    as args.

    See Also
    ========

    sympy.strategies.rm_id
    """
    # Separate Exprs from MatrixExprs in args
    factor, mmul = mul.as_coeff_mmul()
    # Apply standard rm_id for MatMuls
    result = rm_id(lambda x: x.is_Identity is True)(mmul)
    if result != mmul:
        return newmul(factor, *result.args)  # Recombine and return
    else:
        return mul


def factor_in_front(mul):
    factor, matrices = mul.as_coeff_matrices()
    if factor != 1:
        return newmul(factor, *matrices)
    return mul


def combine_powers(mul):
    """Combine consecutive powers with the same base into one

    e.g. A*A**2 -> A**3

    This also cancels out the possible matrix inverses using the
    knowledgebase of ``Inverse``.

    e.g. Y * X * X.I -> Y
    """
    factor, args = mul.as_coeff_matrices()
    new_args = [args[0]]

    for B in args[1:]:
        A = new_args[-1]
        if A.is_square == False or B.is_square == False:
            new_args.append(B)
            continue

        if isinstance(A, MatPow):
            A_base, A_exp = A.args
        else:
            A_base, A_exp = A, S.One

        if isinstance(B, MatPow):
            B_base, B_exp = B.args
        else:
            B_base, B_exp = B, S.One

        if A_base == B_base:
            new_exp = A_exp + B_exp
            new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
        elif not isinstance(B_base, MatrixBase) and \
                A_base == B_base.inverse():
            new_exp = A_exp - B_exp
            new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
        else:
            new_args.append(B)

    return newmul(factor, *new_args)


def combine_permutations(mul):
    """Refine products of permutation matrices as the products of cycles.
    """
    args = mul.args
    l = len(args)
    if l < 2:
        return mul

    result = [args[0]]
    for i in range(1, l):
        A = result[-1]
        B = args[i]
        if isinstance(A, PermutationMatrix) and \
                isinstance(B, PermutationMatrix):
            cycle_1 = A.args[0]
            cycle_2 = B.args[0]
            result[-1] = PermutationMatrix(cycle_1 * cycle_2)
        else:
            result.append(B)

    return MatMul(*result)


rules = (
    any_zeros, remove_ids, combine_powers, unpack, rm_id(lambda x: x == 1),
    merge_explicit, factor_in_front, flatten, combine_permutations)

canonicalize = exhaust(typed({MatMul: do_one(*rules)}))


def only_squares(*matrices):
    """factor matrices only if they are square"""
    if matrices[0].rows != matrices[-1].cols:
        raise RuntimeError("Invalid matrices being multiplied")
    out = []
    start = 0
    for i, M in enumerate(matrices):
        if M.cols == matrices[start].rows:
            out.append(MatMul(*matrices[start:i+1]).doit())
            start = i+1
    return out


from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict


def refine_MatMul(expr, assumptions):
    """
    >>> from sympy import MatrixSymbol, Q, assuming, refine
    >>> X = MatrixSymbol('X', 2, 2)
    >>> expr = X * X.T
    >>> print(expr)
    X*X.T
    >>> with assuming(Q.orthogonal(X)):
    ...     print(refine(expr))
    I
    """
    newargs = []
    exprargs = []

    for args in expr.args:
        if args.is_Matrix:
            exprargs.append(args)
        else:
            newargs.append(args)

    last = exprargs[0]
    for arg in exprargs[1:]:
        if arg == last.T and ask(Q.orthogonal(arg), assumptions):
            last = Identity(arg.shape[0])
        elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
            last = Identity(arg.shape[0])
        else:
            newargs.append(last)
            last = arg
    newargs.append(last)

    return MatMul(*newargs)


handlers_dict['MatMul'] = refine_MatMul
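A minimal usage sketch of the canonicalization pipeline above (illustrative only, not part of the module; it assumes a standard SymPy installation and uses placeholder symbols A and B). It exercises the remove_ids, any_zeros and combine_powers rules through MatMul.doit():

# Illustrative sketch: exercising the canonicalization rules defined above.
from sympy import MatrixSymbol, Identity, ZeroMatrix
from sympy.matrices.expressions.matmul import MatMul

A = MatrixSymbol('A', 3, 3)
B = MatrixSymbol('B', 3, 3)

# remove_ids drops identity factors during canonicalization.
assert MatMul(A, Identity(3), B).doit() == A*B
# any_zeros collapses any product containing a zero matrix.
assert MatMul(A, ZeroMatrix(3, 3), B).doit() == ZeroMatrix(3, 3)
# combine_powers merges repeated bases into a single MatPow.
assert MatMul(A, A).doit() == A**2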
from __future__ import print_function, division

from sympy.core import S
from sympy.core.sympify import _sympify
from sympy.functions import KroneckerDelta

from .matexpr import MatrixExpr, Identity, ZeroMatrix, OneMatrix


class PermutationMatrix(MatrixExpr):
    """A Permutation Matrix

    Parameters
    ==========

    perm : Permutation
        The permutation the matrix uses.

        The size of the permutation determines the matrix size.

        See the documentation of
        :class:`sympy.combinatorics.permutations.Permutation` for
        the further information of how to create a permutation object.

    Examples
    ========

    >>> from sympy.matrices import Matrix, PermutationMatrix
    >>> from sympy.combinatorics import Permutation

    Creating a permutation matrix:

    >>> p = Permutation(1, 2, 0)
    >>> P = PermutationMatrix(p)
    >>> P = P.as_explicit()
    >>> P
    Matrix([
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0]])

    Permuting a matrix row and column:

    >>> M = Matrix([0, 1, 2])
    >>> Matrix(P*M)
    Matrix([
    [1],
    [2],
    [0]])

    >>> Matrix(M.T*P)
    Matrix([[2, 0, 1]])

    See Also
    ========

    sympy.combinatorics.permutations.Permutation
    """

    def __new__(cls, perm):
        from sympy.combinatorics.permutations import Permutation

        perm = _sympify(perm)
        if not isinstance(perm, Permutation):
            raise ValueError(
                "{} must be a SymPy Permutation instance.".format(perm))

        return super(PermutationMatrix, cls).__new__(cls, perm)

    @property
    def shape(self):
        size = self.args[0].size
        return (size, size)

    @property
    def is_Identity(self):
        return self.args[0].is_Identity

    def doit(self):
        if self.is_Identity:
            return Identity(self.rows)
        return self

    def _entry(self, i, j, **kwargs):
        perm = self.args[0]
        return KroneckerDelta(perm.apply(i), j)

    def _eval_inverse(self):
        return PermutationMatrix(self.args[0] ** -1)

    _eval_transpose = _eval_adjoint = _eval_inverse

    def _eval_determinant(self):
        sign = self.args[0].signature()
        if sign == 1:
            return S.One
        elif sign == -1:
            return S.NegativeOne
        raise NotImplementedError

    def _eval_rewrite_as_BlockDiagMatrix(self, *args, **kwargs):
        from sympy.combinatorics.permutations import Permutation
        from .blockmatrix import BlockDiagMatrix

        perm = self.args[0]
        full_cyclic_form = perm.full_cyclic_form

        cycles_picks = []

        # Stage 1. Decompose the cycles into the blockable form.
        a, b, c = 0, 0, 0
        flag = False
        for cycle in full_cyclic_form:
            l = len(cycle)
            m = max(cycle)

            if not flag:
                if m + 1 > a + l:
                    flag = True
                    temp = [cycle]
                    b = m
                    c = l
                else:
                    cycles_picks.append([cycle])
                    a += l

            else:
                if m > b:
                    if m + 1 == a + c + l:
                        temp.append(cycle)
                        cycles_picks.append(temp)
                        flag = False
                        a = m+1
                    else:
                        b = m
                        temp.append(cycle)
                        c += l
                else:
                    if b + 1 == a + c + l:
                        temp.append(cycle)
                        cycles_picks.append(temp)
                        flag = False
                        a = b+1
                    else:
                        temp.append(cycle)
                        c += l

        # Stage 2. Normalize each decomposed cycles and build matrix.
        p = 0
        args = []
        for pick in cycles_picks:
            new_cycles = []
            l = 0
            for cycle in pick:
                new_cycle = [i - p for i in cycle]
                new_cycles.append(new_cycle)
                l += len(cycle)
            p += l
            perm = Permutation(new_cycles)
            mat = PermutationMatrix(perm)
            args.append(mat)

        return BlockDiagMatrix(*args)


class MatrixPermute(MatrixExpr):
    r"""Symbolic representation for permuting matrix rows or columns.

    Parameters
    ==========

    perm : Permutation, PermutationMatrix
        The permutation to use for permuting the matrix.
        The permutation can be resized to the suitable one,

    axis : 0 or 1
        The axis to permute alongside.
        If `0`, it will permute the matrix rows.
        If `1`, it will permute the matrix columns.

    Notes
    =====

    This follows the same notation used in
    :meth:`sympy.matrices.common.MatrixCommon.permute`.

    Examples
    ========

    >>> from sympy.matrices import Matrix, MatrixPermute
    >>> from sympy.combinatorics import Permutation

    Permuting the matrix rows:

    >>> p = Permutation(1, 2, 0)
    >>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> B = MatrixPermute(A, p, axis=0)
    >>> B.as_explicit()
    Matrix([
    [4, 5, 6],
    [7, 8, 9],
    [1, 2, 3]])

    Permuting the matrix columns:

    >>> B = MatrixPermute(A, p, axis=1)
    >>> B.as_explicit()
    Matrix([
    [2, 3, 1],
    [5, 6, 4],
    [8, 9, 7]])

    See Also
    ========

    sympy.matrices.common.MatrixCommon.permute
    """
    def __new__(cls, mat, perm, axis=S.Zero):
        from sympy.combinatorics.permutations import Permutation

        mat = _sympify(mat)
        if not mat.is_Matrix:
            raise ValueError(
                "{} must be a SymPy matrix instance.".format(perm))

        perm = _sympify(perm)
        if isinstance(perm, PermutationMatrix):
            perm = perm.args[0]

        if not isinstance(perm, Permutation):
            raise ValueError(
                "{} must be a SymPy Permutation or a PermutationMatrix " \
                "instance".format(perm))

        axis = _sympify(axis)
        if axis not in (0, 1):
            raise ValueError("The axis must be 0 or 1.")

        mat_size = mat.shape[axis]
        if mat_size != perm.size:
            try:
                perm = perm.resize(mat_size)
            except ValueError:
                raise ValueError(
                    "Size does not match between the permutation {} "
                    "and the matrix {} threaded over the axis {} "
                    "and cannot be converted."
                    .format(perm, mat, axis))

        return super(MatrixPermute, cls).__new__(cls, mat, perm, axis)

    def doit(self, deep=True):
        mat, perm, axis = self.args

        if deep:
            mat = mat.doit(deep=deep)
            perm = perm.doit(deep=deep)

        if perm.is_Identity:
            return mat
        if mat.is_Identity:
            if axis is S.Zero:
                return PermutationMatrix(perm)
            elif axis is S.One:
                return PermutationMatrix(perm**-1)
        if isinstance(mat, (ZeroMatrix, OneMatrix)):
            return mat
        if isinstance(mat, MatrixPermute) and mat.args[2] == axis:
            return MatrixPermute(mat.args[0], perm * mat.args[1], axis)
        return self

    @property
    def shape(self):
        return self.args[0].shape

    def _entry(self, i, j, **kwargs):
        mat, perm, axis = self.args

        if axis == 0:
            return mat[perm.apply(i), j]
        elif axis == 1:
            return mat[i, perm.apply(j)]

    def _eval_rewrite_as_MatMul(self, *args, **kwargs):
        from .matmul import MatMul

        mat, perm, axis = self.args

        deep = kwargs.get("deep", True)

        if deep:
            mat = mat.rewrite(MatMul)

        if axis == 0:
            return MatMul(PermutationMatrix(perm), mat)
        elif axis == 1:
            return MatMul(mat, PermutationMatrix(perm**-1))
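A small sanity-check sketch for the two classes above (illustrative only; it assumes a standard SymPy environment, with M and p chosen to mirror the docstring examples). It shows that row permutation via MatrixPermute agrees with explicit left-multiplication by the corresponding PermutationMatrix:

# Illustrative sketch only, based on the docstring examples above.
from sympy import Matrix
from sympy.combinatorics import Permutation
from sympy.matrices.expressions.permutation import (MatrixPermute,
                                                    PermutationMatrix)

p = Permutation(1, 2, 0)
P = PermutationMatrix(p)
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

# Permuting rows with MatrixPermute matches left-multiplying by P explicitly.
assert MatrixPermute(M, p, axis=0).as_explicit() == P.as_explicit() * M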
from sympy.combinatorics import Permutation
from sympy.core.expr import unchanged
from sympy.matrices import Matrix
from sympy.matrices.expressions import \
    MatMul, BlockDiagMatrix, Determinant, Inverse
from sympy.matrices.expressions.matexpr import \
    MatrixSymbol, Identity, ZeroMatrix, OneMatrix
from sympy.matrices.expressions.permutation import \
    MatrixPermute, PermutationMatrix
from sympy.utilities.pytest import raises
from sympy import Symbol


def test_PermutationMatrix_basic():
    p = Permutation([1, 0])
    assert unchanged(PermutationMatrix, p)
    raises(ValueError, lambda: PermutationMatrix((0, 1, 2)))
    assert PermutationMatrix(p).as_explicit() == Matrix([[0, 1], [1, 0]])
    assert isinstance(PermutationMatrix(p) * MatrixSymbol('A', 2, 2), MatMul)


def test_PermutationMatrix_matmul():
    p = Permutation([1, 2, 0])
    P = PermutationMatrix(p)
    M = Matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    assert (P * M).as_explicit() == P.as_explicit() * M
    assert (M * P).as_explicit() == M * P.as_explicit()

    P1 = PermutationMatrix(Permutation([1, 2, 0]))
    P2 = PermutationMatrix(Permutation([2, 1, 0]))
    P3 = PermutationMatrix(Permutation([1, 0, 2]))
    assert P1 * P2 == P3


def test_PermutationMatrix_matpow():
    p1 = Permutation([1, 2, 0])
    P1 = PermutationMatrix(p1)
    p2 = Permutation([2, 0, 1])
    P2 = PermutationMatrix(p2)
    assert P1**2 == P2
    assert P1**3 == Identity(3)


def test_PermutationMatrix_identity():
    p = Permutation([0, 1])
    assert PermutationMatrix(p).is_Identity

    p = Permutation([1, 0])
    assert not PermutationMatrix(p).is_Identity


def test_PermutationMatrix_determinant():
    P = PermutationMatrix(Permutation([0, 1, 2]))
    assert Determinant(P).doit() == 1
    P = PermutationMatrix(Permutation([0, 2, 1]))
    assert Determinant(P).doit() == -1
    P = PermutationMatrix(Permutation([2, 0, 1]))
    assert Determinant(P).doit() == 1


def test_PermutationMatrix_inverse():
    P = PermutationMatrix(Permutation(0, 1, 2))
    assert Inverse(P).doit() == PermutationMatrix(Permutation(0, 2, 1))


def test_PermutationMatrix_rewrite_BlockDiagMatrix():
    P = PermutationMatrix(Permutation([0, 1, 2, 3, 4, 5]))
    P0 = PermutationMatrix(Permutation([0]))
    assert P.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(P0, P0, P0, P0, P0, P0)

    P = PermutationMatrix(Permutation([0, 1, 3, 2, 4, 5]))
    P10 = PermutationMatrix(Permutation(0, 1))
    assert P.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(P0, P0, P10, P0, P0)

    P = PermutationMatrix(Permutation([1, 0, 3, 2, 5, 4]))
    assert P.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(P10, P10, P10)

    P = PermutationMatrix(Permutation([0, 4, 3, 2, 1, 5]))
    P3210 = PermutationMatrix(Permutation([3, 2, 1, 0]))
    assert P.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(P0, P3210, P0)

    P = PermutationMatrix(Permutation([0, 4, 2, 3, 1, 5]))
    P3120 = PermutationMatrix(Permutation([3, 1, 2, 0]))
    assert P.rewrite(BlockDiagMatrix) == \
        BlockDiagMatrix(P0, P3120, P0)

    P = PermutationMatrix(Permutation(0, 3)(1, 4)(2, 5))
    assert P.rewrite(BlockDiagMatrix) == BlockDiagMatrix(P)


def test_MatrixPermute_basic():
    p = Permutation(0, 1)
    P = PermutationMatrix(p)
    A = MatrixSymbol('A', 2, 2)

    raises(ValueError, lambda: MatrixPermute(Symbol('x'), p))
    raises(ValueError, lambda: MatrixPermute(A, Symbol('x')))

    assert MatrixPermute(A, P) == MatrixPermute(A, p)
    raises(ValueError, lambda: MatrixPermute(A, p, 2))

    pp = Permutation(0, 1, size=3)
    assert MatrixPermute(A, pp) == MatrixPermute(A, p)
    pp = Permutation(0, 1, 2)
    raises(ValueError, lambda: MatrixPermute(A, pp))


def test_MatrixPermute_shape():
    p = Permutation(0, 1)
    A = MatrixSymbol('A', 2, 3)
    assert MatrixPermute(A, p).shape == (2, 3)


def test_MatrixPermute_explicit():
    p = Permutation(0, 1, 2)
    A = MatrixSymbol('A', 3, 3)
    AA = A.as_explicit()
    assert MatrixPermute(A, p, 0).as_explicit() == \
        AA.permute(p, orientation='rows')
    assert MatrixPermute(A, p, 1).as_explicit() == \
        AA.permute(p, orientation='cols')


def test_MatrixPermute_rewrite_MatMul():
    p = Permutation(0, 1, 2)
    A = MatrixSymbol('A', 3, 3)

    assert MatrixPermute(A, p, 0).rewrite(MatMul).as_explicit() == \
        MatrixPermute(A, p, 0).as_explicit()

    assert MatrixPermute(A, p, 1).rewrite(MatMul).as_explicit() == \
        MatrixPermute(A, p, 1).as_explicit()


def test_MatrixPermute_doit():
    p = Permutation(0, 1, 2)
    A = MatrixSymbol('A', 3, 3)
    assert MatrixPermute(A, p).doit() == MatrixPermute(A, p)

    p = Permutation(0, size=3)
    A = MatrixSymbol('A', 3, 3)
    assert MatrixPermute(A, p).doit().as_explicit() == \
        MatrixPermute(A, p).as_explicit()

    p = Permutation(0, 1, 2)
    A = Identity(3)
    assert MatrixPermute(A, p, 0).doit().as_explicit() == \
        MatrixPermute(A, p, 0).as_explicit()
    assert MatrixPermute(A, p, 1).doit().as_explicit() == \
        MatrixPermute(A, p, 1).as_explicit()

    A = ZeroMatrix(3, 3)
    assert MatrixPermute(A, p).doit() == A
    A = OneMatrix(3, 3)
    assert MatrixPermute(A, p).doit() == A

    A = MatrixSymbol('A', 4, 4)
    p1 = Permutation(0, 1, 2, 3)
    p2 = Permutation(0, 2, 3, 1)
    expr = MatrixPermute(MatrixPermute(A, p1, 0), p2, 0)
    assert expr.as_explicit() == expr.doit().as_explicit()
    expr = MatrixPermute(MatrixPermute(A, p1, 1), p2, 1)
    assert expr.as_explicit() == expr.doit().as_explicit()
from sympy import (S, Dummy, Lambda, symbols, Interval, Intersection, Set,
                   EmptySet, FiniteSet, Union, ComplexRegion)
from sympy.multipledispatch import dispatch
from sympy.sets.conditionset import ConditionSet
from sympy.sets.fancysets import (Integers, Naturals, Reals, Range,
    ImageSet, Rationals)
from sympy.sets.sets import UniversalSet, imageset, ProductSet


@dispatch(ConditionSet, ConditionSet)
def intersection_sets(a, b): # noqa:F811
    return None

@dispatch(ConditionSet, Set)
def intersection_sets(a, b): # noqa:F811
    return ConditionSet(a.sym, a.condition, Intersection(a.base_set, b))

@dispatch(Naturals, Integers)
def intersection_sets(a, b): # noqa:F811
    return a

@dispatch(Naturals, Naturals)
def intersection_sets(a, b): # noqa:F811
    return a if a is S.Naturals else b

@dispatch(Interval, Naturals)
def intersection_sets(a, b): # noqa:F811
    return intersection_sets(b, a)

@dispatch(ComplexRegion, Set)
def intersection_sets(self, other): # noqa:F811
    if other.is_ComplexRegion:
        # self in rectangular form
        if (not self.polar) and (not other.polar):
            return ComplexRegion(Intersection(self.sets, other.sets))

        # self in polar form
        elif self.polar and other.polar:
            r1, theta1 = self.a_interval, self.b_interval
            r2, theta2 = other.a_interval, other.b_interval
            new_r_interval = Intersection(r1, r2)
            new_theta_interval = Intersection(theta1, theta2)

            # 0 and 2*Pi means the same
            if ((2*S.Pi in theta1 and S.Zero in theta2) or
                    (2*S.Pi in theta2 and S.Zero in theta1)):
                new_theta_interval = Union(new_theta_interval,
                                           FiniteSet(0))
            return ComplexRegion(new_r_interval*new_theta_interval,
                                 polar=True)

    if other.is_subset(S.Reals):
        new_interval = []
        x = symbols("x", cls=Dummy, real=True)

        # self in rectangular form
        if not self.polar:
            for element in self.psets:
                if S.Zero in element.args[1]:
                    new_interval.append(element.args[0])
            new_interval = Union(*new_interval)
            return Intersection(new_interval, other)

        # self in polar form
        elif self.polar:
            for element in self.psets:
                if S.Zero in element.args[1]:
                    new_interval.append(element.args[0])
                if S.Pi in element.args[1]:
                    new_interval.append(ImageSet(Lambda(x, -x), element.args[0]))
                if S.Zero in element.args[0]:
                    new_interval.append(FiniteSet(0))
            new_interval = Union(*new_interval)
            return Intersection(new_interval, other)

@dispatch(Integers, Reals)
def intersection_sets(a, b): # noqa:F811
    return a

@dispatch(Range, Interval)
def intersection_sets(a, b): # noqa:F811
    from sympy.functions.elementary.integers import floor, ceiling
    if not all(i.is_number for i in b.args[:2]):
        return

    # In case of null Range, return an EmptySet.
    if a.size == 0:
        return S.EmptySet

    # trim down to self's size, and represent
    # as a Range with step 1.
    start = ceiling(max(b.inf, a.inf))
    if start not in b:
        start += 1
    end = floor(min(b.sup, a.sup))
    if end not in b:
        end -= 1
    return intersection_sets(a, Range(start, end + 1))

@dispatch(Range, Naturals)
def intersection_sets(a, b): # noqa:F811
    return intersection_sets(a, Interval(b.inf, S.Infinity))

@dispatch(Range, Range)
def intersection_sets(a, b): # noqa:F811
    from sympy.solvers.diophantine import diop_linear
    from sympy.core.numbers import ilcm
    from sympy import sign

    # non-overlap quick exits
    if not b:
        return S.EmptySet
    if not a:
        return S.EmptySet
    if b.sup < a.inf:
        return S.EmptySet
    if b.inf > a.sup:
        return S.EmptySet

    # work with finite end at the start
    r1 = a
    if r1.start.is_infinite:
        r1 = r1.reversed
    r2 = b
    if r2.start.is_infinite:
        r2 = r2.reversed

    # If both ends are infinite then it means that one Range is just the set
    # of all integers (the step must be 1).
    if r1.start.is_infinite:
        return b
    if r2.start.is_infinite:
        return a

    # this equation represents the values of the Range;
    # it's a linear equation
    eq = lambda r, i: r.start + i*r.step

    # we want to know when the two equations might
    # have integer solutions so we use the diophantine
    # solver
    va, vb = diop_linear(eq(r1, Dummy('a')) - eq(r2, Dummy('b')))

    # check for no solution
    no_solution = va is None and vb is None
    if no_solution:
        return S.EmptySet

    # there is a solution
    # -------------------

    # find the coincident point, c
    a0 = va.as_coeff_Add()[0]
    c = eq(r1, a0)

    # find the first point, if possible, in each range
    # since c may not be that point
    def _first_finite_point(r1, c):
        if c == r1.start:
            return c
        # st is the signed step we need to take to
        # get from c to r1.start
        st = sign(r1.start - c)*step
        # use Range to calculate the first point:
        # we want to get as close as possible to
        # r1.start; the Range will not be null since
        # it will at least contain c
        s1 = Range(c, r1.start + st, st)[-1]
        if s1 == r1.start:
            pass
        else:
            # if we didn't hit r1.start then, if the
            # sign of st didn't match the sign of r1.step
            # we are off by one and s1 is not in r1
            if sign(r1.step) != sign(st):
                s1 -= st
        if s1 not in r1:
            return
        return s1

    # calculate the step size of the new Range
    step = abs(ilcm(r1.step, r2.step))
    s1 = _first_finite_point(r1, c)
    if s1 is None:
        return S.EmptySet
    s2 = _first_finite_point(r2, c)
    if s2 is None:
        return S.EmptySet

    # replace the corresponding start or stop in
    # the original Ranges with these points; the
    # result must have at least one point since
    # we know that s1 and s2 are in the Ranges
    def _updated_range(r, first):
        st = sign(r.step)*step
        if r.start.is_finite:
            rv = Range(first, r.stop, st)
        else:
            rv = Range(r.start, first + st, st)
        return rv
    r1 = _updated_range(a, s1)
    r2 = _updated_range(b, s2)

    # work with them both in the increasing direction
    if sign(r1.step) < 0:
        r1 = r1.reversed
    if sign(r2.step) < 0:
        r2 = r2.reversed

    # return clipped Range with positive step; it
    # can't be empty at this point
    start = max(r1.start, r2.start)
    stop = min(r1.stop, r2.stop)
    return Range(start, stop, step)

@dispatch(Range, Integers)
def intersection_sets(a, b): # noqa:F811
    return a

@dispatch(ImageSet, Set)
def intersection_sets(self, other): # noqa:F811
    from sympy.solvers.diophantine import diophantine

    # Only handle the straight-forward univariate case
    if (len(self.lamda.variables) > 1
            or self.lamda.signature != self.lamda.variables):
        return None
    base_set = self.base_sets[0]

    # Intersection between ImageSets with Integers as base set
    # For {f(n) : n in Integers} & {g(m) : m in Integers} we solve the
    # diophantine equations f(n)=g(m).
    # If the solutions for n are {h(t) : t in Integers} then we return
    # {f(h(t)) : t in integers}.
    # If the solutions for n are {n_1, n_2, ..., n_k} then we return
    # {f(n_i) : 1 <= i <= k}.
    if base_set is S.Integers:
        gm = None
        if isinstance(other, ImageSet) and other.base_sets == (S.Integers,):
            gm = other.lamda.expr
            var = other.lamda.variables[0]
            # Symbol of second ImageSet lambda must be distinct from first
            m = Dummy('m')
            gm = gm.subs(var, m)
        elif other is S.Integers:
            m = gm = Dummy('m')
        if gm is not None:
            fn = self.lamda.expr
            n = self.lamda.variables[0]
            try:
                solns = list(diophantine(fn - gm, syms=(n, m), permute=True))
            except (TypeError, NotImplementedError):
                # TypeError if equation not polynomial with rational coeff.
                # NotImplementedError if correct format but no solver.
                return
            # 3 cases are possible for solns:
            # - empty set,
            # - one or more parametric (infinite) solutions,
            # - a finite number of (non-parametric) solution couples.
            # Among those, there is one type of solution set that is
            # not helpful here: multiple parametric solutions.
            if len(solns) == 0:
                return EmptySet
            elif any(not isinstance(s, int) and s.free_symbols
                     for tupl in solns for s in tupl):
                if len(solns) == 1:
                    soln, solm = solns[0]
                    (t,) = soln.free_symbols
                    expr = fn.subs(n, soln.subs(t, n)).expand()
                    return imageset(Lambda(n, expr), S.Integers)
                else:
                    return
            else:
                return FiniteSet(*(fn.subs(n, s[0]) for s in solns))

    if other == S.Reals:
        from sympy.solvers.solveset import solveset_real
        from sympy.core.function import expand_complex

        f = self.lamda.expr
        n = self.lamda.variables[0]

        n_ = Dummy(n.name, real=True)
        f_ = f.subs(n, n_)

        re, im = f_.as_real_imag()
        im = expand_complex(im)

        re = re.subs(n_, n)
        im = im.subs(n_, n)
        ifree = im.free_symbols
        lam = Lambda(n, re)
        if not im:
            # allow re-evaluation
            # of self in this case to make
            # the result canonical
            pass
        elif im.is_zero is False:
            return S.EmptySet
        elif ifree != {n}:
            return None
        else:
            # univariate imaginary part in same variable
            base_set = base_set.intersect(solveset_real(im, n))
        return imageset(lam, base_set)

    elif isinstance(other, Interval):
        from sympy.solvers.solveset import (invert_real, invert_complex,
                                            solveset)

        f = self.lamda.expr
        n = self.lamda.variables[0]
        new_inf, new_sup = None, None
        new_lopen, new_ropen = other.left_open, other.right_open

        if f.is_real:
            inverter = invert_real
        else:
            inverter = invert_complex

        g1, h1 = inverter(f, other.inf, n)
        g2, h2 = inverter(f, other.sup, n)

        if all(isinstance(i, FiniteSet) for i in (h1, h2)):
            if g1 == n:
                if len(h1) == 1:
                    new_inf = h1.args[0]
            if g2 == n:
                if len(h2) == 1:
                    new_sup = h2.args[0]
            # TODO: Design a technique to handle multiple-inverse
            # functions

            # Any of the new boundary values cannot be determined
            if any(i is None for i in (new_sup, new_inf)):
                return

            range_set = S.EmptySet

            if all(i.is_real for i in (new_sup, new_inf)):
                # this assumes continuity of underlying function
                # however fixes the case when it is decreasing
                if new_inf > new_sup:
                    new_inf, new_sup = new_sup, new_inf
                new_interval = Interval(new_inf, new_sup, new_lopen, new_ropen)
                range_set = base_set.intersect(new_interval)
            else:
                if other.is_subset(S.Reals):
                    solutions = solveset(f, n, S.Reals)
                    if not isinstance(range_set, (ImageSet, ConditionSet)):
                        range_set = solutions.intersect(other)
                    else:
                        return

            if range_set is S.EmptySet:
                return S.EmptySet
            elif isinstance(range_set, Range) and range_set.size is not S.Infinity:
                range_set = FiniteSet(*list(range_set))

            if range_set is not None:
                return imageset(Lambda(n, f), range_set)
            return
        else:
            return

@dispatch(ProductSet, ProductSet)
def intersection_sets(a, b): # noqa:F811
    if len(b.args) != len(a.args):
        return S.EmptySet
    return ProductSet(*(i.intersect(j) for i, j in zip(a.sets, b.sets)))

@dispatch(Interval, Interval)
def intersection_sets(a, b): # noqa:F811
    # handle (-oo, oo)
    infty = S.NegativeInfinity, S.Infinity
    if a == Interval(*infty):
        l, r = a.left, a.right
        if l.is_real or l in infty or r.is_real or r in infty:
            return b

    # We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0
    if not a._is_comparable(b):
        return None

    empty = False

    if a.start <= b.end and b.start <= a.end:
        # Get topology right.
        if a.start < b.start:
            start = b.start
            left_open = b.left_open
        elif a.start > b.start:
            start = a.start
            left_open = a.left_open
        else:
            start = a.start
            left_open = a.left_open or b.left_open

        if a.end < b.end:
            end = a.end
            right_open = a.right_open
        elif a.end > b.end:
            end = b.end
            right_open = b.right_open
        else:
            end = a.end
            right_open = a.right_open or b.right_open

        if end - start == 0 and (left_open or right_open):
            empty = True
    else:
        empty = True

    if empty:
        return S.EmptySet

    return Interval(start, end, left_open, right_open)

@dispatch(type(EmptySet), Set)
def intersection_sets(a, b): # noqa:F811
    return S.EmptySet

@dispatch(UniversalSet, Set)
def intersection_sets(a, b): # noqa:F811
    return b

@dispatch(FiniteSet, FiniteSet)
def intersection_sets(a, b): # noqa:F811
    return FiniteSet(*(a._elements & b._elements))

@dispatch(FiniteSet, Set)
def intersection_sets(a, b): # noqa:F811
    try:
        return FiniteSet(*[el for el in a if el in b])
    except TypeError:
        return None  # could not evaluate `el in b` due to symbolic ranges.

@dispatch(Set, Set)
def intersection_sets(a, b): # noqa:F811
    return None

@dispatch(Integers, Rationals)
def intersection_sets(a, b): # noqa:F811
    return a

@dispatch(Naturals, Rationals)
def intersection_sets(a, b): # noqa:F811
    return a

@dispatch(Rationals, Reals)
def intersection_sets(a, b): # noqa:F811
    return a

def _intlike_interval(a, b):
    try:
        from sympy.functions.elementary.integers import floor, ceiling
        if b._inf is S.NegativeInfinity and b._sup is S.Infinity:
            return a
        s = Range(max(a.inf, ceiling(b.left)), floor(b.right) + 1)
        return intersection_sets(s, b)  # take out endpoints if open interval
    except ValueError:
        return None

@dispatch(Integers, Interval)
def intersection_sets(a, b): # noqa:F811
    return _intlike_interval(a, b)

@dispatch(Naturals, Interval)
def intersection_sets(a, b): # noqa:F811
    return _intlike_interval(a, b)
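These handlers are not called directly; they are reached through the public Set.intersect/Intersection machinery via multipledispatch. A short sketch (illustrative only, assuming a standard SymPy installation) of the Naturals/Interval and Range/Range cases defined above:

# Illustrative sketch: the handlers above are selected by multipledispatch.
from sympy import S, Interval, Range

# Naturals & Interval goes through _intlike_interval and the Range/Interval handler.
assert S.Naturals.intersect(Interval(2.5, 10)) == Range(3, 11)
# Range & Range builds the common sub-Range using the lcm of the steps.
assert Range(0, 20, 3).intersect(Range(0, 20, 2)) == Range(0, 20, 6)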
from sympy.core.compatibility import range, PY3 from sympy.core.expr import unchanged from sympy.sets.fancysets import (ImageSet, Range, normalize_theta_set, ComplexRegion) from sympy.sets.sets import (FiniteSet, Interval, imageset, Union, Intersection, ProductSet, Contains) from sympy.simplify.simplify import simplify from sympy import (S, Symbol, Lambda, symbols, cos, sin, pi, oo, Basic, Rational, sqrt, tan, log, exp, Abs, I, Tuple, eye, Dummy, floor, And, Eq) from sympy.utilities.iterables import cartes from sympy.utilities.pytest import XFAIL, raises from sympy.abc import x, y, t import itertools def test_naturals(): N = S.Naturals assert 5 in N assert -5 not in N assert 5.5 not in N ni = iter(N) a, b, c, d = next(ni), next(ni), next(ni), next(ni) assert (a, b, c, d) == (1, 2, 3, 4) assert isinstance(a, Basic) assert N.intersect(Interval(-5, 5)) == Range(1, 6) assert N.intersect(Interval(-5, 5, True, True)) == Range(1, 5) assert N.boundary == N assert N.is_open == False assert N.is_closed == True assert N.inf == 1 assert N.sup is oo assert not N.contains(oo) for s in (S.Naturals0, S.Naturals): assert s.intersection(S.Reals) is s assert s.is_subset(S.Reals) assert N.as_relational(x) == And(Eq(floor(x), x), x >= 1, x < oo) def test_naturals0(): N = S.Naturals0 assert 0 in N assert -1 not in N assert next(iter(N)) == 0 assert not N.contains(oo) assert N.contains(sin(x)) == Contains(sin(x), N) def test_integers(): Z = S.Integers assert 5 in Z assert -5 in Z assert 5.5 not in Z assert not Z.contains(oo) assert not Z.contains(-oo) zi = iter(Z) a, b, c, d = next(zi), next(zi), next(zi), next(zi) assert (a, b, c, d) == (0, 1, -1, 2) assert isinstance(a, Basic) assert Z.intersect(Interval(-5, 5)) == Range(-5, 6) assert Z.intersect(Interval(-5, 5, True, True)) == Range(-4, 5) assert Z.intersect(Interval(5, S.Infinity)) == Range(5, S.Infinity) assert Z.intersect(Interval.Lopen(5, S.Infinity)) == Range(6, S.Infinity) assert Z.inf is -oo assert Z.sup is oo assert Z.boundary == Z assert Z.is_open == False assert Z.is_closed == True assert Z.as_relational(x) == And(Eq(floor(x), x), -oo < x, x < oo) def test_ImageSet(): raises(ValueError, lambda: ImageSet(x, S.Integers)) assert ImageSet(Lambda(x, 1), S.Integers) == FiniteSet(1) assert ImageSet(Lambda(x, y), S.Integers) == {y} assert ImageSet(Lambda(x, 1), S.EmptySet) == S.EmptySet empty = Intersection(FiniteSet(log(2)/pi), S.Integers) assert unchanged(ImageSet, Lambda(x, 1), empty) # issue #17471 squares = ImageSet(Lambda(x, x**2), S.Naturals) assert 4 in squares assert 5 not in squares assert FiniteSet(*range(10)).intersect(squares) == FiniteSet(1, 4, 9) assert 16 not in squares.intersect(Interval(0, 10)) si = iter(squares) a, b, c, d = next(si), next(si), next(si), next(si) assert (a, b, c, d) == (1, 4, 9, 16) harmonics = ImageSet(Lambda(x, 1/x), S.Naturals) assert Rational(1, 5) in harmonics assert Rational(.25) in harmonics assert 0.25 not in harmonics assert Rational(.3) not in harmonics assert (1, 2) not in harmonics assert harmonics.is_iterable assert imageset(x, -x, Interval(0, 1)) == Interval(-1, 0) assert ImageSet(Lambda(x, x**2), Interval(0, 2)).doit() == Interval(0, 4) assert ImageSet(Lambda((x, y), 2*x), {4}, {3}).doit() == FiniteSet(8) assert (ImageSet(Lambda((x, y), x+y), {1, 2, 3}, {10, 20, 30}).doit() == FiniteSet(11, 12, 13, 21, 22, 23, 31, 32, 33)) c = Interval(1, 3) * Interval(1, 3) assert Tuple(2, 6) in ImageSet(Lambda(((x, y),), (x, 2*y)), c) assert Tuple(2, S.Half) in ImageSet(Lambda(((x, y),), (x, 1/y)), c) assert Tuple(2, -2) 
not in ImageSet(Lambda(((x, y),), (x, y**2)), c) assert Tuple(2, -2) in ImageSet(Lambda(((x, y),), (x, -2)), c) c3 = ProductSet(Interval(3, 7), Interval(8, 11), Interval(5, 9)) assert Tuple(8, 3, 9) in ImageSet(Lambda(((t, y, x),), (y, t, x)), c3) assert Tuple(Rational(1, 8), 3, 9) in ImageSet(Lambda(((t, y, x),), (1/y, t, x)), c3) assert 2/pi not in ImageSet(Lambda(((x, y),), 2/x), c) assert 2/S(100) not in ImageSet(Lambda(((x, y),), 2/x), c) assert Rational(2, 3) in ImageSet(Lambda(((x, y),), 2/x), c) S1 = imageset(lambda x, y: x + y, S.Integers, S.Naturals) assert S1.base_pset == ProductSet(S.Integers, S.Naturals) assert S1.base_sets == (S.Integers, S.Naturals) # Passing a set instead of a FiniteSet shouldn't raise assert unchanged(ImageSet, Lambda(x, x**2), {1, 2, 3}) S2 = ImageSet(Lambda(((x, y),), x+y), {(1, 2), (3, 4)}) assert 3 in S2.doit() # FIXME: This doesn't yet work: #assert 3 in S2 assert S2._contains(3) is None raises(TypeError, lambda: ImageSet(Lambda(x, x**2), 1)) def test_image_is_ImageSet(): assert isinstance(imageset(x, sqrt(sin(x)), Range(5)), ImageSet) def test_halfcircle(): r, th = symbols('r, theta', real=True) L = Lambda(((r, th),), (r*cos(th), r*sin(th))) halfcircle = ImageSet(L, Interval(0, 1)*Interval(0, pi)) assert (1, 0) in halfcircle assert (0, -1) not in halfcircle assert (0, 0) in halfcircle assert halfcircle._contains((r, 0)) is None # This one doesn't work: #assert (r, 2*pi) not in halfcircle assert not halfcircle.is_iterable def test_ImageSet_iterator_not_injective(): L = Lambda(x, x - x % 2) # produces 0, 2, 2, 4, 4, 6, 6, ... evens = ImageSet(L, S.Naturals) i = iter(evens) # No repeats here assert (next(i), next(i), next(i), next(i)) == (0, 2, 4, 6) def test_inf_Range_len(): raises(ValueError, lambda: len(Range(0, oo, 2))) assert Range(0, oo, 2).size is S.Infinity assert Range(0, -oo, -2).size is S.Infinity assert Range(oo, 0, -2).size is S.Infinity assert Range(-oo, 0, 2).size is S.Infinity def test_Range_set(): empty = Range(0) assert Range(5) == Range(0, 5) == Range(0, 5, 1) r = Range(10, 20, 2) assert 12 in r assert 8 not in r assert 11 not in r assert 30 not in r assert list(Range(0, 5)) == list(range(5)) assert list(Range(5, 0, -1)) == list(range(5, 0, -1)) assert Range(5, 15).sup == 14 assert Range(5, 15).inf == 5 assert Range(15, 5, -1).sup == 15 assert Range(15, 5, -1).inf == 6 assert Range(10, 67, 10).sup == 60 assert Range(60, 7, -10).inf == 10 assert len(Range(10, 38, 10)) == 3 assert Range(0, 0, 5) == empty assert Range(oo, oo, 1) == empty assert Range(oo, 1, 1) == empty assert Range(-oo, 1, -1) == empty assert Range(1, oo, -1) == empty assert Range(1, -oo, 1) == empty assert Range(1, -4, oo) == empty assert Range(1, -4, -oo) == Range(1, 2) assert Range(1, 4, oo) == Range(1, 2) assert Range(-oo, oo).size == oo assert Range(oo, -oo, -1).size == oo raises(ValueError, lambda: Range(-oo, oo, 2)) raises(ValueError, lambda: Range(x, pi, y)) raises(ValueError, lambda: Range(x, y, 0)) assert 5 in Range(0, oo, 5) assert -5 in Range(-oo, 0, 5) assert oo not in Range(0, oo) ni = symbols('ni', integer=False) assert ni not in Range(oo) u = symbols('u', integer=None) assert Range(oo).contains(u) is not False inf = symbols('inf', infinite=True) assert inf not in Range(-oo, oo) raises(ValueError, lambda: Range(0, oo, 2)[-1]) raises(ValueError, lambda: Range(0, -oo, -2)[-1]) assert Range(-oo, 1, 1)[-1] is S.Zero assert Range(oo, 1, -1)[-1] == 2 assert inf not in Range(oo) inf = symbols('inf', infinite=True) assert inf not in Range(oo) assert Range(-oo, 1, 
1)[-1] is S.Zero assert Range(oo, 1, -1)[-1] == 2 assert Range(1, 10, 1)[-1] == 9 assert all(i.is_Integer for i in Range(0, -1, 1)) it = iter(Range(-oo, 0, 2)) raises(TypeError, lambda: next(it)) assert empty.intersect(S.Integers) == empty assert Range(-1, 10, 1).intersect(S.Integers) == Range(-1, 10, 1) assert Range(-1, 10, 1).intersect(S.Naturals) == Range(1, 10, 1) assert Range(-1, 10, 1).intersect(S.Naturals0) == Range(0, 10, 1) # test slicing assert Range(1, 10, 1)[5] == 6 assert Range(1, 12, 2)[5] == 11 assert Range(1, 10, 1)[-1] == 9 assert Range(1, 10, 3)[-1] == 7 raises(ValueError, lambda: Range(oo,0,-1)[1:3:0]) raises(ValueError, lambda: Range(oo,0,-1)[:1]) raises(ValueError, lambda: Range(1, oo)[-2]) raises(ValueError, lambda: Range(-oo, 1)[2]) raises(IndexError, lambda: Range(10)[-20]) raises(IndexError, lambda: Range(10)[20]) raises(ValueError, lambda: Range(2, -oo, -2)[2:2:0]) assert Range(2, -oo, -2)[2:2:2] == empty assert Range(2, -oo, -2)[:2:2] == Range(2, -2, -4) raises(ValueError, lambda: Range(-oo, 4, 2)[:2:2]) assert Range(-oo, 4, 2)[::-2] == Range(2, -oo, -4) raises(ValueError, lambda: Range(-oo, 4, 2)[::2]) assert Range(oo, 2, -2)[::] == Range(oo, 2, -2) assert Range(-oo, 4, 2)[:-2:-2] == Range(2, 0, -4) assert Range(-oo, 4, 2)[:-2:2] == Range(-oo, 0, 4) raises(ValueError, lambda: Range(-oo, 4, 2)[:0:-2]) raises(ValueError, lambda: Range(-oo, 4, 2)[:2:-2]) assert Range(-oo, 4, 2)[-2::-2] == Range(0, -oo, -4) raises(ValueError, lambda: Range(-oo, 4, 2)[-2:0:-2]) raises(ValueError, lambda: Range(-oo, 4, 2)[0::2]) assert Range(oo, 2, -2)[0::] == Range(oo, 2, -2) raises(ValueError, lambda: Range(-oo, 4, 2)[0:-2:2]) assert Range(oo, 2, -2)[0:-2:] == Range(oo, 6, -2) raises(ValueError, lambda: Range(oo, 2, -2)[0:2:]) raises(ValueError, lambda: Range(-oo, 4, 2)[2::-1]) assert Range(-oo, 4, 2)[-2::2] == Range(0, 4, 4) assert Range(oo, 0, -2)[-10:0:2] == empty raises(ValueError, lambda: Range(oo, 0, -2)[-10:10:2]) raises(ValueError, lambda: Range(oo, 0, -2)[0::-2]) assert Range(oo, 0, -2)[0:-4:-2] == empty assert Range(oo, 0, -2)[:0:2] == empty raises(ValueError, lambda: Range(oo, 0, -2)[:1:-1]) # test empty Range assert Range(x, x, y) == empty assert empty.reversed == empty assert 0 not in empty assert list(empty) == [] assert len(empty) == 0 assert empty.size is S.Zero assert empty.intersect(FiniteSet(0)) is S.EmptySet assert bool(empty) is False raises(IndexError, lambda: empty[0]) assert empty[:0] == empty raises(NotImplementedError, lambda: empty.inf) raises(NotImplementedError, lambda: empty.sup) AB = [None] + list(range(12)) for R in [ Range(1, 10), Range(1, 10, 2), ]: r = list(R) for a, b, c in cartes(AB, AB, [-3, -1, None, 1, 3]): for reverse in range(2): r = list(reversed(r)) R = R.reversed result = list(R[a:b:c]) ans = r[a:b:c] txt = ('\n%s[%s:%s:%s] = %s -> %s' % ( R, a, b, c, result, ans)) check = ans == result assert check, txt assert Range(1, 10, 1).boundary == Range(1, 10, 1) for r in (Range(1, 10, 2), Range(1, oo, 2)): rev = r.reversed assert r.inf == rev.inf and r.sup == rev.sup assert r.step == -rev.step # Make sure to use range in Python 3 and xrange in Python 2 (regardless of # compatibility imports above) if PY3: builtin_range = range else: builtin_range = xrange # noqa raises(TypeError, lambda: Range(builtin_range(1))) assert S(builtin_range(10)) == Range(10) if PY3: assert S(builtin_range(1000000000000)) == \ Range(1000000000000) # test Range.as_relational assert Range(1, 4).as_relational(x) == (x >= 1) & (x <= 3) & Eq(x, floor(x)) assert Range(oo, 1, 
-2).as_relational(x) == (x >= 3) & (x < oo) & Eq(x, floor(x)) def test_Range_symbolic(): # symbolic Range sr = Range(x, y, t) i = Symbol('i', integer=True) ip = Symbol('i', integer=True, positive=True) ir = Range(i, i + 20, 2) inf = symbols('inf', infinite=True) # args assert sr.args == (x, y, t) assert ir.args == (i, i + 20, 2) # reversed raises(ValueError, lambda: sr.reversed) assert ir.reversed == Range(i + 18, i - 2, -2) # contains assert inf not in sr assert inf not in ir assert .1 not in sr assert .1 not in ir assert i + 1 not in ir assert i + 2 in ir raises(TypeError, lambda: 1 in sr) # XXX is this what contains is supposed to do? # iter raises(ValueError, lambda: next(iter(sr))) assert next(iter(ir)) == i assert sr.intersect(S.Integers) == sr assert sr.intersect(FiniteSet(x)) == Intersection({x}, sr) raises(ValueError, lambda: sr[:2]) raises(ValueError, lambda: sr[0]) raises(ValueError, lambda: sr.as_relational(x)) # len assert len(ir) == ir.size == 10 raises(ValueError, lambda: len(sr)) raises(ValueError, lambda: sr.size) # bool assert bool(ir) == bool(sr) == True # getitem raises(ValueError, lambda: sr[0]) raises(ValueError, lambda: sr[-1]) raises(ValueError, lambda: sr[:2]) assert ir[:2] == Range(i, i + 4, 2) assert ir[0] == i assert ir[-2] == i + 16 assert ir[-1] == i + 18 raises(ValueError, lambda: Range(i)[-1]) assert Range(ip)[-1] == ip - 1 assert ir.inf == i assert ir.sup == i + 18 assert Range(ip).inf == 0 assert Range(ip).sup == ip - 1 raises(ValueError, lambda: Range(i).inf) # as_relational raises(ValueError, lambda: sr.as_relational(x)) assert ir.as_relational(x) == ( x >= i) & Eq(x, floor(x)) & (x <= i + 18) assert Range(i, i + 1).as_relational(x) == Eq(x, i) # contains() for symbolic values (issue #18146) e = Symbol('e', integer=True, even=True) o = Symbol('o', integer=True, odd=True) assert Range(5).contains(i) == And(i >= 0, i <= 4) assert Range(1).contains(i) == Eq(i, 0) assert Range(-oo, 5, 1).contains(i) == (i <= 4) assert Range(-oo, oo).contains(i) == True assert Range(0, 8, 2).contains(i) == Contains(i, Range(0, 8, 2)) assert Range(0, 8, 2).contains(e) == And(e >= 0, e <= 6) assert Range(0, 8, 2).contains(2*i) == And(2*i >= 0, 2*i <= 6) assert Range(0, 8, 2).contains(o) == False assert Range(1, 9, 2).contains(e) == False assert Range(1, 9, 2).contains(o) == And(o >= 1, o <= 7) assert Range(8, 0, -2).contains(o) == False assert Range(9, 1, -2).contains(o) == And(o >= 3, o <= 9) assert Range(-oo, 8, 2).contains(i) == Contains(i, Range(-oo, 8, 2)) def test_range_range_intersection(): for a, b, r in [ (Range(0), Range(1), S.EmptySet), (Range(3), Range(4, oo), S.EmptySet), (Range(3), Range(-3, -1), S.EmptySet), (Range(1, 3), Range(0, 3), Range(1, 3)), (Range(1, 3), Range(1, 4), Range(1, 3)), (Range(1, oo, 2), Range(2, oo, 2), S.EmptySet), (Range(0, oo, 2), Range(oo), Range(0, oo, 2)), (Range(0, oo, 2), Range(100), Range(0, 100, 2)), (Range(2, oo, 2), Range(oo), Range(2, oo, 2)), (Range(0, oo, 2), Range(5, 6), S.EmptySet), (Range(2, 80, 1), Range(55, 71, 4), Range(55, 71, 4)), (Range(0, 6, 3), Range(-oo, 5, 3), S.EmptySet), (Range(0, oo, 2), Range(5, oo, 3), Range(8, oo, 6)), (Range(4, 6, 2), Range(2, 16, 7), S.EmptySet),]: assert a.intersect(b) == r assert a.intersect(b.reversed) == r assert a.reversed.intersect(b) == r assert a.reversed.intersect(b.reversed) == r a, b = b, a assert a.intersect(b) == r assert a.intersect(b.reversed) == r assert a.reversed.intersect(b) == r assert a.reversed.intersect(b.reversed) == r def test_range_interval_intersection(): p = 
symbols('p', positive=True) assert isinstance(Range(3).intersect(Interval(p, p + 2)), Intersection) assert Range(4).intersect(Interval(0, 3)) == Range(4) assert Range(4).intersect(Interval(-oo, oo)) == Range(4) assert Range(4).intersect(Interval(1, oo)) == Range(1, 4) assert Range(4).intersect(Interval(1.1, oo)) == Range(2, 4) assert Range(4).intersect(Interval(0.1, 3)) == Range(1, 4) assert Range(4).intersect(Interval(0.1, 3.1)) == Range(1, 4) assert Range(4).intersect(Interval.open(0, 3)) == Range(1, 3) assert Range(4).intersect(Interval.open(0.1, 0.5)) is S.EmptySet # Null Range intersections assert Range(0).intersect(Interval(0.2, 0.8)) is S.EmptySet assert Range(0).intersect(Interval(-oo, oo)) is S.EmptySet def test_Integers_eval_imageset(): ans = ImageSet(Lambda(x, 2*x + Rational(3, 7)), S.Integers) im = imageset(Lambda(x, -2*x + Rational(3, 7)), S.Integers) assert im == ans im = imageset(Lambda(x, -2*x - Rational(11, 7)), S.Integers) assert im == ans y = Symbol('y') L = imageset(x, 2*x + y, S.Integers) assert y + 4 in L _x = symbols('x', negative=True) eq = _x**2 - _x + 1 assert imageset(_x, eq, S.Integers).lamda.expr == _x**2 + _x + 1 eq = 3*_x - 1 assert imageset(_x, eq, S.Integers).lamda.expr == 3*_x + 2 assert imageset(x, (x, 1/x), S.Integers) == \ ImageSet(Lambda(x, (x, 1/x)), S.Integers) def test_Range_eval_imageset(): a, b, c = symbols('a b c') assert imageset(x, a*(x + b) + c, Range(3)) == \ imageset(x, a*x + a*b + c, Range(3)) eq = (x + 1)**2 assert imageset(x, eq, Range(3)).lamda.expr == eq eq = a*(x + b) + c r = Range(3, -3, -2) imset = imageset(x, eq, r) assert imset.lamda.expr != eq assert list(imset) == [eq.subs(x, i).expand() for i in list(r)] def test_fun(): assert (FiniteSet(*ImageSet(Lambda(x, sin(pi*x/4)), Range(-10, 11))) == FiniteSet(-1, -sqrt(2)/2, 0, sqrt(2)/2, 1)) def test_Reals(): assert 5 in S.Reals assert S.Pi in S.Reals assert -sqrt(2) in S.Reals assert (2, 5) not in S.Reals assert sqrt(-1) not in S.Reals assert S.Reals == Interval(-oo, oo) assert S.Reals != Interval(0, oo) assert S.Reals.is_subset(Interval(-oo, oo)) assert S.Reals.intersect(Range(-oo, oo)) == Range(-oo, oo) def test_Complex(): assert 5 in S.Complexes assert 5 + 4*I in S.Complexes assert S.Pi in S.Complexes assert -sqrt(2) in S.Complexes assert -I in S.Complexes assert sqrt(-1) in S.Complexes assert S.Complexes.intersect(S.Reals) == S.Reals assert S.Complexes.union(S.Reals) == S.Complexes assert S.Complexes == ComplexRegion(S.Reals*S.Reals) assert (S.Complexes == ComplexRegion(Interval(1, 2)*Interval(3, 4))) == False assert str(S.Complexes) == "S.Complexes" assert repr(S.Complexes) == "S.Complexes" def take(n, iterable): "Return first n items of the iterable as a list" return list(itertools.islice(iterable, n)) def test_intersections(): assert S.Integers.intersect(S.Reals) == S.Integers assert 5 in S.Integers.intersect(S.Reals) assert 5 in S.Integers.intersect(S.Reals) assert -5 not in S.Naturals.intersect(S.Reals) assert 5.5 not in S.Integers.intersect(S.Reals) assert 5 in S.Integers.intersect(Interval(3, oo)) assert -5 in S.Integers.intersect(Interval(-oo, 3)) assert all(x.is_Integer for x in take(10, S.Integers.intersect(Interval(3, oo)) )) def test_infinitely_indexed_set_1(): from sympy.abc import n, m, t assert imageset(Lambda(n, n), S.Integers) == imageset(Lambda(m, m), S.Integers) assert imageset(Lambda(n, 2*n), S.Integers).intersect( imageset(Lambda(m, 2*m + 1), S.Integers)) is S.EmptySet assert imageset(Lambda(n, 2*n), S.Integers).intersect( imageset(Lambda(n, 2*n + 1), 
S.Integers)) is S.EmptySet assert imageset(Lambda(m, 2*m), S.Integers).intersect( imageset(Lambda(n, 3*n), S.Integers)) == \ ImageSet(Lambda(t, 6*t), S.Integers) assert imageset(x, x/2 + Rational(1, 3), S.Integers).intersect(S.Integers) is S.EmptySet assert imageset(x, x/2 + S.Half, S.Integers).intersect(S.Integers) is S.Integers # https://github.com/sympy/sympy/issues/17355 S53 = ImageSet(Lambda(n, 5*n + 3), S.Integers) assert S53.intersect(S.Integers) == S53 def test_infinitely_indexed_set_2(): from sympy.abc import n a = Symbol('a', integer=True) assert imageset(Lambda(n, n), S.Integers) == \ imageset(Lambda(n, n + a), S.Integers) assert imageset(Lambda(n, n + pi), S.Integers) == \ imageset(Lambda(n, n + a + pi), S.Integers) assert imageset(Lambda(n, n), S.Integers) == \ imageset(Lambda(n, -n + a), S.Integers) assert imageset(Lambda(n, -6*n), S.Integers) == \ ImageSet(Lambda(n, 6*n), S.Integers) assert imageset(Lambda(n, 2*n + pi), S.Integers) == \ ImageSet(Lambda(n, 2*n + pi - 2), S.Integers) def test_imageset_intersect_real(): from sympy import I from sympy.abc import n assert imageset(Lambda(n, n + (n - 1)*(n + 1)*I), S.Integers).intersect(S.Reals) == \ FiniteSet(-1, 1) s = ImageSet( Lambda(n, -I*(I*(2*pi*n - pi/4) + log(Abs(sqrt(-I))))), S.Integers) # s is unevaluated, but after intersection the result # should be canonical assert s.intersect(S.Reals) == imageset( Lambda(n, 2*n*pi - pi/4), S.Integers) == ImageSet( Lambda(n, 2*pi*n + pi*Rational(7, 4)), S.Integers) def test_imageset_intersect_interval(): from sympy.abc import n f1 = ImageSet(Lambda(n, n*pi), S.Integers) f2 = ImageSet(Lambda(n, 2*n), Interval(0, pi)) f3 = ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers) # complex expressions f4 = ImageSet(Lambda(n, n*I*pi), S.Integers) f5 = ImageSet(Lambda(n, 2*I*n*pi + pi/2), S.Integers) # non-linear expressions f6 = ImageSet(Lambda(n, log(n)), S.Integers) f7 = ImageSet(Lambda(n, n**2), S.Integers) f8 = ImageSet(Lambda(n, Abs(n)), S.Integers) f9 = ImageSet(Lambda(n, exp(n)), S.Naturals0) assert f1.intersect(Interval(-1, 1)) == FiniteSet(0) assert f1.intersect(Interval(0, 2*pi, False, True)) == FiniteSet(0, pi) assert f2.intersect(Interval(1, 2)) == Interval(1, 2) assert f3.intersect(Interval(-1, 1)) == S.EmptySet assert f3.intersect(Interval(-5, 5)) == FiniteSet(pi*Rational(-3, 2), pi/2) assert f4.intersect(Interval(-1, 1)) == FiniteSet(0) assert f4.intersect(Interval(1, 2)) == S.EmptySet assert f5.intersect(Interval(0, 1)) == S.EmptySet assert f6.intersect(Interval(0, 1)) == FiniteSet(S.Zero, log(2)) assert f7.intersect(Interval(0, 10)) == Intersection(f7, Interval(0, 10)) assert f8.intersect(Interval(0, 2)) == Intersection(f8, Interval(0, 2)) assert f9.intersect(Interval(1, 2)) == Intersection(f9, Interval(1, 2)) def test_imageset_intersect_diophantine(): from sympy.abc import m, n # Check that same lambda variable for both ImageSets is handled correctly img1 = ImageSet(Lambda(n, 2*n + 1), S.Integers) img2 = ImageSet(Lambda(n, 4*n + 1), S.Integers) assert img1.intersect(img2) == img2 # Empty solution set returned by diophantine: assert ImageSet(Lambda(n, 2*n), S.Integers).intersect( ImageSet(Lambda(n, 2*n + 1), S.Integers)) == S.EmptySet # Check intersection with S.Integers: assert ImageSet(Lambda(n, 9/n + 20*n/3), S.Integers).intersect( S.Integers) == FiniteSet(-61, -23, 23, 61) # Single solution (2, 3) for diophantine solution: assert ImageSet(Lambda(n, (n - 2)**2), S.Integers).intersect( ImageSet(Lambda(n, -(n - 3)**2), S.Integers)) == FiniteSet(0) # Single parametric solution for 
diophantine solution: assert ImageSet(Lambda(n, n**2 + 5), S.Integers).intersect( ImageSet(Lambda(m, 2*m), S.Integers)) == ImageSet( Lambda(n, 4*n**2 + 4*n + 6), S.Integers) # 4 non-parametric solution couples for dioph. equation: assert ImageSet(Lambda(n, n**2 - 9), S.Integers).intersect( ImageSet(Lambda(m, -m**2), S.Integers)) == FiniteSet(-9, 0) # Double parametric solution for diophantine solution: assert ImageSet(Lambda(m, m**2 + 40), S.Integers).intersect( ImageSet(Lambda(n, 41*n), S.Integers)) == Intersection( ImageSet(Lambda(m, m**2 + 40), S.Integers), ImageSet(Lambda(n, 41*n), S.Integers)) # Check that diophantine returns *all* (8) solutions (permute=True) assert ImageSet(Lambda(n, n**4 - 2**4), S.Integers).intersect( ImageSet(Lambda(m, -m**4 + 3**4), S.Integers)) == FiniteSet(0, 65) assert ImageSet(Lambda(n, pi/12 + n*5*pi/12), S.Integers).intersect( ImageSet(Lambda(n, 7*pi/12 + n*11*pi/12), S.Integers)) == ImageSet( Lambda(n, 55*pi*n/12 + 17*pi/4), S.Integers) # TypeError raised by diophantine (#18081) assert ImageSet(Lambda(n, n*log(2)), S.Integers).intersection(S.Integers) \ == Intersection(ImageSet(Lambda(n, n*log(2)), S.Integers), S.Integers) # NotImplementedError raised by diophantine (no solver for cubic_thue) assert ImageSet(Lambda(n, n**3 + 1), S.Integers).intersect( ImageSet(Lambda(n, n**3), S.Integers)) == Intersection( ImageSet(Lambda(n, n**3 + 1), S.Integers), ImageSet(Lambda(n, n**3), S.Integers)) def test_infinitely_indexed_set_3(): from sympy.abc import n, m, t assert imageset(Lambda(m, 2*pi*m), S.Integers).intersect( imageset(Lambda(n, 3*pi*n), S.Integers)) == \ ImageSet(Lambda(t, 6*pi*t), S.Integers) assert imageset(Lambda(n, 2*n + 1), S.Integers) == \ imageset(Lambda(n, 2*n - 1), S.Integers) assert imageset(Lambda(n, 3*n + 2), S.Integers) == \ imageset(Lambda(n, 3*n - 1), S.Integers) def test_ImageSet_simplification(): from sympy.abc import n, m assert imageset(Lambda(n, n), S.Integers) == S.Integers assert imageset(Lambda(n, sin(n)), imageset(Lambda(m, tan(m)), S.Integers)) == \ imageset(Lambda(m, sin(tan(m))), S.Integers) assert imageset(n, 1 + 2*n, S.Naturals) == Range(3, oo, 2) assert imageset(n, 1 + 2*n, S.Naturals0) == Range(1, oo, 2) assert imageset(n, 1 - 2*n, S.Naturals) == Range(-1, -oo, -2) def test_ImageSet_contains(): from sympy.abc import x assert (2, S.Half) in imageset(x, (x, 1/x), S.Integers) assert imageset(x, x + I*3, S.Integers).intersection(S.Reals) is S.EmptySet i = Dummy(integer=True) q = imageset(x, x + I*y, S.Integers).intersection(S.Reals) assert q.subs(y, I*i).intersection(S.Integers) is S.Integers q = imageset(x, x + I*y/x, S.Integers).intersection(S.Reals) assert q.subs(y, 0) is S.Integers assert q.subs(y, I*i*x).intersection(S.Integers) is S.Integers z = cos(1)**2 + sin(1)**2 - 1 q = imageset(x, x + I*z, S.Integers).intersection(S.Reals) assert q is not S.EmptySet def test_ComplexRegion_contains(): # contains in ComplexRegion a = Interval(2, 3) b = Interval(4, 6) c = Interval(7, 9) c1 = ComplexRegion(a*b) c2 = ComplexRegion(Union(a*b, c*a)) assert 2.5 + 4.5*I in c1 assert 2 + 4*I in c1 assert 3 + 4*I in c1 assert 8 + 2.5*I in c2 assert 2.5 + 6.1*I not in c1 assert 4.5 + 3.2*I not in c1 r1 = Interval(0, 1) theta1 = Interval(0, 2*S.Pi) c3 = ComplexRegion(r1*theta1, polar=True) assert (0.5 + I*Rational(6, 10)) in c3 assert (S.Half + I*Rational(6, 10)) in c3 assert (S.Half + .6*I) in c3 assert (0.5 + .6*I) in c3 assert I in c3 assert 1 in c3 assert 0 in c3 assert 1 + I not in c3 assert 1 - I not in c3 raises(ValueError, lambda: 
ComplexRegion(r1*theta1, polar=2)) def test_ComplexRegion_intersect(): # Polar form X_axis = ComplexRegion(Interval(0, oo)*FiniteSet(0, S.Pi), polar=True) unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True) upper_half_disk = ComplexRegion(Interval(0, oo)*Interval(0, S.Pi), polar=True) lower_half_disk = ComplexRegion(Interval(0, oo)*Interval(S.Pi, 2*S.Pi), polar=True) right_half_disk = ComplexRegion(Interval(0, oo)*Interval(-S.Pi/2, S.Pi/2), polar=True) first_quad_disk = ComplexRegion(Interval(0, oo)*Interval(0, S.Pi/2), polar=True) assert upper_half_disk.intersect(unit_disk) == upper_half_unit_disk assert right_half_disk.intersect(first_quad_disk) == first_quad_disk assert upper_half_disk.intersect(right_half_disk) == first_quad_disk assert upper_half_disk.intersect(lower_half_disk) == X_axis c1 = ComplexRegion(Interval(0, 4)*Interval(0, 2*S.Pi), polar=True) assert c1.intersect(Interval(1, 5)) == Interval(1, 4) assert c1.intersect(Interval(4, 9)) == FiniteSet(4) assert c1.intersect(Interval(5, 12)) is S.EmptySet # Rectangular form X_axis = ComplexRegion(Interval(-oo, oo)*FiniteSet(0)) unit_square = ComplexRegion(Interval(-1, 1)*Interval(-1, 1)) upper_half_unit_square = ComplexRegion(Interval(-1, 1)*Interval(0, 1)) upper_half_plane = ComplexRegion(Interval(-oo, oo)*Interval(0, oo)) lower_half_plane = ComplexRegion(Interval(-oo, oo)*Interval(-oo, 0)) right_half_plane = ComplexRegion(Interval(0, oo)*Interval(-oo, oo)) first_quad_plane = ComplexRegion(Interval(0, oo)*Interval(0, oo)) assert upper_half_plane.intersect(unit_square) == upper_half_unit_square assert right_half_plane.intersect(first_quad_plane) == first_quad_plane assert upper_half_plane.intersect(right_half_plane) == first_quad_plane assert upper_half_plane.intersect(lower_half_plane) == X_axis c1 = ComplexRegion(Interval(-5, 5)*Interval(-10, 10)) assert c1.intersect(Interval(2, 7)) == Interval(2, 5) assert c1.intersect(Interval(5, 7)) == FiniteSet(5) assert c1.intersect(Interval(6, 9)) is S.EmptySet # unevaluated object C1 = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) C2 = ComplexRegion(Interval(-1, 1)*Interval(-1, 1)) assert C1.intersect(C2) == Intersection(C1, C2, evaluate=False) def test_ComplexRegion_union(): # Polar form c1 = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) c2 = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True) c3 = ComplexRegion(Interval(0, oo)*Interval(0, S.Pi), polar=True) c4 = ComplexRegion(Interval(0, oo)*Interval(S.Pi, 2*S.Pi), polar=True) p1 = Union(Interval(0, 1)*Interval(0, 2*S.Pi), Interval(0, 1)*Interval(0, S.Pi)) p2 = Union(Interval(0, oo)*Interval(0, S.Pi), Interval(0, oo)*Interval(S.Pi, 2*S.Pi)) assert c1.union(c2) == ComplexRegion(p1, polar=True) assert c3.union(c4) == ComplexRegion(p2, polar=True) # Rectangular form c5 = ComplexRegion(Interval(2, 5)*Interval(6, 9)) c6 = ComplexRegion(Interval(4, 6)*Interval(10, 12)) c7 = ComplexRegion(Interval(0, 10)*Interval(-10, 0)) c8 = ComplexRegion(Interval(12, 16)*Interval(14, 20)) p3 = Union(Interval(2, 5)*Interval(6, 9), Interval(4, 6)*Interval(10, 12)) p4 = Union(Interval(0, 10)*Interval(-10, 0), Interval(12, 16)*Interval(14, 20)) assert c5.union(c6) == ComplexRegion(p3) assert c7.union(c8) == ComplexRegion(p4) assert c1.union(Interval(2, 4)) == Union(c1, Interval(2, 4), evaluate=False) assert c5.union(Interval(2, 4)) == Union(c5, ComplexRegion.from_real(Interval(2, 4))) def test_ComplexRegion_from_real(): 
c1 = ComplexRegion(Interval(0, 1) * Interval(0, 2 * S.Pi), polar=True) raises(ValueError, lambda: c1.from_real(c1)) assert c1.from_real(Interval(-1, 1)) == ComplexRegion(Interval(-1, 1) * FiniteSet(0), False) def test_ComplexRegion_measure(): a, b = Interval(2, 5), Interval(4, 8) theta1, theta2 = Interval(0, 2*S.Pi), Interval(0, S.Pi) c1 = ComplexRegion(a*b) c2 = ComplexRegion(Union(a*theta1, b*theta2), polar=True) assert c1.measure == 12 assert c2.measure == 9*pi def test_normalize_theta_set(): # Interval assert normalize_theta_set(Interval(pi, 2*pi)) == \ Union(FiniteSet(0), Interval.Ropen(pi, 2*pi)) assert normalize_theta_set(Interval(pi*Rational(9, 2), 5*pi)) == Interval(pi/2, pi) assert normalize_theta_set(Interval(pi*Rational(-3, 2), pi/2)) == Interval.Ropen(0, 2*pi) assert normalize_theta_set(Interval.open(pi*Rational(-3, 2), pi/2)) == \ Union(Interval.Ropen(0, pi/2), Interval.open(pi/2, 2*pi)) assert normalize_theta_set(Interval.open(pi*Rational(-7, 2), pi*Rational(-3, 2))) == \ Union(Interval.Ropen(0, pi/2), Interval.open(pi/2, 2*pi)) assert normalize_theta_set(Interval(-pi/2, pi/2)) == \ Union(Interval(0, pi/2), Interval.Ropen(pi*Rational(3, 2), 2*pi)) assert normalize_theta_set(Interval.open(-pi/2, pi/2)) == \ Union(Interval.Ropen(0, pi/2), Interval.open(pi*Rational(3, 2), 2*pi)) assert normalize_theta_set(Interval(-4*pi, 3*pi)) == Interval.Ropen(0, 2*pi) assert normalize_theta_set(Interval(pi*Rational(-3, 2), -pi/2)) == Interval(pi/2, pi*Rational(3, 2)) assert normalize_theta_set(Interval.open(0, 2*pi)) == Interval.open(0, 2*pi) assert normalize_theta_set(Interval.Ropen(-pi/2, pi/2)) == \ Union(Interval.Ropen(0, pi/2), Interval.Ropen(pi*Rational(3, 2), 2*pi)) assert normalize_theta_set(Interval.Lopen(-pi/2, pi/2)) == \ Union(Interval(0, pi/2), Interval.open(pi*Rational(3, 2), 2*pi)) assert normalize_theta_set(Interval(-pi/2, pi/2)) == \ Union(Interval(0, pi/2), Interval.Ropen(pi*Rational(3, 2), 2*pi)) assert normalize_theta_set(Interval.open(4*pi, pi*Rational(9, 2))) == Interval.open(0, pi/2) assert normalize_theta_set(Interval.Lopen(4*pi, pi*Rational(9, 2))) == Interval.Lopen(0, pi/2) assert normalize_theta_set(Interval.Ropen(4*pi, pi*Rational(9, 2))) == Interval.Ropen(0, pi/2) assert normalize_theta_set(Interval.open(3*pi, 5*pi)) == \ Union(Interval.Ropen(0, pi), Interval.open(pi, 2*pi)) # FiniteSet assert normalize_theta_set(FiniteSet(0, pi, 3*pi)) == FiniteSet(0, pi) assert normalize_theta_set(FiniteSet(0, pi/2, pi, 2*pi)) == FiniteSet(0, pi/2, pi) assert normalize_theta_set(FiniteSet(0, -pi/2, -pi, -2*pi)) == FiniteSet(0, pi, pi*Rational(3, 2)) assert normalize_theta_set(FiniteSet(pi*Rational(-3, 2), pi/2)) == \ FiniteSet(pi/2) assert normalize_theta_set(FiniteSet(2*pi)) == FiniteSet(0) # Unions assert normalize_theta_set(Union(Interval(0, pi/3), Interval(pi/2, pi))) == \ Union(Interval(0, pi/3), Interval(pi/2, pi)) assert normalize_theta_set(Union(Interval(0, pi), Interval(2*pi, pi*Rational(7, 3)))) == \ Interval(0, pi) # ValueError for non-real sets raises(ValueError, lambda: normalize_theta_set(S.Complexes)) # NotImplementedError for subset of reals raises(NotImplementedError, lambda: normalize_theta_set(Interval(0, 1))) # NotImplementedError without pi as coefficient raises(NotImplementedError, lambda: normalize_theta_set(Interval(1, 2*pi))) raises(NotImplementedError, lambda: normalize_theta_set(Interval(2*pi, 10))) raises(NotImplementedError, lambda: normalize_theta_set(FiniteSet(0, 3, 3*pi))) def test_ComplexRegion_FiniteSet(): x, y, z, a, b, c = symbols('x y z a b 
c') # Issue #9669 assert ComplexRegion(FiniteSet(a, b, c)*FiniteSet(x, y, z)) == \ FiniteSet(a + I*x, a + I*y, a + I*z, b + I*x, b + I*y, b + I*z, c + I*x, c + I*y, c + I*z) assert ComplexRegion(FiniteSet(2)*FiniteSet(3)) == FiniteSet(2 + 3*I) def test_union_RealSubSet(): assert (S.Complexes).union(Interval(1, 2)) == S.Complexes assert (S.Complexes).union(S.Integers) == S.Complexes def test_issue_9980(): c1 = ComplexRegion(Interval(1, 2)*Interval(2, 3)) c2 = ComplexRegion(Interval(1, 5)*Interval(1, 3)) R = Union(c1, c2) assert simplify(R) == ComplexRegion(Union(Interval(1, 2)*Interval(2, 3), \ Interval(1, 5)*Interval(1, 3)), False) assert c1.func(*c1.args) == c1 assert R.func(*R.args) == R def test_issue_11732(): interval12 = Interval(1, 2) finiteset1234 = FiniteSet(1, 2, 3, 4) pointComplex = Tuple(1, 5) assert (interval12 in S.Naturals) == False assert (interval12 in S.Naturals0) == False assert (interval12 in S.Integers) == False assert (interval12 in S.Complexes) == False assert (finiteset1234 in S.Naturals) == False assert (finiteset1234 in S.Naturals0) == False assert (finiteset1234 in S.Integers) == False assert (finiteset1234 in S.Complexes) == False assert (pointComplex in S.Naturals) == False assert (pointComplex in S.Naturals0) == False assert (pointComplex in S.Integers) == False assert (pointComplex in S.Complexes) == True def test_issue_11730(): unit = Interval(0, 1) square = ComplexRegion(unit ** 2) assert Union(S.Complexes, FiniteSet(oo)) != S.Complexes assert Union(S.Complexes, FiniteSet(eye(4))) != S.Complexes assert Union(unit, square) == square assert Intersection(S.Reals, square) == unit def test_issue_11938(): unit = Interval(0, 1) ival = Interval(1, 2) cr1 = ComplexRegion(ival * unit) assert Intersection(cr1, S.Reals) == ival assert Intersection(cr1, unit) == FiniteSet(1) arg1 = Interval(0, S.Pi) arg2 = FiniteSet(S.Pi) arg3 = Interval(S.Pi / 4, 3 * S.Pi / 4) cp1 = ComplexRegion(unit * arg1, polar=True) cp2 = ComplexRegion(unit * arg2, polar=True) cp3 = ComplexRegion(unit * arg3, polar=True) assert Intersection(cp1, S.Reals) == Interval(-1, 1) assert Intersection(cp2, S.Reals) == Interval(-1, 0) assert Intersection(cp3, S.Reals) == FiniteSet(0) def test_issue_11914(): a, b = Interval(0, 1), Interval(0, pi) c, d = Interval(2, 3), Interval(pi, 3 * pi / 2) cp1 = ComplexRegion(a * b, polar=True) cp2 = ComplexRegion(c * d, polar=True) assert -3 in cp1.union(cp2) assert -3 in cp2.union(cp1) assert -5 not in cp1.union(cp2) def test_issue_9543(): assert ImageSet(Lambda(x, x**2), S.Naturals).is_subset(S.Reals) def test_issue_16871(): assert ImageSet(Lambda(x, x), FiniteSet(1)) == {1} assert ImageSet(Lambda(x, x - 3), S.Integers ).intersection(S.Integers) is S.Integers @XFAIL def test_issue_16871b(): assert ImageSet(Lambda(x, x - 3), S.Integers).is_subset(S.Integers) def test_issue_18050(): assert imageset(Lambda(x, I*x + 1), S.Integers ) == ImageSet(Lambda(x, I*x + 1), S.Integers) assert imageset(Lambda(x, 3*I*x + 4 + 8*I), S.Integers ) == ImageSet(Lambda(x, 3*I*x + 4 + 2*I), S.Integers) # no 'Mod' for next 2 tests: assert imageset(Lambda(x, 2*x + 3*I), S.Integers ) == ImageSet(Lambda(x, 2*x + 3*I), S.Integers) r = Symbol('r', positive=True) assert imageset(Lambda(x, r*x + 10), S.Integers ) == ImageSet(Lambda(x, r*x + 10), S.Integers) # reduce real part: assert imageset(Lambda(x, 3*x + 8 + 5*I), S.Integers ) == ImageSet(Lambda(x, 3*x + 2 + 5*I), S.Integers) def test_Rationals(): assert S.Integers.is_subset(S.Rationals) assert S.Naturals.is_subset(S.Rationals) assert 
S.Naturals0.is_subset(S.Rationals) assert S.Rationals.is_subset(S.Reals) assert S.Rationals.inf is -oo assert S.Rationals.sup is oo it = iter(S.Rationals) assert [next(it) for i in range(12)] == [ 0, 1, -1, S.Half, 2, Rational(-1, 2), -2, Rational(1, 3), 3, Rational(-1, 3), -3, Rational(2, 3)] assert Basic() not in S.Rationals assert S.Half in S.Rationals assert 1.0 not in S.Rationals assert 2 in S.Rationals r = symbols('r', rational=True) assert r in S.Rationals raises(TypeError, lambda: x in S.Rationals) # issue #18134: assert S.Rationals.boundary == S.Reals assert S.Rationals.closure == S.Reals assert S.Rationals.is_open == False assert S.Rationals.is_closed == False def test_NZQRC_unions(): # check that all trivial number set unions are simplified: nbrsets = (S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals, S.Complexes) unions = (Union(a, b) for a in nbrsets for b in nbrsets) assert all(u.is_Union is False for u in unions) def test_imageset_intersection(): n = Dummy() s = ImageSet(Lambda(n, -I*(I*(2*pi*n - pi/4) + log(Abs(sqrt(-I))))), S.Integers) assert s.intersect(S.Reals) == ImageSet( Lambda(n, 2*pi*n + pi*Rational(7, 4)), S.Integers) def test_issue_17858(): assert 1 in Range(-oo, oo) assert 0 in Range(oo, -oo, -1) assert oo not in Range(-oo, oo) assert -oo not in Range(-oo, oo) def test_issue_17859(): r = Range(-oo,oo) raises(ValueError,lambda: r[::2]) raises(ValueError, lambda: r[::-2]) r = Range(oo,-oo,-1) raises(ValueError,lambda: r[::2]) raises(ValueError, lambda: r[::-2])
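# --- Illustrative sketch, not part of the upstream test suite ---
# The tests just above (test_issue_17858 / test_issue_17859) show that an
# unbounded Range supports membership checks but rejects step slicing, since
# the sliced set has no canonical Range form.  A minimal standalone
# demonstration using only the public API exercised above; the helper name
# _demo_unbounded_range is hypothetical.
def _demo_unbounded_range():
    from sympy import Range, oo
    r = Range(-oo, oo)
    assert 1 in r          # finite integers are members
    assert oo not in r     # the infinite endpoints themselves are not
    try:
        r[::2]             # step slicing an unbounded Range
    except ValueError:
        pass               # raises ValueError, as asserted in test_issue_17859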
3c85da4793933dea2847c8a7c57c9fe2a74c38ff7a9d9ff6ae5729c0f7d7d7d8
from sympy import (Symbol, Set, Union, Interval, oo, S, sympify, nan, Max, Min, Float, FiniteSet, Intersection, imageset, I, true, false, ProductSet, sqrt, Complement, EmptySet, sin, cos, Lambda, ImageSet, pi, Pow, Contains, Sum, rootof, SymmetricDifference, Piecewise, Matrix, Range, Add, symbols, zoo, Rational) from mpmath import mpi from sympy.core.compatibility import range from sympy.core.expr import unchanged from sympy.core.relational import Eq, Ne, Le, Lt, LessThan from sympy.logic import And, Or, Xor from sympy.utilities.pytest import raises, XFAIL, warns_deprecated_sympy from sympy.abc import x, y, z, m, n def test_imageset(): ints = S.Integers assert imageset(x, x - 1, S.Naturals) is S.Naturals0 assert imageset(x, x + 1, S.Naturals0) is S.Naturals assert imageset(x, abs(x), S.Naturals0) is S.Naturals0 assert imageset(x, abs(x), S.Naturals) is S.Naturals assert imageset(x, abs(x), S.Integers) is S.Naturals0 # issue 16878a r = symbols('r', real=True) assert imageset(x, (x, x), S.Reals)._contains((1, r)) == None assert imageset(x, (x, x), S.Reals)._contains((1, 2)) == False assert (r, r) in imageset(x, (x, x), S.Reals) assert 1 + I in imageset(x, x + I, S.Reals) assert {1} not in imageset(x, (x,), S.Reals) assert (1, 1) not in imageset(x, (x,) , S.Reals) raises(TypeError, lambda: imageset(x, ints)) raises(ValueError, lambda: imageset(x, y, z, ints)) raises(ValueError, lambda: imageset(Lambda(x, cos(x)), y)) assert (1, 2) in imageset(Lambda((x, y), (x, y)), ints, ints) raises(ValueError, lambda: imageset(Lambda(x, x), ints, ints)) assert imageset(cos, ints) == ImageSet(Lambda(x, cos(x)), ints) def f(x): return cos(x) assert imageset(f, ints) == imageset(x, cos(x), ints) f = lambda x: cos(x) assert imageset(f, ints) == ImageSet(Lambda(x, cos(x)), ints) assert imageset(x, 1, ints) == FiniteSet(1) assert imageset(x, y, ints) == {y} assert imageset((x, y), (1, z), ints, S.Reals) == {(1, z)} clash = Symbol('x', integer=true) assert (str(imageset(lambda x: x + clash, Interval(-2, 1)).lamda.expr) in ('_x + x', 'x + _x')) x1, x2 = symbols("x1, x2") assert imageset(lambda x, y: Add(x, y), Interval(1, 2), Interval(2, 3)) == \ ImageSet(Lambda((x1, x2), x1+x2), Interval(1, 2), Interval(2, 3)) def test_is_empty(): for s in [S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals, S.UniversalSet]: assert s.is_empty is False assert S.EmptySet.is_empty is True def test_is_finiteset(): for s in [S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals, S.UniversalSet]: assert s.is_finite_set is False assert S.EmptySet.is_finite_set is True assert FiniteSet(1, 2).is_finite_set is True assert Interval(1, 2).is_finite_set is False assert Interval(x, y).is_finite_set is None assert ProductSet(FiniteSet(1), FiniteSet(2)).is_finite_set is True assert ProductSet(FiniteSet(1), Interval(1, 2)).is_finite_set is False assert ProductSet(FiniteSet(1), Interval(x, y)).is_finite_set is None assert Union(Interval(0, 1), Interval(2, 3)).is_finite_set is False assert Union(FiniteSet(1), Interval(2, 3)).is_finite_set is False assert Union(FiniteSet(1), FiniteSet(2)).is_finite_set is True assert Union(FiniteSet(1), Interval(x, y)).is_finite_set is None assert Intersection(Interval(x, y), FiniteSet(1)).is_finite_set is True assert Intersection(Interval(x, y), Interval(1, 2)).is_finite_set is None assert Intersection(FiniteSet(x), FiniteSet(y)).is_finite_set is True assert Complement(FiniteSet(1), Interval(x, y)).is_finite_set is True assert Complement(Interval(x, y), FiniteSet(1)).is_finite_set is None assert 
Complement(Interval(1, 2), FiniteSet(x)).is_finite_set is False def test_deprecated_is_EmptySet(): with warns_deprecated_sympy(): S.EmptySet.is_EmptySet def test_interval_arguments(): assert Interval(0, oo) == Interval(0, oo, False, True) assert Interval(0, oo).right_open is true assert Interval(-oo, 0) == Interval(-oo, 0, True, False) assert Interval(-oo, 0).left_open is true assert Interval(oo, -oo) == S.EmptySet assert Interval(oo, oo) == S.EmptySet assert Interval(-oo, -oo) == S.EmptySet assert Interval(oo, x) == S.EmptySet assert Interval(oo, oo) == S.EmptySet assert Interval(x, -oo) == S.EmptySet assert Interval(x, x) == {x} assert isinstance(Interval(1, 1), FiniteSet) e = Sum(x, (x, 1, 3)) assert isinstance(Interval(e, e), FiniteSet) assert Interval(1, 0) == S.EmptySet assert Interval(1, 1).measure == 0 assert Interval(1, 1, False, True) == S.EmptySet assert Interval(1, 1, True, False) == S.EmptySet assert Interval(1, 1, True, True) == S.EmptySet assert isinstance(Interval(0, Symbol('a')), Interval) assert Interval(Symbol('a', real=True, positive=True), 0) == S.EmptySet raises(ValueError, lambda: Interval(0, S.ImaginaryUnit)) raises(ValueError, lambda: Interval(0, Symbol('z', extended_real=False))) raises(NotImplementedError, lambda: Interval(0, 1, And(x, y))) raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y))) raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y))) def test_interval_symbolic_end_points(): a = Symbol('a', real=True) assert Union(Interval(0, a), Interval(0, 3)).sup == Max(a, 3) assert Union(Interval(a, 0), Interval(-3, 0)).inf == Min(-3, a) assert Interval(0, a).contains(1) == LessThan(1, a) def test_interval_is_empty(): x, y = symbols('x, y') r = Symbol('r', real=True) p = Symbol('p', positive=True) n = Symbol('n', negative=True) nn = Symbol('nn', nonnegative=True) assert Interval(1, 2).is_empty == False assert Interval(3, 3).is_empty == False # FiniteSet assert Interval(r, r).is_empty == False # FiniteSet assert Interval(r, r + nn).is_empty == False assert Interval(x, x).is_empty == False assert Interval(1, oo).is_empty == False assert Interval(-oo, oo).is_empty == False assert Interval(-oo, 1).is_empty == False assert Interval(x, y).is_empty == None assert Interval(r, oo).is_empty == False # real implies finite assert Interval(n, 0).is_empty == False assert Interval(n, 0, left_open=True).is_empty == False assert Interval(p, 0).is_empty == True # EmptySet assert Interval(nn, 0).is_empty == None assert Interval(n, p).is_empty == False assert Interval(0, p, left_open=True).is_empty == False assert Interval(0, p, right_open=True).is_empty == False assert Interval(0, nn, left_open=True).is_empty == None assert Interval(0, nn, right_open=True).is_empty == None def test_union(): assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3) assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3) assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4) assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3) assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3) assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \ Interval(1, 3, False, True) assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3) assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3) assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \ Interval(1, 3, True) assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \ Interval(1, 3, True, True) assert Union(Interval(1, 2, True, True), 
Interval(1, 3, True)) == \ Interval(1, 3, True) assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3) assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \ Interval(1, 3) assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \ Interval(1, 3) assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2) assert Union(S.EmptySet) == S.EmptySet assert Union(Interval(0, 1), *[FiniteSet(1.0/n) for n in range(1, 10)]) == \ Interval(0, 1) assert Interval(1, 2).union(Interval(2, 3)) == \ Interval(1, 2) + Interval(2, 3) assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3) assert Union(Set()) == Set() assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3) assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs') assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3) assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3) assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4) x = Symbol("x") y = Symbol("y") z = Symbol("z") assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \ FiniteSet(x, FiniteSet(y, z)) # Test that Intervals and FiniteSets play nicely assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3) assert Interval(1, 3, True, True) + FiniteSet(3) == \ Interval(1, 3, True, False) X = Interval(1, 3) + FiniteSet(5) Y = Interval(1, 2) + FiniteSet(3) XandY = X.intersect(Y) assert 2 in X and 3 in X and 3 in XandY assert XandY.is_subset(X) and XandY.is_subset(Y) raises(TypeError, lambda: Union(1, 2, 3)) assert X.is_iterable is False # issue 7843 assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == \ FiniteSet(-sqrt(-I), sqrt(-I)) assert Union(S.Reals, S.Integers) == S.Reals def test_union_iter(): # Use Range because it is ordered u = Union(Range(3), Range(5), Range(4), evaluate=False) # Round robin assert list(u) == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4] def test_union_is_empty(): assert (Interval(x, y) + FiniteSet(1)).is_empty == False assert (Interval(x, y) + Interval(-x, y)).is_empty == None def test_difference(): assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True) assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True) assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True) assert Interval(1, 3, True) - Interval(2, 3, True) == \ Interval(1, 2, True, False) assert Interval(0, 2) - FiniteSet(1) == \ Union(Interval(0, 1, False, True), Interval(1, 2, True, False)) # issue #18119 assert S.Reals - FiniteSet(I) == S.Reals assert S.Reals - FiniteSet(-I, I) == S.Reals assert Interval(0, 10) - FiniteSet(-I, I) == Interval(0, 10) assert Interval(0, 10) - FiniteSet(1, I) == Union( Interval.Ropen(0, 1), Interval.Lopen(1, 10)) assert S.Reals - FiniteSet(1, 2 + I, x, y**2) == Complement( Union(Interval.open(-oo, 1), Interval.open(1, oo)), FiniteSet(x, y**2), evaluate=False) assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3) assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham') assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \ FiniteSet(1, 2) assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4) assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \ Union(Interval(0, 1, False, True), FiniteSet(4)) assert -1 in S.Reals - S.Naturals def test_Complement(): A = FiniteSet(1, 3, 4) B = FiniteSet(3, 4) C = Interval(1, 3) D = Interval(1, 2) assert Complement(A, B, evaluate=False).is_iterable is True assert Complement(A, C, evaluate=False).is_iterable is True assert Complement(C, D, 
evaluate=False).is_iterable is None assert FiniteSet(*Complement(A, B, evaluate=False)) == FiniteSet(1) assert FiniteSet(*Complement(A, C, evaluate=False)) == FiniteSet(4) raises(TypeError, lambda: FiniteSet(*Complement(C, A, evaluate=False))) assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True) assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1) assert Complement(Union(Interval(0, 2), FiniteSet(2, 3, 4)), Interval(1, 3)) == \ Union(Interval(0, 1, False, True), FiniteSet(4)) assert not 3 in Complement(Interval(0, 5), Interval(1, 4), evaluate=False) assert -1 in Complement(S.Reals, S.Naturals, evaluate=False) assert not 1 in Complement(S.Reals, S.Naturals, evaluate=False) assert Complement(S.Integers, S.UniversalSet) == EmptySet assert S.UniversalSet.complement(S.Integers) == EmptySet assert (not 0 in S.Reals.intersect(S.Integers - FiniteSet(0))) assert S.EmptySet - S.Integers == S.EmptySet assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == S.Integers - FiniteSet(0, 1) assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \ Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi)) # issue 12712 assert Complement(FiniteSet(x, y, 2), Interval(-10, 10)) == \ Complement(FiniteSet(x, y), Interval(-10, 10)) A = FiniteSet(*symbols('a:c')) B = FiniteSet(*symbols('d:f')) assert unchanged(Complement, ProductSet(A, A), B) A2 = ProductSet(A, A) B3 = ProductSet(B, B, B) assert A2 - B3 == A2 assert B3 - A2 == B3 def test_set_operations_nonsets(): '''Tests that e.g. FiniteSet(1) * 2 raises TypeError''' ops = [ lambda a, b: a + b, lambda a, b: a - b, lambda a, b: a * b, lambda a, b: a / b, lambda a, b: a // b, lambda a, b: a | b, lambda a, b: a & b, lambda a, b: a ^ b, # FiniteSet(1) ** 2 gives a ProductSet #lambda a, b: a ** b, ] Sx = FiniteSet(x) Sy = FiniteSet(y) sets = [ {1}, FiniteSet(1), Interval(1, 2), Union(Sx, Interval(1, 2)), Intersection(Sx, Sy), Complement(Sx, Sy), ProductSet(Sx, Sy), S.EmptySet, ] nums = [0, 1, 2, S(0), S(1), S(2)] for s in sets: for n in nums: for op in ops: raises(TypeError, lambda : op(s, n)) raises(TypeError, lambda : op(n, s)) raises(TypeError, lambda: s ** object()) raises(TypeError, lambda: s ** {1}) def test_complement(): assert Interval(0, 1).complement(S.Reals) == \ Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True)) assert Interval(0, 1, True, False).complement(S.Reals) == \ Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True)) assert Interval(0, 1, False, True).complement(S.Reals) == \ Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True)) assert Interval(0, 1, True, True).complement(S.Reals) == \ Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True)) assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet assert S.UniversalSet.complement(S.Reals) == S.EmptySet assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet assert S.EmptySet.complement(S.Reals) == S.Reals assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \ Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True), Interval(3, oo, True, True)) assert FiniteSet(0).complement(S.Reals) == \ Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True)) assert (FiniteSet(5) + Interval(S.NegativeInfinity, 0)).complement(S.Reals) == \ Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True) assert FiniteSet(1, 2, 3).complement(S.Reals) == \ Interval(S.NegativeInfinity, 1, True, True) + \ Interval(1, 2, True, True) + Interval(2, 3, True, True) +\ Interval(3, S.Infinity, True, True) 
assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x)) assert FiniteSet(0, x).complement(S.Reals) == Complement(Interval(-oo, 0, True, True) + Interval(0, oo, True, True) ,FiniteSet(x), evaluate=False) square = Interval(0, 1) * Interval(0, 1) notsquare = square.complement(S.Reals*S.Reals) assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)]) assert not any( pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)]) assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)]) assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)]) def test_intersect1(): assert all(S.Integers.intersection(i) is i for i in (S.Naturals, S.Naturals0)) assert all(i.intersection(S.Integers) is i for i in (S.Naturals, S.Naturals0)) s = S.Naturals0 assert S.Naturals.intersection(s) is S.Naturals assert s.intersection(S.Naturals) is S.Naturals x = Symbol('x') assert Interval(0, 2).intersect(Interval(1, 2)) == Interval(1, 2) assert Interval(0, 2).intersect(Interval(1, 2, True)) == \ Interval(1, 2, True) assert Interval(0, 2, True).intersect(Interval(1, 2)) == \ Interval(1, 2, False, False) assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \ Interval(1, 2, False, True) assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \ Union(Interval(0, 1), Interval(2, 2)) assert FiniteSet(1, 2).intersect(FiniteSet(1, 2, 3)) == FiniteSet(1, 2) assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x) assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \ FiniteSet('ham') assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3) assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \ Union(Interval(1, 1), Interval(2, 2)) assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \ Union(Interval(0, 1), Interval(2, 2)) assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \ S.EmptySet assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \ S.EmptySet assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \ Intersection(FiniteSet(2, 3, 4, 5, 6), Union(FiniteSet('ham'), Interval(0, 5))) assert Intersection(FiniteSet(1, 2, 3), Interval(2, x), Interval(3, y)) == \ Intersection(FiniteSet(3), Interval(2, x), Interval(3, y), evaluate=False) assert Intersection(FiniteSet(1, 2), Interval(0, 3), Interval(x, y)) == \ Intersection({1, 2}, Interval(x, y), evaluate=False) assert Intersection(FiniteSet(1, 2, 4), Interval(0, 3), Interval(x, y)) == \ Intersection({1, 2}, Interval(x, y), evaluate=False) # XXX: Is the real=True necessary here? 
# https://github.com/sympy/sympy/issues/17532 m, n = symbols('m, n', real=True) assert Intersection(FiniteSet(m), FiniteSet(m, n), Interval(m, m+1)) == \ FiniteSet(m) # issue 8217 assert Intersection(FiniteSet(x), FiniteSet(y)) == \ Intersection(FiniteSet(x), FiniteSet(y), evaluate=False) assert FiniteSet(x).intersect(S.Reals) == \ Intersection(S.Reals, FiniteSet(x), evaluate=False) # tests for the intersection alias assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3) assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \ Union(Interval(1, 1), Interval(2, 2)) def test_intersection(): # iterable i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False) assert i.is_iterable assert set(i) == {S(2), S(3)} # challenging intervals x = Symbol('x', real=True) i = Intersection(Interval(0, 3), Interval(x, 6)) assert (5 in i) is False raises(TypeError, lambda: 2 in i) # Singleton special cases assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet assert Intersection(Interval(-oo, oo), Interval(-oo, x)) == Interval(-oo, x) # Products line = Interval(0, 5) i = Intersection(line**2, line**3, evaluate=False) assert (2, 2) not in i assert (2, 2, 2) not in i raises(TypeError, lambda: list(i)) a = Intersection(Intersection(S.Integers, S.Naturals, evaluate=False), S.Reals, evaluate=False) assert a._argset == frozenset([Intersection(S.Naturals, S.Integers, evaluate=False), S.Reals]) assert Intersection(S.Complexes, FiniteSet(S.ComplexInfinity)) == S.EmptySet # issue 12178 assert Intersection() == S.UniversalSet # issue 16987 assert Intersection({1}, {1}, {x}) == Intersection({1}, {x}) def test_issue_9623(): n = Symbol('n') a = S.Reals b = Interval(0, oo) c = FiniteSet(n) assert Intersection(a, b, c) == Intersection(b, c) assert Intersection(Interval(1, 2), Interval(3, 4), FiniteSet(n)) == EmptySet def test_is_disjoint(): assert Interval(0, 2).is_disjoint(Interval(1, 2)) == False assert Interval(0, 2).is_disjoint(Interval(3, 4)) == True def test_ProductSet__len__(): A = FiniteSet(1, 2) B = FiniteSet(1, 2, 3) assert ProductSet(A).__len__() == 2 assert ProductSet(A).__len__() is not S(2) assert ProductSet(A, B).__len__() == 6 assert ProductSet(A, B).__len__() is not S(6) def test_ProductSet(): # ProductSet is always a set of Tuples assert ProductSet(S.Reals) == S.Reals ** 1 assert ProductSet(S.Reals, S.Reals) == S.Reals ** 2 assert ProductSet(S.Reals, S.Reals, S.Reals) == S.Reals ** 3 assert ProductSet(S.Reals) != S.Reals assert ProductSet(S.Reals, S.Reals) == S.Reals * S.Reals assert ProductSet(S.Reals, S.Reals, S.Reals) != S.Reals * S.Reals * S.Reals assert ProductSet(S.Reals, S.Reals, S.Reals) == (S.Reals * S.Reals * S.Reals).flatten() assert 1 not in ProductSet(S.Reals) assert (1,) in ProductSet(S.Reals) assert 1 not in ProductSet(S.Reals, S.Reals) assert (1, 2) in ProductSet(S.Reals, S.Reals) assert (1, I) not in ProductSet(S.Reals, S.Reals) assert (1, 2, 3) in ProductSet(S.Reals, S.Reals, S.Reals) assert (1, 2, 3) in S.Reals ** 3 assert (1, 2, 3) not in S.Reals * S.Reals * S.Reals assert ((1, 2), 3) in S.Reals * S.Reals * S.Reals assert (1, (2, 3)) not in S.Reals * S.Reals * S.Reals assert (1, (2, 3)) in S.Reals * (S.Reals * S.Reals) assert ProductSet() == FiniteSet(()) assert ProductSet(S.Reals, S.EmptySet) == S.EmptySet # See GH-17458 for ni in range(5): Rn = ProductSet(*(S.Reals,) * ni) assert (1,) * ni in Rn assert 1 not in Rn assert (S.Reals * S.Reals) * S.Reals != 
S.Reals * (S.Reals * S.Reals) S1 = S.Reals S2 = S.Integers x1 = pi x2 = 3 assert x1 in S1 assert x2 in S2 assert (x1, x2) in S1 * S2 S3 = S1 * S2 x3 = (x1, x2) assert x3 in S3 assert (x3, x3) in S3 * S3 assert x3 + x3 not in S3 * S3 raises(ValueError, lambda: S.Reals**-1) with warns_deprecated_sympy(): ProductSet(FiniteSet(s) for s in range(2)) raises(TypeError, lambda: ProductSet(None)) S1 = FiniteSet(1, 2) S2 = FiniteSet(3, 4) S3 = ProductSet(S1, S2) assert (S3.as_relational(x, y) == And(S1.as_relational(x), S2.as_relational(y)) == And(Or(Eq(x, 1), Eq(x, 2)), Or(Eq(y, 3), Eq(y, 4)))) raises(ValueError, lambda: S3.as_relational(x)) raises(ValueError, lambda: S3.as_relational(x, 1)) raises(ValueError, lambda: ProductSet(Interval(0, 1)).as_relational(x, y)) Z2 = ProductSet(S.Integers, S.Integers) assert Z2.contains((1, 2)) is S.true assert Z2.contains((1,)) is S.false assert Z2.contains(x) == Contains(x, Z2, evaluate=False) assert Z2.contains(x).subs(x, 1) is S.false assert Z2.contains((x, 1)).subs(x, 2) is S.true assert Z2.contains((x, y)) == Contains((x, y), Z2, evaluate=False) assert unchanged(Contains, (x, y), Z2) assert Contains((1, 2), Z2) is S.true def test_ProductSet_of_single_arg_is_not_arg(): assert unchanged(ProductSet, Interval(0, 1)) assert ProductSet(Interval(0, 1)) != Interval(0, 1) def test_ProductSet_is_empty(): assert ProductSet(S.Integers, S.Reals).is_empty == False assert ProductSet(Interval(x, 1), S.Reals).is_empty == None def test_interval_subs(): a = Symbol('a', real=True) assert Interval(0, a).subs(a, 2) == Interval(0, 2) assert Interval(a, 0).subs(a, 2) == S.EmptySet def test_interval_to_mpi(): assert Interval(0, 1).to_mpi() == mpi(0, 1) assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1) assert type(Interval(0, 1).to_mpi()) == type(mpi(0, 1)) def test_measure(): a = Symbol('a', real=True) assert Interval(1, 3).measure == 2 assert Interval(0, a).measure == a assert Interval(1, a).measure == a - 1 assert Union(Interval(1, 2), Interval(3, 4)).measure == 2 assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \ == 2 assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0 assert S.EmptySet.measure == 0 square = Interval(0, 10) * Interval(0, 10) offsetsquare = Interval(5, 15) * Interval(5, 15) band = Interval(-oo, oo) * Interval(2, 4) assert square.measure == offsetsquare.measure == 100 assert (square + offsetsquare).measure == 175 # there is some overlap assert (square - offsetsquare).measure == 75 assert (square * FiniteSet(1, 2, 3)).measure == 0 assert (square.intersect(band)).measure == 20 assert (square + band).measure is oo assert (band * FiniteSet(1, 2, 3)).measure is nan def test_is_subset(): assert Interval(0, 1).is_subset(Interval(0, 2)) is True assert Interval(0, 3).is_subset(Interval(0, 2)) is False assert Interval(0, 1).is_subset(FiniteSet(0, 1)) is False assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4)) assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False assert FiniteSet(1).is_subset(Interval(0, 2)) assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False assert (Interval(1, 2) + FiniteSet(3)).is_subset( (Interval(0, 2, False, True) + FiniteSet(2, 3))) assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True assert Interval(0, 1).is_subset(S.EmptySet) is False assert 
S.EmptySet.is_subset(S.EmptySet) is True raises(ValueError, lambda: S.EmptySet.is_subset(1)) # tests for the issubset alias assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True assert S.Naturals.is_subset(S.Integers) assert S.Naturals0.is_subset(S.Integers) assert FiniteSet(x).is_subset(FiniteSet(y)) is None assert FiniteSet(x).is_subset(FiniteSet(y).subs(y, x)) is True assert FiniteSet(x).is_subset(FiniteSet(y).subs(y, x+1)) is False assert Interval(0, 1).is_subset(Interval(0, 1, left_open=True)) is False assert Interval(-2, 3).is_subset(Union(Interval(-oo, -2), Interval(3, oo))) is False n = Symbol('n', integer=True) assert Range(-3, 4, 1).is_subset(FiniteSet(-10, 10)) is False assert Range(S(10)**100).is_subset(FiniteSet(0, 1, 2)) is False assert Range(6, 0, -2).is_subset(FiniteSet(2, 4, 6)) is True assert Range(1, oo).is_subset(FiniteSet(1, 2)) is False assert Range(-oo, 1).is_subset(FiniteSet(1)) is False assert Range(3).is_subset(FiniteSet(0, 1, n)) is None assert Range(n, n + 2).is_subset(FiniteSet(n, n + 1)) is True assert Range(5).is_subset(Interval(0, 4, right_open=True)) is False def test_is_proper_subset(): assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0)) def test_is_superset(): assert Interval(0, 1).is_superset(Interval(0, 2)) == False assert Interval(0, 3).is_superset(Interval(0, 2)) assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False assert FiniteSet(1).is_superset(Interval(0, 2)) == False assert FiniteSet(1, 2).is_superset(Interval(0, 2, True, True)) == False assert (Interval(1, 2) + FiniteSet(3)).is_superset( (Interval(0, 2, False, True) + FiniteSet(2, 3))) == False assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False assert Interval(0, 1).is_superset(S.EmptySet) == True assert S.EmptySet.is_superset(S.EmptySet) == True raises(ValueError, lambda: S.EmptySet.is_superset(1)) # tests for the issuperset alias assert Interval(0, 1).issuperset(S.EmptySet) == True assert S.EmptySet.issuperset(S.EmptySet) == True def test_is_proper_superset(): assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0)) def test_contains(): assert Interval(0, 2).contains(1) is S.true assert Interval(0, 2).contains(3) is S.false assert Interval(0, 2, True, False).contains(0) is S.false assert Interval(0, 2, True, False).contains(2) is S.true assert Interval(0, 2, False, True).contains(0) is S.true assert Interval(0, 2, False, True).contains(2) is S.false assert Interval(0, 2, True, True).contains(0) is S.false assert Interval(0, 2, True, True).contains(2) is S.false assert (Interval(0, 2) in Interval(0, 2)) is False assert FiniteSet(1, 2, 3).contains(2) is S.true assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true assert FiniteSet(y)._contains(x) is None raises(TypeError, lambda: x in FiniteSet(y)) assert FiniteSet({x, y})._contains({x}) is None assert FiniteSet({x, y}).subs(y, 
x)._contains({x}) is True assert FiniteSet({x, y}).subs(y, x+1)._contains({x}) is False # issue 8197 from sympy.abc import a, b assert isinstance(FiniteSet(b).contains(-a), Contains) assert isinstance(FiniteSet(b).contains(a), Contains) assert isinstance(FiniteSet(a).contains(1), Contains) raises(TypeError, lambda: 1 in FiniteSet(a)) # issue 8209 rad1 = Pow(Pow(2, Rational(1, 3)) - 1, Rational(1, 3)) rad2 = Pow(Rational(1, 9), Rational(1, 3)) - Pow(Rational(2, 9), Rational(1, 3)) + Pow(Rational(4, 9), Rational(1, 3)) s1 = FiniteSet(rad1) s2 = FiniteSet(rad2) assert s1 - s2 == S.EmptySet items = [1, 2, S.Infinity, S('ham'), -1.1] fset = FiniteSet(*items) assert all(item in fset for item in items) assert all(fset.contains(item) is S.true for item in items) assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false assert S.EmptySet.contains(1) is S.false assert FiniteSet(rootof(x**3 + x - 1, 0)).contains(S.Infinity) is S.false assert rootof(x**5 + x**3 + 1, 0) in S.Reals assert not rootof(x**5 + x**3 + 1, 1) in S.Reals # non-bool results assert Union(Interval(1, 2), Interval(3, 4)).contains(x) == \ Or(And(S.One <= x, x <= 2), And(S(3) <= x, x <= 4)) assert Intersection(Interval(1, x), Interval(2, 3)).contains(y) == \ And(y <= 3, y <= x, S.One <= y, S(2) <= y) assert (S.Complexes).contains(S.ComplexInfinity) == S.false def test_interval_symbolic(): x = Symbol('x') e = Interval(0, 1) assert e.contains(x) == And(S.Zero <= x, x <= 1) raises(TypeError, lambda: x in e) e = Interval(0, 1, True, True) assert e.contains(x) == And(S.Zero < x, x < 1) def test_union_contains(): x = Symbol('x') i1 = Interval(0, 1) i2 = Interval(2, 3) i3 = Union(i1, i2) assert i3.as_relational(x) == Or(And(S.Zero <= x, x <= 1), And(S(2) <= x, x <= 3)) raises(TypeError, lambda: x in i3) e = i3.contains(x) assert e == i3.as_relational(x) assert e.subs(x, -0.5) is false assert e.subs(x, 0.5) is true assert e.subs(x, 1.5) is false assert e.subs(x, 2.5) is true assert e.subs(x, 3.5) is false U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6) assert all(el not in U for el in [0, 4, -oo]) assert all(el in U for el in [2, 5, 10]) def test_is_number(): assert Interval(0, 1).is_number is False assert Set().is_number is False def test_Interval_is_left_unbounded(): assert Interval(3, 4).is_left_unbounded is False assert Interval(-oo, 3).is_left_unbounded is True assert Interval(Float("-inf"), 3).is_left_unbounded is True def test_Interval_is_right_unbounded(): assert Interval(3, 4).is_right_unbounded is False assert Interval(3, oo).is_right_unbounded is True assert Interval(3, Float("+inf")).is_right_unbounded is True def test_Interval_as_relational(): x = Symbol('x') assert Interval(-1, 2, False, False).as_relational(x) == \ And(Le(-1, x), Le(x, 2)) assert Interval(-1, 2, True, False).as_relational(x) == \ And(Lt(-1, x), Le(x, 2)) assert Interval(-1, 2, False, True).as_relational(x) == \ And(Le(-1, x), Lt(x, 2)) assert Interval(-1, 2, True, True).as_relational(x) == \ And(Lt(-1, x), Lt(x, 2)) assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Lt(-oo, x), Le(x, 2)) assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(-oo, x), Lt(x, 2)) assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo)) assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo)) assert Interval(-oo, 
oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo)) x = Symbol('x', real=True) y = Symbol('y', real=True) assert Interval(x, y).as_relational(x) == (x <= y) assert Interval(y, x).as_relational(x) == (y <= x) def test_Finite_as_relational(): x = Symbol('x') y = Symbol('y') assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2)) assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5)) def test_Union_as_relational(): x = Symbol('x') assert (Interval(0, 1) + FiniteSet(2)).as_relational(x) == \ Or(And(Le(0, x), Le(x, 1)), Eq(x, 2)) assert (Interval(0, 1, True, True) + FiniteSet(1)).as_relational(x) == \ And(Lt(0, x), Le(x, 1)) def test_Intersection_as_relational(): x = Symbol('x') assert (Intersection(Interval(0, 1), FiniteSet(2), evaluate=False).as_relational(x) == And(And(Le(0, x), Le(x, 1)), Eq(x, 2))) def test_Complement_as_relational(): x = Symbol('x') expr = Complement(Interval(0, 1), FiniteSet(2), evaluate=False) assert expr.as_relational(x) == \ And(Le(0, x), Le(x, 1), Ne(x, 2)) @XFAIL def test_Complement_as_relational_fail(): x = Symbol('x') expr = Complement(Interval(0, 1), FiniteSet(2), evaluate=False) # XXX This example fails because 0 <= x changes to x >= 0 # during the evaluation. assert expr.as_relational(x) == \ (0 <= x) & (x <= 1) & Ne(x, 2) def test_SymmetricDifference_as_relational(): x = Symbol('x') expr = SymmetricDifference(Interval(0, 1), FiniteSet(2), evaluate=False) assert expr.as_relational(x) == Xor(Eq(x, 2), Le(0, x) & Le(x, 1)) def test_EmptySet(): assert S.EmptySet.as_relational(Symbol('x')) is S.false assert S.EmptySet.intersect(S.UniversalSet) == S.EmptySet assert S.EmptySet.boundary == S.EmptySet def test_finite_basic(): x = Symbol('x') A = FiniteSet(1, 2, 3) B = FiniteSet(3, 4, 5) AorB = Union(A, B) AandB = A.intersect(B) assert A.is_subset(AorB) and B.is_subset(AorB) assert AandB.is_subset(A) assert AandB == FiniteSet(3) assert A.inf == 1 and A.sup == 3 assert AorB.inf == 1 and AorB.sup == 5 assert FiniteSet(x, 1, 5).sup == Max(x, 5) assert FiniteSet(x, 1, 5).inf == Min(x, 1) # issue 7335 assert FiniteSet(S.EmptySet) != S.EmptySet assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3) assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3) # Ensure a variety of types can exist in a FiniteSet assert FiniteSet((1, 2), Float, A, -5, x, 'eggs', x**2, Interval) assert (A > B) is False assert (A >= B) is False assert (A < B) is False assert (A <= B) is False assert AorB > A and AorB > B assert AorB >= A and AorB >= B assert A >= A and A <= A assert A >= AandB and B >= AandB assert A > AandB and B > AandB assert FiniteSet(1.0) == FiniteSet(1) def test_product_basic(): H, T = 'H', 'T' unit_line = Interval(0, 1) d6 = FiniteSet(1, 2, 3, 4, 5, 6) d4 = FiniteSet(1, 2, 3, 4) coin = FiniteSet(H, T) square = unit_line * unit_line assert (0, 0) in square assert 0 not in square assert (H, T) in coin ** 2 assert (.5, .5, .5) in (square * unit_line).flatten() assert ((.5, .5), .5) in square * unit_line assert (H, 3, 3) in (coin * d6 * d6).flatten() assert ((H, 3), 3) in coin * d6 * d6 HH, TT = sympify(H), sympify(T) assert set(coin**2) == set(((HH, HH), (HH, TT), (TT, HH), (TT, TT))) assert (d4*d4).is_subset(d6*d6) assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union( (Interval(-oo, 0, True, True) + Interval(1, oo, True, True))*Interval(-oo, oo), Interval(-oo, oo)*(Interval(-oo, 0, True, True) + Interval(1, oo, True, True))) assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3) assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 
5)**3) assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3) assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square) # segment in square assert len(coin*coin*coin) == 8 assert len(S.EmptySet*S.EmptySet) == 0 assert len(S.EmptySet*coin) == 0 raises(TypeError, lambda: len(coin*Interval(0, 2))) def test_real(): x = Symbol('x', real=True, finite=True) I = Interval(0, 5) J = Interval(10, 20) A = FiniteSet(1, 2, 30, x, S.Pi) B = FiniteSet(-4, 0) C = FiniteSet(100) D = FiniteSet('Ham', 'Eggs') assert all(s.is_subset(S.Reals) for s in [I, J, A, B, C]) assert not D.is_subset(S.Reals) assert all((a + b).is_subset(S.Reals) for a in [I, J, A, B, C] for b in [I, J, A, B, C]) assert not any((a + D).is_subset(S.Reals) for a in [I, J, A, B, C, D]) assert not (I + A + D).is_subset(S.Reals) def test_supinf(): x = Symbol('x', real=True) y = Symbol('y', real=True) assert (Interval(0, 1) + FiniteSet(2)).sup == 2 assert (Interval(0, 1) + FiniteSet(2)).inf == 0 assert (Interval(0, 1) + FiniteSet(x)).sup == Max(1, x) assert (Interval(0, 1) + FiniteSet(x)).inf == Min(0, x) assert FiniteSet(5, 1, x).sup == Max(5, x) assert FiniteSet(5, 1, x).inf == Min(1, x) assert FiniteSet(5, 1, x, y).sup == Max(5, x, y) assert FiniteSet(5, 1, x, y).inf == Min(1, x, y) assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).sup == \ S.Infinity assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).inf == \ S.NegativeInfinity assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs') def test_universalset(): U = S.UniversalSet x = Symbol('x') assert U.as_relational(x) is S.true assert U.union(Interval(2, 4)) == U assert U.intersect(Interval(2, 4)) == Interval(2, 4) assert U.measure is S.Infinity assert U.boundary == S.EmptySet assert U.contains(0) is S.true def test_Union_of_ProductSets_shares(): line = Interval(0, 2) points = FiniteSet(0, 1, 2) assert Union(line * line, line * points) == line * line def test_Interval_free_symbols(): # issue 6211 assert Interval(0, 1).free_symbols == set() x = Symbol('x', real=True) assert Interval(0, x).free_symbols == {x} def test_image_interval(): from sympy.core.numbers import Rational x = Symbol('x', real=True) a = Symbol('a', real=True) assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2) assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \ Interval(-4, 2, True, False) assert imageset(x, x**2, Interval(-2, 1, True, False)) == \ Interval(0, 4, False, True) assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4) assert imageset(x, x**2, Interval(-2, 1, True, False)) == \ Interval(0, 4, False, True) assert imageset(x, x**2, Interval(-2, 1, True, True)) == \ Interval(0, 4, False, True) assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1) assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \ Interval(-35, 0) # Multiple Maxima assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \ + Interval(2, oo) # Single Infinite discontinuity assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \ Interval(Rational(3, 2), oo, False) # Multiple Infinite discontinuities # Test for Python lambda assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2) assert imageset(Lambda(x, a*x), Interval(0, 1)) == \ ImageSet(Lambda(x, a*x), Interval(0, 1)) assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \ ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1)) def test_image_piecewise(): f = Piecewise((x, x <= -1), (1/x**2, x <= 5), (x**3, True)) f1 = Piecewise((0, x <= 1), (1, x <= 2), (2, True)) assert 
imageset(x, f, Interval(-5, 5)) == Union(Interval(-5, -1), Interval(Rational(1, 25), oo)) assert imageset(x, f1, Interval(1, 2)) == FiniteSet(0, 1) @XFAIL # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826 def test_image_Intersection(): x = Symbol('x', real=True) y = Symbol('y', real=True) assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \ Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2))) def test_image_FiniteSet(): x = Symbol('x', real=True) assert imageset(x, 2*x, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6) def test_image_Union(): x = Symbol('x', real=True) assert imageset(x, x**2, Interval(-2, 0) + FiniteSet(1, 2, 3)) == \ (Interval(0, 4) + FiniteSet(9)) def test_image_EmptySet(): x = Symbol('x', real=True) assert imageset(x, 2*x, S.EmptySet) == S.EmptySet def test_issue_5724_7680(): assert I not in S.Reals # issue 7680 assert Interval(-oo, oo).contains(I) is S.false def test_boundary(): assert FiniteSet(1).boundary == FiniteSet(1) assert all(Interval(0, 1, left_open, right_open).boundary == FiniteSet(0, 1) for left_open in (true, false) for right_open in (true, false)) def test_boundary_Union(): assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3) assert ((Interval(0, 1, False, True) + Interval(1, 2, True, False)).boundary == FiniteSet(0, 1, 2)) assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2) assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \ == FiniteSet(0, 15) assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \ == FiniteSet(0, 10) assert Union(Interval(0, 10, True, True), Interval(10, 15, True, True), evaluate=False).boundary \ == FiniteSet(0, 10, 15) @XFAIL def test_union_boundary_of_joining_sets(): """ Testing the boundary of unions is a hard problem """ assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \ == FiniteSet(0, 15) def test_boundary_ProductSet(): open_square = Interval(0, 1, True, True) ** 2 assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1) + Interval(0, 1) * FiniteSet(0, 1)) second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True) assert (open_square + second_square).boundary == ( FiniteSet(0, 1) * Interval(0, 1) + FiniteSet(1, 2) * Interval(0, 1) + Interval(0, 1) * FiniteSet(0, 1) + Interval(1, 2) * FiniteSet(0, 1)) def test_boundary_ProductSet_line(): line_in_r2 = Interval(0, 1) * FiniteSet(0) assert line_in_r2.boundary == line_in_r2 def test_is_open(): assert Interval(0, 1, False, False).is_open is False assert Interval(0, 1, True, False).is_open is False assert Interval(0, 1, True, True).is_open is True assert FiniteSet(1, 2, 3).is_open is False def test_is_closed(): assert Interval(0, 1, False, False).is_closed is True assert Interval(0, 1, True, False).is_closed is False assert FiniteSet(1, 2, 3).is_closed is True def test_closure(): assert Interval(0, 1, False, True).closure == Interval(0, 1, False, False) def test_interior(): assert Interval(0, 1, False, True).interior == Interval(0, 1, True, True) def test_issue_7841(): raises(TypeError, lambda: x in S.Reals) def test_Eq(): assert Eq(Interval(0, 1), Interval(0, 1)) assert Eq(Interval(0, 1), Interval(0, 2)) == False s1 = FiniteSet(0, 1) s2 = FiniteSet(1, 2) assert Eq(s1, s1) assert Eq(s1, s2) == False assert Eq(s1*s2, s1*s2) assert Eq(s1*s2, s2*s1) == False assert unchanged(Eq, FiniteSet({x, y}), FiniteSet({x})) assert Eq(FiniteSet({x, y}).subs(y, x), FiniteSet({x})) is S.true assert Eq(FiniteSet({x, y}), 
FiniteSet({x})).subs(y, x) is S.true assert Eq(FiniteSet({x, y}).subs(y, x+1), FiniteSet({x})) is S.false assert Eq(FiniteSet({x, y}), FiniteSet({x})).subs(y, x+1) is S.false assert Eq(ProductSet({1}, {2}), Interval(1, 2)) not in (S.true, S.false) assert Eq(ProductSet({1}), ProductSet({1}, {2})) is S.false assert Eq(FiniteSet(()), FiniteSet(1)) is S.false assert Eq(ProductSet(), FiniteSet(1)) is S.false i1 = Interval(0, 1) i2 = Interval(x, y) assert unchanged(Eq, ProductSet(i1, i1), ProductSet(i2, i2)) def test_SymmetricDifference(): A = FiniteSet(0, 1, 2, 3, 4, 5) B = FiniteSet(2, 4, 6, 8, 10) C = Interval(8, 10) assert SymmetricDifference(A, B, evaluate=False).is_iterable is True assert SymmetricDifference(A, C, evaluate=False).is_iterable is None assert FiniteSet(*SymmetricDifference(A, B, evaluate=False)) == \ FiniteSet(0, 1, 3, 5, 6, 8, 10) raises(TypeError, lambda: FiniteSet(*SymmetricDifference(A, C, evaluate=False))) assert SymmetricDifference(FiniteSet(0, 1, 2, 3, 4, 5), \ FiniteSet(2, 4, 6, 8, 10)) == FiniteSet(0, 1, 3, 5, 6, 8, 10) assert SymmetricDifference(FiniteSet(2, 3, 4), FiniteSet(2, 3 ,4 ,5 )) \ == FiniteSet(5) assert FiniteSet(1, 2, 3, 4, 5) ^ FiniteSet(1, 2, 5, 6) == \ FiniteSet(3, 4, 6) assert Set(1, 2 ,3) ^ Set(2, 3, 4) == Union(Set(1, 2, 3) - Set(2, 3, 4), \ Set(2, 3, 4) - Set(1, 2, 3)) assert Interval(0, 4) ^ Interval(2, 5) == Union(Interval(0, 4) - \ Interval(2, 5), Interval(2, 5) - Interval(0, 4)) def test_issue_9536(): from sympy.functions.elementary.exponential import log a = Symbol('a', real=True) assert FiniteSet(log(a)).intersect(S.Reals) == Intersection(S.Reals, FiniteSet(log(a))) def test_issue_9637(): n = Symbol('n') a = FiniteSet(n) b = FiniteSet(2, n) assert Complement(S.Reals, a) == Complement(S.Reals, a, evaluate=False) assert Complement(Interval(1, 3), a) == Complement(Interval(1, 3), a, evaluate=False) assert Complement(Interval(1, 3), b) == \ Complement(Union(Interval(1, 2, False, True), Interval(2, 3, True, False)), a) assert Complement(a, S.Reals) == Complement(a, S.Reals, evaluate=False) assert Complement(a, Interval(1, 3)) == Complement(a, Interval(1, 3), evaluate=False) def test_issue_9808(): # See https://github.com/sympy/sympy/issues/16342 assert Complement(FiniteSet(y), FiniteSet(1)) == Complement(FiniteSet(y), FiniteSet(1), evaluate=False) assert Complement(FiniteSet(1, 2, x), FiniteSet(x, y, 2, 3)) == \ Complement(FiniteSet(1), FiniteSet(y), evaluate=False) def test_issue_9956(): assert Union(Interval(-oo, oo), FiniteSet(1)) == Interval(-oo, oo) assert Interval(-oo, oo).contains(1) is S.true def test_issue_Symbol_inter(): i = Interval(0, oo) r = S.Reals mat = Matrix([0, 0, 0]) assert Intersection(r, i, FiniteSet(m), FiniteSet(m, n)) == \ Intersection(i, FiniteSet(m)) assert Intersection(FiniteSet(1, m, n), FiniteSet(m, n, 2), i) == \ Intersection(i, FiniteSet(m, n)) assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \ Intersection(Intersection({m, z}, {m, n, x}), r) assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \ Intersection(FiniteSet(3, m, n), FiniteSet(m, n, x), r, evaluate=False) assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \ Intersection(FiniteSet(3, m, n), r) assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \ Intersection(r, FiniteSet(n)) assert Intersection(FiniteSet(sin(x), cos(x)), FiniteSet(sin(x), cos(x), 1), r) == \ Intersection(r, FiniteSet(sin(x), cos(x))) assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \ 
Intersection(r, FiniteSet(x**2, sin(x))) def test_issue_11827(): assert S.Naturals0**4 def test_issue_10113(): f = x**2/(x**2 - 4) assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True)) assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0) assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(Rational(9, 5), oo)) def test_issue_10248(): raises( TypeError, lambda: list(Intersection(S.Reals, FiniteSet(x))) ) A = Symbol('A', real=True) assert list(Intersection(S.Reals, FiniteSet(A))) == [A] def test_issue_9447(): a = Interval(0, 1) + Interval(2, 3) assert Complement(S.UniversalSet, a) == Complement( S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) assert Complement(S.Naturals, a) == Complement( S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) def test_issue_10337(): assert (FiniteSet(2) == 3) is False assert (FiniteSet(2) != 3) is True raises(TypeError, lambda: FiniteSet(2) < 3) raises(TypeError, lambda: FiniteSet(2) <= 3) raises(TypeError, lambda: FiniteSet(2) > 3) raises(TypeError, lambda: FiniteSet(2) >= 3) def test_issue_10326(): bad = [ EmptySet, FiniteSet(1), Interval(1, 2), S.ComplexInfinity, S.ImaginaryUnit, S.Infinity, S.NaN, S.NegativeInfinity, ] interval = Interval(0, 5) for i in bad: assert i not in interval x = Symbol('x', real=True) nr = Symbol('nr', extended_real=False) assert x + 1 in Interval(x, x + 4) assert nr not in Interval(x, x + 4) assert Interval(1, 2) in FiniteSet(Interval(0, 5), Interval(1, 2)) assert Interval(-oo, oo).contains(oo) is S.false assert Interval(-oo, oo).contains(-oo) is S.false def test_issue_2799(): U = S.UniversalSet a = Symbol('a', real=True) inf_interval = Interval(a, oo) R = S.Reals assert U + inf_interval == inf_interval + U assert U + R == R + U assert R + inf_interval == inf_interval + R def test_issue_9706(): assert Interval(-oo, 0).closure == Interval(-oo, 0, True, False) assert Interval(0, oo).closure == Interval(0, oo, False, True) assert Interval(-oo, oo).closure == Interval(-oo, oo) def test_issue_8257(): reals_plus_infinity = Union(Interval(-oo, oo), FiniteSet(oo)) reals_plus_negativeinfinity = Union(Interval(-oo, oo), FiniteSet(-oo)) assert Interval(-oo, oo) + FiniteSet(oo) == reals_plus_infinity assert FiniteSet(oo) + Interval(-oo, oo) == reals_plus_infinity assert Interval(-oo, oo) + FiniteSet(-oo) == reals_plus_negativeinfinity assert FiniteSet(-oo) + Interval(-oo, oo) == reals_plus_negativeinfinity def test_issue_10931(): assert S.Integers - S.Integers == EmptySet assert S.Integers - S.Reals == EmptySet def test_issue_11174(): soln = Intersection(Interval(-oo, oo), FiniteSet(-x), evaluate=False) assert Intersection(FiniteSet(-x), S.Reals) == soln soln = Intersection(S.Reals, FiniteSet(x), evaluate=False) assert Intersection(FiniteSet(x), S.Reals) == soln def test_finite_set_intersection(): # The following should not produce recursion errors # Note: some of these are not completely correct. See # https://github.com/sympy/sympy/issues/16342. 
assert Intersection(FiniteSet(-oo, x), FiniteSet(x)) == FiniteSet(x) assert Intersection._handle_finite_sets([FiniteSet(-oo, x), FiniteSet(0, x)]) == FiniteSet(x) assert Intersection._handle_finite_sets([FiniteSet(-oo, x), FiniteSet(x)]) == FiniteSet(x) assert Intersection._handle_finite_sets([FiniteSet(2, 3, x, y), FiniteSet(1, 2, x)]) == \ Intersection._handle_finite_sets([FiniteSet(1, 2, x), FiniteSet(2, 3, x, y)]) == \ Intersection(FiniteSet(1, 2, x), FiniteSet(2, 3, x, y)) == \ Intersection(FiniteSet(1, 2, x), FiniteSet(2, x, y)) assert FiniteSet(1+x-y) & FiniteSet(1) == \ FiniteSet(1) & FiniteSet(1+x-y) == \ Intersection(FiniteSet(1+x-y), FiniteSet(1), evaluate=False) assert FiniteSet(1) & FiniteSet(x) == FiniteSet(x) & FiniteSet(1) == \ Intersection(FiniteSet(1), FiniteSet(x), evaluate=False) assert FiniteSet({x}) & FiniteSet({x, y}) == \ Intersection(FiniteSet({x}), FiniteSet({x, y}), evaluate=False) def test_union_intersection_constructor(): # The actual exception does not matter here, so long as these fail sets = [FiniteSet(1), FiniteSet(2)] raises(Exception, lambda: Union(sets)) raises(Exception, lambda: Intersection(sets)) raises(Exception, lambda: Union(tuple(sets))) raises(Exception, lambda: Intersection(tuple(sets))) raises(Exception, lambda: Union(i for i in sets)) raises(Exception, lambda: Intersection(i for i in sets)) # Python sets are treated the same as FiniteSet # The union of a single set (of sets) is the set (of sets) itself assert Union(set(sets)) == FiniteSet(*sets) assert Intersection(set(sets)) == FiniteSet(*sets) assert Union({1}, {2}) == FiniteSet(1, 2) assert Intersection({1, 2}, {2, 3}) == FiniteSet(2) def test_Union_contains(): assert zoo not in Union( Interval.open(-oo, 0), Interval.open(0, oo)) @XFAIL def test_issue_16878b(): # in intersection_sets for (ImageSet, Set) there is no code # that handles the base_set of S.Reals like there is # for Integers assert imageset(x, (x, x), S.Reals).is_subset(S.Reals**2) is True
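# Added illustration (not part of the original test module): the constructor
# behaviour exercised in test_union_intersection_constructor above, restated
# as a stand-alone sketch.  Union and Intersection accept Set instances and
# plain Python sets as separate arguments; a list or tuple of sets is not a
# valid argument and raises.  The helper name below is illustrative only.
def _sketch_union_intersection_constructor():
    from sympy import FiniteSet, Intersection, Union
    sets = [FiniteSet(1), FiniteSet(2)]
    try:
        Union(sets)  # a list of sets is rejected
    except Exception:
        pass
    # Python sets are converted to FiniteSet, so these evaluate directly
    assert Union({1}, {2}) == FiniteSet(1, 2)
    assert Intersection({1, 2}, {2, 3}) == FiniteSet(2)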
bbe3ad07fa33cd93823ad2c9a5812fcf08b18af2290c0124a740d4894e6cb7cf
from __future__ import print_function, division import random from collections import defaultdict from sympy.core.basic import Atom, Basic from sympy.core.parameters import global_parameters from sympy.core.expr import Expr from sympy.core.compatibility import \ is_sequence, reduce, range, as_int, Iterable from sympy.core.numbers import Integer from sympy.core.sympify import _sympify from sympy.logic.boolalg import as_Boolean from sympy.matrices import zeros from sympy.polys.polytools import lcm from sympy.utilities.iterables import (flatten, has_variety, minlex, has_dups, runs) from mpmath.libmp.libintmath import ifac def _af_rmul(a, b): """ Return the product b*a; input and output are array forms. The ith value is a[b[i]]. Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> _af_rmul(a, b) [1, 2, 0] >>> [a[b[i]] for i in range(3)] [1, 2, 0] This handles the operands in reverse order compared to the ``*`` operator: >>> a = Permutation(a) >>> b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] See Also ======== rmul, _af_rmuln """ return [a[i] for i in b] def _af_rmuln(*abc): """ Given [a, b, c, ...] return the product of ...*c*b*a using array forms. The ith value is a[b[c[i]]]. Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> _af_rmul(a, b) [1, 2, 0] >>> [a[b[i]] for i in range(3)] [1, 2, 0] This handles the operands in reverse order compared to the ``*`` operator: >>> a = Permutation(a); b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] See Also ======== rmul, _af_rmul """ a = abc m = len(a) if m == 3: p0, p1, p2 = a return [p0[p1[i]] for i in p2] if m == 4: p0, p1, p2, p3 = a return [p0[p1[p2[i]]] for i in p3] if m == 5: p0, p1, p2, p3, p4 = a return [p0[p1[p2[p3[i]]]] for i in p4] if m == 6: p0, p1, p2, p3, p4, p5 = a return [p0[p1[p2[p3[p4[i]]]]] for i in p5] if m == 7: p0, p1, p2, p3, p4, p5, p6 = a return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6] if m == 8: p0, p1, p2, p3, p4, p5, p6, p7 = a return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7] if m == 1: return a[0][:] if m == 2: a, b = a return [a[i] for i in b] if m == 0: raise ValueError("String must not be empty") p0 = _af_rmuln(*a[:m//2]) p1 = _af_rmuln(*a[m//2:]) return [p0[i] for i in p1] def _af_parity(pi): """ Computes the parity of a permutation in array form. The parity of a permutation reflects the parity of the number of inversions in the permutation, i.e., the number of pairs of x and y such that x > y but p[x] < p[y]. Examples ======== >>> from sympy.combinatorics.permutations import _af_parity >>> _af_parity([0, 1, 2, 3]) 0 >>> _af_parity([3, 2, 0, 1]) 1 See Also ======== Permutation """ n = len(pi) a = [0] * n c = 0 for j in range(n): if a[j] == 0: c += 1 a[j] = 1 i = j while pi[i] != j: i = pi[i] a[i] = 1 return (n - c) % 2 def _af_invert(a): """ Finds the inverse, ~A, of a permutation, A, given in array form. Examples ======== >>> from sympy.combinatorics.permutations import _af_invert, _af_rmul >>> A = [1, 2, 0, 3] >>> _af_invert(A) [2, 0, 1, 3] >>> _af_rmul(_, A) [0, 1, 2, 3] See Also ======== Permutation, __invert__ """ inv_form = [0] * len(a) for i, ai in enumerate(a): inv_form[ai] = i return inv_form def _af_pow(a, n): """ Routine for finding powers of a permutation. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation, _af_pow >>> p = Permutation([2, 0, 3, 1]) >>> p.order() 4 >>> _af_pow(p._array_form, 4) [0, 1, 2, 3] """ if n == 0: return list(range(len(a))) if n < 0: return _af_pow(_af_invert(a), -n) if n == 1: return a[:] elif n == 2: b = [a[i] for i in a] elif n == 3: b = [a[a[i]] for i in a] elif n == 4: b = [a[a[a[i]]] for i in a] else: # use binary multiplication b = list(range(len(a))) while 1: if n & 1: b = [b[i] for i in a] n -= 1 if not n: break if n % 4 == 0: a = [a[a[a[i]]] for i in a] n = n // 4 elif n % 2 == 0: a = [a[i] for i in a] n = n // 2 return b def _af_commutes_with(a, b): """ Checks if the two permutations with array forms given by ``a`` and ``b`` commute. Examples ======== >>> from sympy.combinatorics.permutations import _af_commutes_with >>> _af_commutes_with([1, 2, 0], [0, 2, 1]) False See Also ======== Permutation, commutes_with """ return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1)) class Cycle(dict): """ Wrapper around dict which provides the functionality of a disjoint cycle. A cycle shows the rule to use to move subsets of elements to obtain a permutation. The Cycle class is more flexible than Permutation in that 1) all elements need not be present in order to investigate how multiple cycles act in sequence and 2) it can contain singletons: >>> from sympy.combinatorics.permutations import Perm, Cycle A Cycle will automatically parse a cycle given as a tuple on the rhs: >>> Cycle(1, 2)(2, 3) (1 3 2) The identity cycle, Cycle(), can be used to start a product: >>> Cycle()(1, 2)(2, 3) (1 3 2) The array form of a Cycle can be obtained by calling the list method (or passing it to the list function) and all elements from 0 will be shown: >>> a = Cycle(1, 2) >>> a.list() [0, 2, 1] >>> list(a) [0, 2, 1] If a larger (or smaller) range is desired use the list method and provide the desired size -- but the Cycle cannot be truncated to a size smaller than the largest element that is out of place: >>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3) >>> b.list() [0, 2, 1, 3, 4] >>> b.list(b.size + 1) [0, 2, 1, 3, 4, 5] >>> b.list(-1) [0, 2, 1] Singletons are not shown when printing with one exception: the largest element is always shown -- as a singleton if necessary: >>> Cycle(1, 4, 10)(4, 5) (1 5 4 10) >>> Cycle(1, 2)(4)(5)(10) (1 2)(10) The array form can be used to instantiate a Permutation so other properties of the permutation can be investigated: >>> Perm(Cycle(1, 2)(3, 4).list()).transpositions() [(1, 2), (3, 4)] Notes ===== The underlying structure of the Cycle is a dictionary and although the __iter__ method has been redefined to give the array form of the cycle, the underlying dictionary items are still available with the such methods as items(): >>> list(Cycle(1, 2).items()) [(1, 2), (2, 1)] See Also ======== Permutation """ def __missing__(self, arg): """Enter arg into dictionary and return arg.""" return as_int(arg) def __iter__(self): for i in self.list(): yield i def __call__(self, *other): """Return product of cycles processed from R to L. Examples ======== >>> from sympy.combinatorics.permutations import Cycle as C >>> from sympy.combinatorics.permutations import Permutation as Perm >>> C(1, 2)(2, 3) (1 3 2) An instance of a Cycle will automatically parse list-like objects and Permutations that are on the right. 
It is more flexible than the Permutation in that all elements need not be present: >>> a = C(1, 2) >>> a(2, 3) (1 3 2) >>> a(2, 3)(4, 5) (1 3 2)(4 5) """ rv = Cycle(*other) for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]): rv[k] = v return rv def list(self, size=None): """Return the cycles as an explicit list starting from 0 up to the greater of the largest value in the cycles and size. Truncation of trailing unmoved items will occur when size is less than the maximum element in the cycle; if this is desired, setting ``size=-1`` will guarantee such trimming. Examples ======== >>> from sympy.combinatorics.permutations import Cycle >>> from sympy.combinatorics.permutations import Permutation >>> p = Cycle(2, 3)(4, 5) >>> p.list() [0, 1, 3, 2, 5, 4] >>> p.list(10) [0, 1, 3, 2, 5, 4, 6, 7, 8, 9] Passing a length too small will trim trailing, unchanged elements in the permutation: >>> Cycle(2, 4)(1, 2, 4).list(-1) [0, 2, 1] """ if not self and size is None: raise ValueError('must give size for empty Cycle') if size is not None: big = max([i for i in self.keys() if self[i] != i] + [0]) size = max(size, big + 1) else: size = self.size return [self[i] for i in range(size)] def __repr__(self): """We want it to print as a Cycle, not as a dict. Examples ======== >>> from sympy.combinatorics import Cycle >>> Cycle(1, 2) (1 2) >>> print(_) (1 2) >>> list(Cycle(1, 2).items()) [(1, 2), (2, 1)] """ if not self: return 'Cycle()' cycles = Permutation(self).cyclic_form s = ''.join(str(tuple(c)) for c in cycles) big = self.size - 1 if not any(i == big for c in cycles for i in c): s += '(%s)' % big return 'Cycle%s' % s def __str__(self): """We want it to be printed in a Cycle notation with no comma in-between. Examples ======== >>> from sympy.combinatorics import Cycle >>> Cycle(1, 2) (1 2) >>> Cycle(1, 2, 4)(5, 6) (1 2 4)(5 6) """ if not self: return '()' cycles = Permutation(self).cyclic_form s = ''.join(str(tuple(c)) for c in cycles) big = self.size - 1 if not any(i == big for c in cycles for i in c): s += '(%s)' % big s = s.replace(',', '') return s def __init__(self, *args): """Load up a Cycle instance with the values for the cycle. Examples ======== >>> from sympy.combinatorics.permutations import Cycle >>> Cycle(1, 2, 6) (1 2 6) """ if not args: return if len(args) == 1: if isinstance(args[0], Permutation): for c in args[0].cyclic_form: self.update(self(*c)) return elif isinstance(args[0], Cycle): for k, v in args[0].items(): self[k] = v return args = [as_int(a) for a in args] if any(i < 0 for i in args): raise ValueError('negative integers are not allowed in a cycle.') if has_dups(args): raise ValueError('All elements must be unique in a cycle.') for i in range(-len(args), 0): self[args[i]] = args[i + 1] @property def size(self): if not self: return 0 return max(self.keys()) + 1 def copy(self): return Cycle(self) class Permutation(Atom): """ A permutation, alternatively known as an 'arrangement number' or 'ordering' is an arrangement of the elements of an ordered list into a one-to-one mapping with itself. The permutation of a given arrangement is given by indicating the positions of the elements after re-arrangement [2]_. For example, if one started with elements [x, y, a, b] (in that order) and they were reordered as [x, y, b, a] then the permutation would be [0, 1, 3, 2]. Notice that (in SymPy) the first element is always referred to as 0 and the permutation uses the indices of the elements in the original ordering, not the elements (a, b, etc...) themselves. 
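A quick check of that example (an added sketch; strings stand in for the
elements x, y, a and b, and the permutation is applied to the list in the
way shown further below):

>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 3, 2])(['x', 'y', 'a', 'b'])
['x', 'y', 'b', 'a']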
>>> from sympy.combinatorics import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) Permutations Notation ===================== Permutations are commonly represented in disjoint cycle or array forms. Array Notation and 2-line Form ------------------------------------ In the 2-line form, the elements and their final positions are shown as a matrix with 2 rows: [0 1 2 ... n-1] [p(0) p(1) p(2) ... p(n-1)] Since the first line is always range(n), where n is the size of p, it is sufficient to represent the permutation by the second line, referred to as the "array form" of the permutation. This is entered in brackets as the argument to the Permutation class: >>> p = Permutation([0, 2, 1]); p Permutation([0, 2, 1]) Given i in range(p.size), the permutation maps i to i^p >>> [i^p for i in range(p.size)] [0, 2, 1] The composite of two permutations p*q means first apply p, then q, so i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules: >>> q = Permutation([2, 1, 0]) >>> [i^p^q for i in range(3)] [2, 0, 1] >>> [i^(p*q) for i in range(3)] [2, 0, 1] One can use also the notation p(i) = i^p, but then the composition rule is (p*q)(i) = q(p(i)), not p(q(i)): >>> [(p*q)(i) for i in range(p.size)] [2, 0, 1] >>> [q(p(i)) for i in range(p.size)] [2, 0, 1] >>> [p(q(i)) for i in range(p.size)] [1, 2, 0] Disjoint Cycle Notation ----------------------- In disjoint cycle notation, only the elements that have shifted are indicated. In the above case, the 2 and 1 switched places. This can be entered in two ways: >>> Permutation(1, 2) == Permutation([[1, 2]]) == p True Only the relative ordering of elements in a cycle matter: >>> Permutation(1,2,3) == Permutation(2,3,1) == Permutation(3,1,2) True The disjoint cycle notation is convenient when representing permutations that have several cycles in them: >>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]]) True It also provides some economy in entry when computing products of permutations that are written in disjoint cycle notation: >>> Permutation(1, 2)(1, 3)(2, 3) Permutation([0, 3, 2, 1]) >>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]]) True Caution: when the cycles have common elements between them then the order in which the permutations are applied matters. The convention is that the permutations are applied from *right to left*. In the following, the transposition of elements 2 and 3 is followed by the transposition of elements 1 and 2: >>> Permutation(1, 2)(2, 3) == Permutation([(1, 2), (2, 3)]) True >>> Permutation(1, 2)(2, 3).list() [0, 3, 1, 2] If the first and second elements had been swapped first, followed by the swapping of the second and third, the result would have been [0, 2, 3, 1]. If, for some reason, you want to apply the cycles in the order they are entered, you can simply reverse the order of cycles: >>> Permutation([(1, 2), (2, 3)][::-1]).list() [0, 2, 3, 1] Entering a singleton in a permutation is a way to indicate the size of the permutation. The ``size`` keyword can also be used. Array-form entry: >>> Permutation([[1, 2], [9]]) Permutation([0, 2, 1], size=10) >>> Permutation([[1, 2]], size=10) Permutation([0, 2, 1], size=10) Cyclic-form entry: >>> Permutation(1, 2, size=10) Permutation([0, 2, 1], size=10) >>> Permutation(9)(1, 2) Permutation([0, 2, 1], size=10) Caution: no singleton containing an element larger than the largest in any previous cycle can be entered. 
This is an important difference in how Permutation and Cycle handle the __call__ syntax. A singleton argument at the start of a Permutation performs instantiation of the Permutation and is permitted: >>> Permutation(5) Permutation([], size=6) A singleton entered after instantiation is a call to the permutation -- a function call -- and if the argument is out of range it will trigger an error. For this reason, it is better to start the cycle with the singleton: The following fails because there is is no element 3: >>> Permutation(1, 2)(3) Traceback (most recent call last): ... IndexError: list index out of range This is ok: only the call to an out of range singleton is prohibited; otherwise the permutation autosizes: >>> Permutation(3)(1, 2) Permutation([0, 2, 1, 3]) >>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2) True Equality testing ---------------- The array forms must be the same in order for permutations to be equal: >>> Permutation([1, 0, 2, 3]) == Permutation([1, 0]) False Identity Permutation -------------------- The identity permutation is a permutation in which no element is out of place. It can be entered in a variety of ways. All the following create an identity permutation of size 4: >>> I = Permutation([0, 1, 2, 3]) >>> all(p == I for p in [ ... Permutation(3), ... Permutation(range(4)), ... Permutation([], size=4), ... Permutation(size=4)]) True Watch out for entering the range *inside* a set of brackets (which is cycle notation): >>> I == Permutation([range(4)]) False Permutation Printing ==================== There are a few things to note about how Permutations are printed. 1) If you prefer one form (array or cycle) over another, you can set ``init_printing`` with the ``perm_cyclic`` flag. >>> from sympy import init_printing >>> p = Permutation(1, 2)(4, 5)(3, 4) >>> p Permutation([0, 2, 1, 4, 5, 3]) >>> init_printing(perm_cyclic=True, pretty_print=False) >>> p (1 2)(3 4 5) 2) Regardless of the setting, a list of elements in the array for cyclic form can be obtained and either of those can be copied and supplied as the argument to Permutation: >>> p.array_form [0, 2, 1, 4, 5, 3] >>> p.cyclic_form [[1, 2], [3, 4, 5]] >>> Permutation(_) == p True 3) Printing is economical in that as little as possible is printed while retaining all information about the size of the permutation: >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation([1, 0, 2, 3]) Permutation([1, 0, 2, 3]) >>> Permutation([1, 0, 2, 3], size=20) Permutation([1, 0], size=20) >>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20) Permutation([1, 0, 2, 4, 3], size=20) >>> p = Permutation([1, 0, 2, 3]) >>> init_printing(perm_cyclic=True, pretty_print=False) >>> p (3)(0 1) >>> init_printing(perm_cyclic=False, pretty_print=False) The 2 was not printed but it is still there as can be seen with the array_form and size methods: >>> p.array_form [1, 0, 2, 3] >>> p.size 4 Short introduction to other methods =================================== The permutation can act as a bijective function, telling what element is located at a given position >>> q = Permutation([5, 2, 3, 4, 1, 0]) >>> q.array_form[1] # the hard way 2 >>> q(1) # the easy way 2 >>> {i: q(i) for i in range(q.size)} # showing the bijection {0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0} The full cyclic form (including singletons) can be obtained: >>> p.full_cyclic_form [[0, 1], [2], [3]] Any permutation can be factored into transpositions of pairs of elements: >>> Permutation([[1, 2], [3, 4, 5]]).transpositions() [(1, 2), (3, 5), (3, 4)] >>> 
Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form [[1, 2], [3, 4, 5]] The number of permutations on a set of n elements is given by n! and is called the cardinality. >>> p.size 4 >>> p.cardinality 24 A given permutation has a rank among all the possible permutations of the same elements, but what that rank is depends on how the permutations are enumerated. (There are a number of different methods of doing so.) The lexicographic rank is given by the rank method and this rank is used to increment a permutation with addition/subtraction: >>> p.rank() 6 >>> p + 1 Permutation([1, 0, 3, 2]) >>> p.next_lex() Permutation([1, 0, 3, 2]) >>> _.rank() 7 >>> p.unrank_lex(p.size, rank=7) Permutation([1, 0, 3, 2]) The product of two permutations p and q is defined as their composition as functions, (p*q)(i) = q(p(i)) [6]_. >>> p = Permutation([1, 0, 2, 3]) >>> q = Permutation([2, 3, 1, 0]) >>> list(q*p) [2, 3, 0, 1] >>> list(p*q) [3, 2, 1, 0] >>> [q(p(i)) for i in range(p.size)] [3, 2, 1, 0] The permutation can be 'applied' to any list-like object, not only Permutations: >>> p(['zero', 'one', 'four', 'two']) ['one', 'zero', 'four', 'two'] >>> p('zo42') ['o', 'z', '4', '2'] If you have a list of arbitrary elements, the corresponding permutation can be found with the from_sequence method: >>> Permutation.from_sequence('SymPy') Permutation([1, 3, 2, 0, 4]) See Also ======== Cycle References ========== .. [1] Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics Combinatorics and Graph Theory with Mathematica. Reading, MA: Addison-Wesley, pp. 3-16, 1990. .. [2] Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011. .. [3] Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001), 281-284. DOI=10.1016/S0020-0190(01)00141-7 .. [4] D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms' CRC Press, 1999 .. [5] Graham, R. L.; Knuth, D. E.; and Patashnik, O. Concrete Mathematics: A Foundation for Computer Science, 2nd ed. Reading, MA: Addison-Wesley, 1994. .. [6] https://en.wikipedia.org/wiki/Permutation#Product_and_inverse .. [7] https://en.wikipedia.org/wiki/Lehmer_code """ is_Permutation = True _array_form = None _cyclic_form = None _cycle_structure = None _size = None _rank = None def __new__(cls, *args, **kwargs): """ Constructor for the Permutation object from a list or a list of lists in which all elements of the permutation may appear only once. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) Permutations entered in array-form are left unaltered: >>> Permutation([0, 2, 1]) Permutation([0, 2, 1]) Permutations entered in cyclic form are converted to array form; singletons need not be entered, but can be entered to indicate the largest element: >>> Permutation([[4, 5, 6], [0, 1]]) Permutation([1, 0, 2, 3, 5, 6, 4]) >>> Permutation([[4, 5, 6], [0, 1], [19]]) Permutation([1, 0, 2, 3, 5, 6, 4], size=20) All manipulation of permutations assumes that the smallest element is 0 (in keeping with 0-based indexing in Python) so if the 0 is missing when entering a permutation in array form, an error will be raised: >>> Permutation([2, 1]) Traceback (most recent call last): ... ValueError: Integers 0 through 2 must be present. 
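(An added illustration: once the 0 is included, the same entry is accepted
and left unaltered.)

>>> Permutation([2, 1, 0])
Permutation([2, 1, 0])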
If a permutation is entered in cyclic form, it can be entered without singletons and the ``size`` specified so those values can be filled in, otherwise the array form will only extend to the maximum value in the cycles: >>> Permutation([[1, 4], [3, 5, 2]], size=10) Permutation([0, 4, 3, 5, 1, 2], size=10) >>> _.array_form [0, 4, 3, 5, 1, 2, 6, 7, 8, 9] """ size = kwargs.pop('size', None) if size is not None: size = int(size) #a) () #b) (1) = identity #c) (1, 2) = cycle #d) ([1, 2, 3]) = array form #e) ([[1, 2]]) = cyclic form #f) (Cycle) = conversion to permutation #g) (Permutation) = adjust size or return copy ok = True if not args: # a return cls._af_new(list(range(size or 0))) elif len(args) > 1: # c return cls._af_new(Cycle(*args).list(size)) if len(args) == 1: a = args[0] if isinstance(a, cls): # g if size is None or size == a.size: return a return cls(a.array_form, size=size) if isinstance(a, Cycle): # f return cls._af_new(a.list(size)) if not is_sequence(a): # b return cls._af_new(list(range(a + 1))) if has_variety(is_sequence(ai) for ai in a): ok = False else: ok = False if not ok: raise ValueError("Permutation argument must be a list of ints, " "a list of lists, Permutation or Cycle.") # safe to assume args are valid; this also makes a copy # of the args args = list(args[0]) is_cycle = args and is_sequence(args[0]) if is_cycle: # e args = [[int(i) for i in c] for c in args] else: # d args = [int(i) for i in args] # if there are n elements present, 0, 1, ..., n-1 should be present # unless a cycle notation has been provided. A 0 will be added # for convenience in case one wants to enter permutations where # counting starts from 1. temp = flatten(args) if has_dups(temp) and not is_cycle: raise ValueError('there were repeated elements.') temp = set(temp) if not is_cycle and \ any(i not in temp for i in range(len(temp))): raise ValueError("Integers 0 through %s must be present." 
% max(temp)) if is_cycle: # it's not necessarily canonical so we won't store # it -- use the array form instead c = Cycle() for ci in args: c = c(*ci) aform = c.list() else: aform = list(args) if size and size > len(aform): # don't allow for truncation of permutation which # might split a cycle and lead to an invalid aform # but do allow the permutation size to be increased aform.extend(list(range(len(aform), size))) return cls._af_new(aform) def _eval_Eq(self, other): other = _sympify(other) if not isinstance(other, Permutation): return None if self._size != other._size: return None return as_Boolean(self._array_form == other._array_form) @classmethod def _af_new(cls, perm): """A method to produce a Permutation object from a list; the list is bound to the _array_form attribute, so it must not be modified; this method is meant for internal use only; the list ``a`` is supposed to be generated as a temporary value in a method, so p = Perm._af_new(a) is the only object to hold a reference to ``a``:: Examples ======== >>> from sympy.combinatorics.permutations import Perm >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> a = [2, 1, 3, 0] >>> p = Perm._af_new(a) >>> p Permutation([2, 1, 3, 0]) """ p = super(Permutation, cls).__new__(cls) p._array_form = perm p._size = len(perm) return p def _hashable_content(self): # the array_form (a list) is the Permutation arg, so we need to # return a tuple, instead return tuple(self.array_form) @property def array_form(self): """ Return a copy of the attribute _array_form Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[2, 0], [3, 1]]) >>> p.array_form [2, 3, 0, 1] >>> Permutation([[2, 0, 3, 1]]).array_form [3, 2, 0, 1] >>> Permutation([2, 0, 3, 1]).array_form [2, 0, 3, 1] >>> Permutation([[1, 2], [4, 5]]).array_form [0, 2, 1, 3, 5, 4] """ return self._array_form[:] def list(self, size=None): """Return the permutation as an explicit list, possibly trimming unmoved elements if size is less than the maximum element in the permutation; if this is desired, setting ``size=-1`` will guarantee such trimming. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation(2, 3)(4, 5) >>> p.list() [0, 1, 3, 2, 5, 4] >>> p.list(10) [0, 1, 3, 2, 5, 4, 6, 7, 8, 9] Passing a length too small will trim trailing, unchanged elements in the permutation: >>> Permutation(2, 4)(1, 2, 4).list(-1) [0, 2, 1] >>> Permutation(3).list(-1) [] """ if not self and size is None: raise ValueError('must give size for empty Cycle') rv = self.array_form if size is not None: if size > self.size: rv.extend(list(range(self.size, size))) else: # find first value from rhs where rv[i] != i i = self.size - 1 while rv: if rv[-1] != i: break rv.pop() i -= 1 return rv @property def cyclic_form(self): """ This is used to convert to the cyclic notation from the canonical notation. Singletons are omitted. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 3, 1, 2]) >>> p.cyclic_form [[1, 3, 2]] >>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form [[0, 1], [3, 4]] See Also ======== array_form, full_cyclic_form """ if self._cyclic_form is not None: return list(self._cyclic_form) array_form = self.array_form unchecked = [True] * len(array_form) cyclic_form = [] for i in range(len(array_form)): if unchecked[i]: cycle = [] cycle.append(i) unchecked[i] = False j = i while unchecked[array_form[j]]: j = array_form[j] cycle.append(j) unchecked[j] = False if len(cycle) > 1: cyclic_form.append(cycle) assert cycle == list(minlex(cycle, is_set=True)) cyclic_form.sort() self._cyclic_form = cyclic_form[:] return cyclic_form @property def full_cyclic_form(self): """Return permutation in cyclic form including singletons. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> Permutation([0, 2, 1]).full_cyclic_form [[0], [1, 2]] """ need = set(range(self.size)) - set(flatten(self.cyclic_form)) rv = self.cyclic_form rv.extend([[i] for i in need]) rv.sort() return rv @property def size(self): """ Returns the number of elements in the permutation. Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([[3, 2], [0, 1]]).size 4 See Also ======== cardinality, length, order, rank """ return self._size def support(self): """Return the elements in permutation, P, for which P[i] != i. Examples ======== >>> from sympy.combinatorics import Permutation >>> p = Permutation([[3, 2], [0, 1], [4]]) >>> p.array_form [1, 0, 3, 2, 4] >>> p.support() [0, 1, 2, 3] """ a = self.array_form return [i for i, e in enumerate(a) if a[i] != i] def __add__(self, other): """Return permutation that is other higher in rank than self. The rank is the lexicographical rank, with the identity permutation having rank of 0. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> I = Permutation([0, 1, 2, 3]) >>> a = Permutation([2, 1, 3, 0]) >>> I + a.rank() == a True See Also ======== __sub__, inversion_vector """ rank = (self.rank() + other) % self.cardinality rv = self.unrank_lex(self.size, rank) rv._rank = rank return rv def __sub__(self, other): """Return the permutation that is other lower in rank than self. See Also ======== __add__ """ return self.__add__(-other) @staticmethod def rmul(*args): """ Return product of Permutations [a, b, c, ...] as the Permutation whose ith value is a(b(c(i))). a, b, c, ... can be Permutation objects or tuples. Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> a = Permutation(a); b = Permutation(b) >>> list(Permutation.rmul(a, b)) [1, 2, 0] >>> [a(b(i)) for i in range(3)] [1, 2, 0] This handles the operands in reverse order compared to the ``*`` operator: >>> a = Permutation(a); b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] Notes ===== All items in the sequence will be parsed by Permutation as necessary as long as the first item is a Permutation: >>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b) True The reverse order of arguments will raise a TypeError. 
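A further added illustration of the definition given above -- with a third
permutation the ith value is still a(b(c(i))):

>>> c = Permutation([2, 1, 0])
>>> list(Permutation.rmul(a, b, c))
[0, 2, 1]
>>> [a(b(c(i))) for i in range(3)]
[0, 2, 1]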
""" rv = args[0] for i in range(1, len(args)): rv = args[i]*rv return rv @classmethod def rmul_with_af(cls, *args): """ same as rmul, but the elements of args are Permutation objects which have _array_form """ a = [x._array_form for x in args] rv = cls._af_new(_af_rmuln(*a)) return rv def mul_inv(self, other): """ other*~self, self and other have _array_form """ a = _af_invert(self._array_form) b = other._array_form return self._af_new(_af_rmul(a, b)) def __rmul__(self, other): """This is needed to coerce other to Permutation in rmul.""" cls = type(self) return cls(other)*self def __mul__(self, other): """ Return the product a*b as a Permutation; the ith value is b(a(i)). Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> a = Permutation(a); b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] This handles operands in reverse order compared to _af_rmul and rmul: >>> al = list(a); bl = list(b) >>> _af_rmul(al, bl) [1, 2, 0] >>> [al[bl[i]] for i in range(3)] [1, 2, 0] It is acceptable for the arrays to have different lengths; the shorter one will be padded to match the longer one: >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> b*Permutation([1, 0]) Permutation([1, 2, 0]) >>> Permutation([1, 0])*b Permutation([2, 0, 1]) It is also acceptable to allow coercion to handle conversion of a single list to the left of a Permutation: >>> [0, 1]*a # no change: 2-element identity Permutation([1, 0, 2]) >>> [[0, 1]]*a # exchange first two elements Permutation([0, 1, 2]) You cannot use more than 1 cycle notation in a product of cycles since coercion can only handle one argument to the left. To handle multiple cycles it is convenient to use Cycle instead of Permutation: >>> [[1, 2]]*[[2, 3]]*Permutation([]) # doctest: +SKIP >>> from sympy.combinatorics.permutations import Cycle >>> Cycle(1, 2)(2, 3) (1 3 2) """ a = self.array_form # __rmul__ makes sure the other is a Permutation b = other.array_form if not b: perm = a else: b.extend(list(range(len(b), len(a)))) perm = [b[i] for i in a] + b[len(a):] return self._af_new(perm) def commutes_with(self, other): """ Checks if the elements are commuting. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> a = Permutation([1, 4, 3, 0, 2, 5]) >>> b = Permutation([0, 1, 2, 3, 4, 5]) >>> a.commutes_with(b) True >>> b = Permutation([2, 3, 5, 4, 1, 0]) >>> a.commutes_with(b) False """ a = self.array_form b = other.array_form return _af_commutes_with(a, b) def __pow__(self, n): """ Routine for finding powers of a permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([2, 0, 3, 1]) >>> p.order() 4 >>> p**4 Permutation([0, 1, 2, 3]) """ if isinstance(n, Permutation): raise NotImplementedError( 'p**p is not defined; do you mean p^p (conjugate)?') n = int(n) return self._af_new(_af_pow(self.array_form, n)) def __rxor__(self, i): """Return self(i) when ``i`` is an int. Examples ======== >>> from sympy.combinatorics import Permutation >>> p = Permutation(1, 2, 9) >>> 2^p == p(2) == 9 True """ if int(i) == i: return self(i) else: raise NotImplementedError( "i^p = p(i) when i is an integer, not %s." % i) def __xor__(self, h): """Return the conjugate permutation ``~h*self*h` `. 
If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and ``b = ~h*a*h`` and both have the same cycle structure. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation(1, 2, 9) >>> q = Permutation(6, 9, 8) >>> p*q != q*p True Calculate and check properties of the conjugate: >>> c = p^q >>> c == ~q*p*q and p == q*c*~q True The expression q^p^r is equivalent to q^(p*r): >>> r = Permutation(9)(4, 6, 8) >>> q^p^r == q^(p*r) True If the term to the left of the conjugate operator, i, is an integer then this is interpreted as selecting the ith element from the permutation to the right: >>> all(i^p == p(i) for i in range(p.size)) True Note that the * operator as higher precedence than the ^ operator: >>> q^r*p^r == q^(r*p)^r == Permutation(9)(1, 6, 4) True Notes ===== In Python the precedence rule is p^q^r = (p^q)^r which differs in general from p^(q^r) >>> q^p^r (9)(1 4 8) >>> q^(p^r) (9)(1 8 6) For a given r and p, both of the following are conjugates of p: ~r*p*r and r*p*~r. But these are not necessarily the same: >>> ~r*p*r == r*p*~r True >>> p = Permutation(1, 2, 9)(5, 6) >>> ~r*p*r == r*p*~r False The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to this method: >>> p^~r == r*p*~r True """ if self.size != h.size: raise ValueError("The permutations must be of equal size.") a = [None]*self.size h = h._array_form p = self._array_form for i in range(self.size): a[h[i]] = h[p[i]] return self._af_new(a) def transpositions(self): """ Return the permutation decomposed into a list of transpositions. It is always possible to express a permutation as the product of transpositions, see [1] Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]]) >>> t = p.transpositions() >>> t [(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)] >>> print(''.join(str(c) for c in t)) (0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2) >>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p True References ========== .. [1] https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties """ a = self.cyclic_form res = [] for x in a: nx = len(x) if nx == 2: res.append(tuple(x)) elif nx > 2: first = x[0] for y in x[nx - 1:0:-1]: res.append((first, y)) return res @classmethod def from_sequence(self, i, key=None): """Return the permutation needed to obtain ``i`` from the sorted elements of ``i``. If custom sorting is desired, a key can be given. Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation.from_sequence('SymPy') (4)(0 1 3) >>> _(sorted("SymPy")) ['S', 'y', 'm', 'P', 'y'] >>> Permutation.from_sequence('SymPy', key=lambda x: x.lower()) (4)(0 2)(1 3) """ ic = list(zip(i, list(range(len(i))))) if key: ic.sort(key=lambda x: key(x[0])) else: ic.sort() return ~Permutation([i[1] for i in ic]) def __invert__(self): """ Return the inverse of the permutation. A permutation multiplied by its inverse is the identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([[2, 0], [3, 1]]) >>> ~p Permutation([2, 3, 0, 1]) >>> _ == p**-1 True >>> p*~p == ~p*p == Permutation([0, 1, 2, 3]) True """ return self._af_new(_af_invert(self._array_form)) def __iter__(self): """Yield elements from array form. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> list(Permutation(range(3))) [0, 1, 2] """ for i in self.array_form: yield i def __repr__(self): from sympy.printing.repr import srepr return srepr(self) def __call__(self, *i): """ Allows applying a permutation instance as a bijective function. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[2, 0], [3, 1]]) >>> p.array_form [2, 3, 0, 1] >>> [p(i) for i in range(4)] [2, 3, 0, 1] If an array is given then the permutation selects the items from the array (i.e. the permutation is applied to the array): >>> from sympy.abc import x >>> p([x, 1, 0, x**2]) [0, x**2, x, 1] """ # list indices can be Integer or int; leave this # as it is (don't test or convert it) because this # gets called a lot and should be fast if len(i) == 1: i = i[0] if not isinstance(i, Iterable): i = as_int(i) if i < 0 or i > self.size: raise TypeError( "{} should be an integer between 0 and {}" .format(i, self.size-1)) return self._array_form[i] # P([a, b, c]) if len(i) != self.size: raise TypeError( "{} should have the length {}.".format(i, self.size)) return [i[j] for j in self._array_form] # P(1, 2, 3) return self*Permutation(Cycle(*i), size=self.size) def atoms(self): """ Returns all the elements of a permutation Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0, 1, 2, 3, 4, 5]).atoms() {0, 1, 2, 3, 4, 5} >>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms() {0, 1, 2, 3, 4, 5} """ return set(self.array_form) def apply(self, i): r"""Apply the permutation to an expression. Parameters ========== i : Expr It should be an integer between $0$ and $n-1$ where $n$ is the size of the permutation. If it is a symbol or a symbolic expression that can have integer values, an ``AppliedPermutation`` object will be returned which can represent an unevaluated function. Notes ===== Any permutation can be defined as a bijective function $\sigma : \{ 0, 1, ..., n-1 \} \rightarrow \{ 0, 1, ..., n-1 \}$ where $n$ denotes the size of the permutation. The definition may even be extended for any set with distinctive elements, such that the permutation can even be applied for real numbers or such, however, it is not implemented for now for computational reasons and the integrity with the group theory module. This function is similar to the ``__call__`` magic, however, ``__call__`` magic already has some other applications like permuting an array or attatching new cycles, which would not always be mathematically consistent. This also guarantees that the return type is a SymPy integer, which guarantees the safety to use assumptions. """ i = _sympify(i) if i.is_integer is False: raise NotImplementedError("{} should be an integer.".format(i)) n = self.size if (i < 0) == True or (i >= n) == True: raise NotImplementedError( "{} should be an integer between 0 and {}".format(i, n-1)) if i.is_Integer: return Integer(self._array_form[i]) return AppliedPermutation(self, i) def next_lex(self): """ Returns the next permutation in lexicographical order. If self is the last permutation in lexicographical order it returns None. See [4] section 2.4. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([2, 3, 1, 0]) >>> p = Permutation([2, 3, 1, 0]); p.rank() 17 >>> p = p.next_lex(); p.rank() 18 See Also ======== rank, unrank_lex """ perm = self.array_form[:] n = len(perm) i = n - 2 while perm[i + 1] < perm[i]: i -= 1 if i == -1: return None else: j = n - 1 while perm[j] < perm[i]: j -= 1 perm[j], perm[i] = perm[i], perm[j] i += 1 j = n - 1 while i < j: perm[j], perm[i] = perm[i], perm[j] i += 1 j -= 1 return self._af_new(perm) @classmethod def unrank_nonlex(self, n, r): """ This is a linear time unranking algorithm that does not respect lexicographic order [3]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation.unrank_nonlex(4, 5) Permutation([2, 0, 3, 1]) >>> Permutation.unrank_nonlex(4, -1) Permutation([0, 1, 2, 3]) See Also ======== next_nonlex, rank_nonlex """ def _unrank1(n, r, a): if n > 0: a[n - 1], a[r % n] = a[r % n], a[n - 1] _unrank1(n - 1, r//n, a) id_perm = list(range(n)) n = int(n) r = r % ifac(n) _unrank1(n, r, id_perm) return self._af_new(id_perm) def rank_nonlex(self, inv_perm=None): """ This is a linear time ranking algorithm that does not enforce lexicographic order [3]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.rank_nonlex() 23 See Also ======== next_nonlex, unrank_nonlex """ def _rank1(n, perm, inv_perm): if n == 1: return 0 s = perm[n - 1] t = inv_perm[n - 1] perm[n - 1], perm[t] = perm[t], s inv_perm[n - 1], inv_perm[s] = inv_perm[s], t return s + n*_rank1(n - 1, perm, inv_perm) if inv_perm is None: inv_perm = (~self).array_form if not inv_perm: return 0 perm = self.array_form[:] r = _rank1(len(perm), perm, inv_perm) return r def next_nonlex(self): """ Returns the next permutation in nonlex order [3]. If self is the last permutation in this order it returns None. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([2, 0, 3, 1]); p.rank_nonlex() 5 >>> p = p.next_nonlex(); p Permutation([3, 0, 1, 2]) >>> p.rank_nonlex() 6 See Also ======== rank_nonlex, unrank_nonlex """ r = self.rank_nonlex() if r == ifac(self.size) - 1: return None return self.unrank_nonlex(self.size, r + 1) def rank(self): """ Returns the lexicographic rank of the permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.rank() 0 >>> p = Permutation([3, 2, 1, 0]) >>> p.rank() 23 See Also ======== next_lex, unrank_lex, cardinality, length, order, size """ if not self._rank is None: return self._rank rank = 0 rho = self.array_form[:] n = self.size - 1 size = n + 1 psize = int(ifac(n)) for j in range(size - 1): rank += rho[j]*psize for i in range(j + 1, size): if rho[i] > rho[j]: rho[i] -= 1 psize //= n n -= 1 self._rank = rank return rank @property def cardinality(self): """ Returns the number of all possible permutations. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.cardinality 24 See Also ======== length, order, rank, size """ return int(ifac(self.size)) def parity(self): """ Computes the parity of a permutation. 
The parity of a permutation reflects the parity of the number of inversions in the permutation, i.e., the number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.parity() 0 >>> p = Permutation([3, 2, 0, 1]) >>> p.parity() 1 See Also ======== _af_parity """ if self._cyclic_form is not None: return (self.size - self.cycles) % 2 return _af_parity(self.array_form) @property def is_even(self): """ Checks if a permutation is even. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.is_even True >>> p = Permutation([3, 2, 1, 0]) >>> p.is_even True See Also ======== is_odd """ return not self.is_odd @property def is_odd(self): """ Checks if a permutation is odd. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.is_odd False >>> p = Permutation([3, 2, 0, 1]) >>> p.is_odd True See Also ======== is_even """ return bool(self.parity() % 2) @property def is_Singleton(self): """ Checks to see if the permutation contains only one number and is thus the only possible permutation of this set of numbers Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0]).is_Singleton True >>> Permutation([0, 1]).is_Singleton False See Also ======== is_Empty """ return self.size == 1 @property def is_Empty(self): """ Checks to see if the permutation is a set with zero elements Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([]).is_Empty True >>> Permutation([0]).is_Empty False See Also ======== is_Singleton """ return self.size == 0 @property def is_identity(self): return self.is_Identity @property def is_Identity(self): """ Returns True if the Permutation is an identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([]) >>> p.is_Identity True >>> p = Permutation([[0], [1], [2]]) >>> p.is_Identity True >>> p = Permutation([0, 1, 2]) >>> p.is_Identity True >>> p = Permutation([0, 2, 1]) >>> p.is_Identity False See Also ======== order """ af = self.array_form return not af or all(i == af[i] for i in range(self.size)) def ascents(self): """ Returns the positions of ascents in a permutation, ie, the location where p[i] < p[i+1] Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([4, 0, 1, 3, 2]) >>> p.ascents() [1, 2] See Also ======== descents, inversions, min, max """ a = self.array_form pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]] return pos def descents(self): """ Returns the positions of descents in a permutation, ie, the location where p[i] > p[i+1] Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([4, 0, 1, 3, 2]) >>> p.descents() [0, 3] See Also ======== ascents, inversions, min, max """ a = self.array_form pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]] return pos def max(self): """ The maximum element moved by the permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([1, 0, 2, 3, 4]) >>> p.max() 1 See Also ======== min, descents, ascents, inversions """ max = 0 a = self.array_form for i in range(len(a)): if a[i] != i and a[i] > max: max = a[i] return max def min(self): """ The minimum element moved by the permutation. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 4, 3, 2]) >>> p.min() 2 See Also ======== max, descents, ascents, inversions """ a = self.array_form min = len(a) for i in range(len(a)): if a[i] != i and a[i] < min: min = a[i] return min def inversions(self): """ Computes the number of inversions of a permutation. An inversion is where i > j but p[i] < p[j]. For small length of p, it iterates over all i and j values and calculates the number of inversions. For large length of p, it uses a variation of merge sort to calculate the number of inversions. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3, 4, 5]) >>> p.inversions() 0 >>> Permutation([3, 2, 1, 0]).inversions() 6 See Also ======== descents, ascents, min, max References ========== .. [1] http://www.cp.eng.chula.ac.th/~piak/teaching/algo/algo2008/count-inv.htm """ inversions = 0 a = self.array_form n = len(a) if n < 130: for i in range(n - 1): b = a[i] for c in a[i + 1:]: if b > c: inversions += 1 else: k = 1 right = 0 arr = a[:] temp = a[:] while k < n: i = 0 while i + k < n: right = i + k * 2 - 1 if right >= n: right = n - 1 inversions += _merge(arr, temp, i, i + k, right) i = i + k * 2 k = k * 2 return inversions def commutator(self, x): """Return the commutator of self and x: ``~x*~self*x*self`` If f and g are part of a group, G, then the commutator of f and g is the group identity iff f and g commute, i.e. fg == gf. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([0, 2, 3, 1]) >>> x = Permutation([2, 0, 3, 1]) >>> c = p.commutator(x); c Permutation([2, 1, 3, 0]) >>> c == ~x*~p*x*p True >>> I = Permutation(3) >>> p = [I + i for i in range(6)] >>> for i in range(len(p)): ... for j in range(len(p)): ... c = p[i].commutator(p[j]) ... if p[i]*p[j] == p[j]*p[i]: ... assert c == I ... else: ... assert c != I ... References ========== https://en.wikipedia.org/wiki/Commutator """ a = self.array_form b = x.array_form n = len(a) if len(b) != n: raise ValueError("The permutations must be of equal size.") inva = [None]*n for i in range(n): inva[a[i]] = i invb = [None]*n for i in range(n): invb[b[i]] = i return self._af_new([a[b[inva[i]]] for i in invb]) def signature(self): """ Gives the signature of the permutation needed to place the elements of the permutation in canonical order. The signature is calculated as (-1)^<number of inversions> Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2]) >>> p.inversions() 0 >>> p.signature() 1 >>> q = Permutation([0,2,1]) >>> q.inversions() 1 >>> q.signature() -1 See Also ======== inversions """ if self.is_even: return 1 return -1 def order(self): """ Computes the order of a permutation. When the permutation is raised to the power of its order it equals the identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([3, 1, 5, 2, 4, 0]) >>> p.order() 4 >>> (p**(p.order())) Permutation([], size=6) See Also ======== identity, cardinality, length, rank, size """ return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1) def length(self): """ Returns the number of integers moved by a permutation. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0, 3, 2, 1]).length() 2 >>> Permutation([[0, 1], [2, 3]]).length() 4 See Also ======== min, max, support, cardinality, order, rank, size """ return len(self.support()) @property def cycle_structure(self): """Return the cycle structure of the permutation as a dictionary indicating the multiplicity of each cycle length. Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation(3).cycle_structure {1: 4} >>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure {2: 2, 3: 1} """ if self._cycle_structure: rv = self._cycle_structure else: rv = defaultdict(int) singletons = self.size for c in self.cyclic_form: rv[len(c)] += 1 singletons -= len(c) if singletons: rv[1] = singletons self._cycle_structure = rv return dict(rv) # make a copy @property def cycles(self): """ Returns the number of cycles contained in the permutation (including singletons). Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0, 1, 2]).cycles 3 >>> Permutation([0, 1, 2]).full_cyclic_form [[0], [1], [2]] >>> Permutation(0, 1)(2, 3).cycles 2 See Also ======== sympy.functions.combinatorial.numbers.stirling """ return len(self.full_cyclic_form) def index(self): """ Returns the index of a permutation. The index of a permutation is the sum of all subscripts j such that p[j] is greater than p[j+1]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([3, 0, 2, 1, 4]) >>> p.index() 2 """ a = self.array_form return sum([j for j in range(len(a) - 1) if a[j] > a[j + 1]]) def runs(self): """ Returns the runs of a permutation. An ascending sequence in a permutation is called a run [5]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8]) >>> p.runs() [[2, 5, 7], [3, 6], [0, 1, 4, 8]] >>> q = Permutation([1,3,2,0]) >>> q.runs() [[1, 3], [2], [0]] """ return runs(self.array_form) def inversion_vector(self): """Return the inversion vector of the permutation. The inversion vector consists of elements whose value indicates the number of elements in the permutation that are lesser than it and lie on its right hand side. The inversion vector is the same as the Lehmer encoding of a permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2]) >>> p.inversion_vector() [4, 7, 0, 5, 0, 2, 1, 1] >>> p = Permutation([3, 2, 1, 0]) >>> p.inversion_vector() [3, 2, 1] The inversion vector increases lexicographically with the rank of the permutation, the -ith element cycling through 0..i. >>> p = Permutation(2) >>> while p: ... print('%s %s %s' % (p, p.inversion_vector(), p.rank())) ... p = p.next_lex() (2) [0, 0] 0 (1 2) [0, 1] 1 (2)(0 1) [1, 0] 2 (0 1 2) [1, 1] 3 (0 2 1) [2, 0] 4 (0 2) [2, 1] 5 See Also ======== from_inversion_vector """ self_array_form = self.array_form n = len(self_array_form) inversion_vector = [0] * (n - 1) for i in range(n - 1): val = 0 for j in range(i + 1, n): if self_array_form[j] < self_array_form[i]: val += 1 inversion_vector[i] = val return inversion_vector def rank_trotterjohnson(self): """ Returns the Trotter Johnson rank, which we get from the minimal change algorithm. See [4] section 2.4. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.rank_trotterjohnson() 0 >>> p = Permutation([0, 2, 1, 3]) >>> p.rank_trotterjohnson() 7 See Also ======== unrank_trotterjohnson, next_trotterjohnson """ if self.array_form == [] or self.is_Identity: return 0 if self.array_form == [1, 0]: return 1 perm = self.array_form n = self.size rank = 0 for j in range(1, n): k = 1 i = 0 while perm[i] != j: if perm[i] < j: k += 1 i += 1 j1 = j + 1 if rank % 2 == 0: rank = j1*rank + j1 - k else: rank = j1*rank + k - 1 return rank @classmethod def unrank_trotterjohnson(cls, size, rank): """ Trotter Johnson permutation unranking. See [4] section 2.4. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation.unrank_trotterjohnson(5, 10) Permutation([0, 3, 1, 2, 4]) See Also ======== rank_trotterjohnson, next_trotterjohnson """ perm = [0]*size r2 = 0 n = ifac(size) pj = 1 for j in range(2, size + 1): pj *= j r1 = (rank * pj) // n k = r1 - j*r2 if r2 % 2 == 0: for i in range(j - 1, j - k - 1, -1): perm[i] = perm[i - 1] perm[j - k - 1] = j - 1 else: for i in range(j - 1, k, -1): perm[i] = perm[i - 1] perm[k] = j - 1 r2 = r1 return cls._af_new(perm) def next_trotterjohnson(self): """ Returns the next permutation in Trotter-Johnson order. If self is the last permutation it returns None. See [4] section 2.4. If it is desired to generate all such permutations, they can be generated in order more quickly with the ``generate_bell`` function. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([3, 0, 2, 1]) >>> p.rank_trotterjohnson() 4 >>> p = p.next_trotterjohnson(); p Permutation([0, 3, 2, 1]) >>> p.rank_trotterjohnson() 5 See Also ======== rank_trotterjohnson, unrank_trotterjohnson, sympy.utilities.iterables.generate_bell """ pi = self.array_form[:] n = len(pi) st = 0 rho = pi[:] done = False m = n-1 while m > 0 and not done: d = rho.index(m) for i in range(d, m): rho[i] = rho[i + 1] par = _af_parity(rho[:m]) if par == 1: if d == m: m -= 1 else: pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d] done = True else: if d == 0: m -= 1 st += 1 else: pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d] done = True if m == 0: return None return self._af_new(pi) def get_precedence_matrix(self): """ Gets the precedence matrix. This is used for computing the distance between two permutations. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation.josephus(3, 6, 1) >>> p Permutation([2, 5, 3, 1, 4, 0]) >>> p.get_precedence_matrix() Matrix([ [0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 0, 0, 1, 0], [1, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0]]) See Also ======== get_precedence_distance, get_adjacency_matrix, get_adjacency_distance """ m = zeros(self.size) perm = self.array_form for i in range(m.rows): for j in range(i + 1, m.cols): m[perm[i], perm[j]] = 1 return m def get_precedence_distance(self, other): """ Computes the precedence distance between two permutations. Suppose p and p' represent n jobs. The precedence metric counts the number of times a job j is preceded by job i in both p and p'. 
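The precedence distance reported is n*(n - 1)/2 minus that count (``n_prec`` in the implementation); for the example below ``n_prec`` is 3, giving 5*4/2 - 3 = 7.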
This metric is commutative. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([2, 0, 4, 3, 1]) >>> q = Permutation([3, 1, 2, 4, 0]) >>> p.get_precedence_distance(q) 7 >>> q.get_precedence_distance(p) 7 See Also ======== get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance """ if self.size != other.size: raise ValueError("The permutations must be of equal size.") self_prec_mat = self.get_precedence_matrix() other_prec_mat = other.get_precedence_matrix() n_prec = 0 for i in range(self.size): for j in range(self.size): if i == j: continue if self_prec_mat[i, j] * other_prec_mat[i, j] == 1: n_prec += 1 d = self.size * (self.size - 1)//2 - n_prec return d def get_adjacency_matrix(self): """ Computes the adjacency matrix of a permutation. If job i is adjacent to job j in a permutation p then we set m[i, j] = 1 where m is the adjacency matrix of p. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation.josephus(3, 6, 1) >>> p.get_adjacency_matrix() Matrix([ [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]) >>> q = Permutation([0, 1, 2, 3]) >>> q.get_adjacency_matrix() Matrix([ [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]) See Also ======== get_precedence_matrix, get_precedence_distance, get_adjacency_distance """ m = zeros(self.size) perm = self.array_form for i in range(self.size - 1): m[perm[i], perm[i + 1]] = 1 return m def get_adjacency_distance(self, other): """ Computes the adjacency distance between two permutations. This metric counts the number of times a pair i,j of jobs is adjacent in both p and p'. If n_adj is this quantity then the adjacency distance is n - n_adj - 1 [1] [1] Reeves, Colin R. Landscapes, Operators and Heuristic search, Annals of Operational Research, 86, pp 473-490. (1999) Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 3, 1, 2, 4]) >>> q = Permutation.josephus(4, 5, 2) >>> p.get_adjacency_distance(q) 3 >>> r = Permutation([0, 2, 1, 4, 3]) >>> p.get_adjacency_distance(r) 4 See Also ======== get_precedence_matrix, get_precedence_distance, get_adjacency_matrix """ if self.size != other.size: raise ValueError("The permutations must be of the same size.") self_adj_mat = self.get_adjacency_matrix() other_adj_mat = other.get_adjacency_matrix() n_adj = 0 for i in range(self.size): for j in range(self.size): if i == j: continue if self_adj_mat[i, j] * other_adj_mat[i, j] == 1: n_adj += 1 d = self.size - n_adj - 1 return d def get_positional_distance(self, other): """ Computes the positional distance between two permutations. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 3, 1, 2, 4]) >>> q = Permutation.josephus(4, 5, 2) >>> r = Permutation([3, 1, 4, 0, 2]) >>> p.get_positional_distance(q) 12 >>> p.get_positional_distance(r) 12 See Also ======== get_precedence_distance, get_adjacency_distance """ a = self.array_form b = other.array_form if len(a) != len(b): raise ValueError("The permutations must be of the same size.") return sum([abs(a[i] - b[i]) for i in range(len(a))]) @classmethod def josephus(cls, m, n, s=1): """Return as a permutation the shuffling of range(n) using the Josephus scheme in which every m-th item is selected until all have been chosen. The returned permutation has elements listed by the order in which they were selected. 
The parameter ``s`` stops the selection process when there are ``s`` items remaining and these are selected by continuing the selection, counting by 1 rather than by ``m``. Consider selecting every 3rd item from 6 until only 2 remain:: choices chosen ======== ====== 012345 01 345 2 01 34 25 01 4 253 0 4 2531 0 25314 253140 Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation.josephus(3, 6, 2).array_form [2, 5, 3, 1, 4, 0] References ========== .. [1] https://en.wikipedia.org/wiki/Flavius_Josephus .. [2] https://en.wikipedia.org/wiki/Josephus_problem .. [3] http://www.wou.edu/~burtonl/josephus.html """ from collections import deque m -= 1 Q = deque(list(range(n))) perm = [] while len(Q) > max(s, 1): for dp in range(m): Q.append(Q.popleft()) perm.append(Q.popleft()) perm.extend(list(Q)) return cls(perm) @classmethod def from_inversion_vector(cls, inversion): """ Calculates the permutation from the inversion vector. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation.from_inversion_vector([3, 2, 1, 0, 0]) Permutation([3, 2, 1, 0, 4, 5]) """ size = len(inversion) N = list(range(size + 1)) perm = [] try: for k in range(size): val = N[inversion[k]] perm.append(val) N.remove(val) except IndexError: raise ValueError("The inversion vector is not valid.") perm.extend(N) return cls._af_new(perm) @classmethod def random(cls, n): """ Generates a random permutation of length ``n``. Uses the underlying Python pseudo-random number generator. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1])) True """ perm_array = list(range(n)) random.shuffle(perm_array) return cls._af_new(perm_array) @classmethod def unrank_lex(cls, size, rank): """ Lexicographic permutation unranking. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> a = Permutation.unrank_lex(5, 10) >>> a.rank() 10 >>> a Permutation([0, 2, 4, 1, 3]) See Also ======== rank, next_lex """ perm_array = [0] * size psize = 1 for i in range(size): new_psize = psize*(i + 1) d = (rank % new_psize) // psize rank -= d*psize perm_array[size - i - 1] = d for j in range(size - i, size): if perm_array[j] > d - 1: perm_array[j] += 1 psize = new_psize return cls._af_new(perm_array) def resize(self, n): """Resize the permutation to the new size ``n``. Parameters ========== n : int The new size of the permutation. Raises ====== ValueError If the permutation cannot be resized to the given size. This may only happen when resized to a smaller size than the original. Examples ======== >>> from sympy.combinatorics.permutations import Permutation Increasing the size of a permutation: >>> p = Permutation(0, 1, 2) >>> p = p.resize(5) >>> p (4)(0 1 2) Decreasing the size of the permutation: >>> p = p.resize(4) >>> p (3)(0 1 2) If resizing to the specific size breaks the cycles: >>> p.resize(2) Traceback (most recent call last): ... ValueError: The permutation can not be resized to 2 because the cycle (0, 1, 2) may break. 
""" aform = self.array_form l = len(aform) if n > l: aform += list(range(l, n)) return Permutation._af_new(aform) elif n < l: cyclic_form = self.full_cyclic_form new_cyclic_form = [] for cycle in cyclic_form: cycle_min = min(cycle) cycle_max = max(cycle) if cycle_min <= n-1: if cycle_max > n-1: raise ValueError( "The permutation can not be resized to {} " "because the cycle {} may break." .format(n, tuple(cycle))) new_cyclic_form.append(cycle) return Permutation(new_cyclic_form) return self # XXX Deprecated flag print_cyclic = None def _merge(arr, temp, left, mid, right): """ Merges two sorted arrays and calculates the inversion count. Helper function for calculating inversions. This method is for internal use only. """ i = k = left j = mid inv_count = 0 while i < mid and j <= right: if arr[i] < arr[j]: temp[k] = arr[i] k += 1 i += 1 else: temp[k] = arr[j] k += 1 j += 1 inv_count += (mid -i) while i < mid: temp[k] = arr[i] k += 1 i += 1 if j <= right: k += right - j + 1 j += right - j + 1 arr[left:k + 1] = temp[left:k + 1] else: arr[left:right + 1] = temp[left:right + 1] return inv_count Perm = Permutation _af_new = Perm._af_new class AppliedPermutation(Expr): """A permutation applied to a symbolic variable. Parameters ========== perm : Permutation x : Expr Examples ======== >>> from sympy import Symbol >>> from sympy.combinatorics import Permutation Creating a symbolic permutation function application: >>> x = Symbol('x') >>> p = Permutation(0, 1, 2) >>> p.apply(x) AppliedPermutation((0 1 2), x) >>> _.subs(x, 1) 2 """ def __new__(cls, perm, x, evaluate=None): if evaluate is None: evaluate = global_parameters.evaluate perm = _sympify(perm) x = _sympify(x) if not isinstance(perm, Permutation): raise ValueError("{} must be a Permutation instance." .format(perm)) if evaluate: if x.is_Integer: return perm.apply(x) obj = super(AppliedPermutation, cls).__new__(cls, perm, x) return obj
abaf3baf641818f409f75d1ba7a9bb992afed1bc7c6fea9ab4a0f24d6243e903
""" This module contain solvers for all kinds of equations: - algebraic or transcendental, use solve() - recurrence, use rsolve() - differential, use dsolve() - nonlinear (numerically), use nsolve() (you will need a good starting point) """ from __future__ import print_function, division from sympy import divisors from sympy.core.compatibility import (iterable, is_sequence, ordered, default_sort_key, range) from sympy.core.sympify import sympify from sympy.core import (S, Add, Symbol, Equality, Dummy, Expr, Mul, Pow, Unequality) from sympy.core.exprtools import factor_terms from sympy.core.function import (expand_mul, expand_log, Derivative, AppliedUndef, UndefinedFunction, nfloat, Function, expand_power_exp, _mexpand, expand) from sympy.integrals.integrals import Integral from sympy.core.numbers import ilcm, Float, Rational from sympy.core.relational import Relational from sympy.core.logic import fuzzy_not, fuzzy_and from sympy.core.power import integer_log from sympy.logic.boolalg import And, Or, BooleanAtom from sympy.core.basic import preorder_traversal from sympy.functions import (log, exp, LambertW, cos, sin, tan, acos, asin, atan, Abs, re, im, arg, sqrt, atan2) from sympy.functions.elementary.trigonometric import (TrigonometricFunction, HyperbolicFunction) from sympy.simplify import (simplify, collect, powsimp, posify, powdenest, nsimplify, denom, logcombine, sqrtdenest, fraction, separatevars) from sympy.simplify.sqrtdenest import sqrt_depth from sympy.simplify.fu import TR1 from sympy.matrices import Matrix, zeros from sympy.polys import roots, cancel, factor, Poly, degree from sympy.polys.polyerrors import GeneratorsNeeded, PolynomialError from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise from sympy.utilities.lambdify import lambdify from sympy.utilities.misc import filldedent from sympy.utilities.iterables import uniq, generate_bell, flatten from sympy.utilities.decorator import conserve_mpmath_dps from mpmath import findroot from sympy.solvers.polysys import solve_poly_system from sympy.solvers.inequalities import reduce_inequalities from types import GeneratorType from collections import defaultdict import itertools import warnings def recast_to_symbols(eqs, symbols): """ Return (e, s, d) where e and s are versions of *eqs* and *symbols* in which any non-Symbol objects in *symbols* have been replaced with generic Dummy symbols and d is a dictionary that can be used to restore the original expressions. 
Examples ======== >>> from sympy.solvers.solvers import recast_to_symbols >>> from sympy import symbols, Function >>> x, y = symbols('x y') >>> fx = Function('f')(x) >>> eqs, syms = [fx + 1, x, y], [fx, y] >>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d) ([_X0 + 1, x, y], [_X0, y], {_X0: f(x)}) The original equations and symbols can be restored using d: >>> assert [i.xreplace(d) for i in eqs] == eqs >>> assert [d.get(i, i) for i in s] == syms """ if not iterable(eqs) and iterable(symbols): raise ValueError('Both eqs and symbols must be iterable') new_symbols = list(symbols) swap_sym = {} for i, s in enumerate(symbols): if not isinstance(s, Symbol) and s not in swap_sym: swap_sym[s] = Dummy('X%d' % i) new_symbols[i] = swap_sym[s] new_f = [] for i in eqs: isubs = getattr(i, 'subs', None) if isubs is not None: new_f.append(isubs(swap_sym)) else: new_f.append(i) swap_sym = {v: k for k, v in swap_sym.items()} return new_f, new_symbols, swap_sym def _ispow(e): """Return True if e is a Pow or is exp.""" return isinstance(e, Expr) and (e.is_Pow or isinstance(e, exp)) def _simple_dens(f, symbols): # when checking if a denominator is zero, we can just check the # base of powers with nonzero exponents since if the base is zero # the power will be zero, too. To keep it simple and fast, we # limit simplification to exponents that are Numbers dens = set() for d in denoms(f, symbols): if d.is_Pow and d.exp.is_Number: if d.exp.is_zero: continue # foo**0 is never 0 d = d.base dens.add(d) return dens def denoms(eq, *symbols): """ Return (recursively) set of all denominators that appear in *eq* that contain any symbol in *symbols*; if *symbols* are not provided then all denominators will be returned. Examples ======== >>> from sympy.solvers.solvers import denoms >>> from sympy.abc import x, y, z >>> from sympy import sqrt >>> denoms(x/y) {y} >>> denoms(x/(y*z)) {y, z} >>> denoms(3/x + y/z) {x, z} >>> denoms(x/2 + y/z) {2, z} If *symbols* are provided then only denominators containing those symbols will be returned: >>> denoms(1/x + 1/y + 1/z, y, z) {y, z} """ pot = preorder_traversal(eq) dens = set() for p in pot: # lhs and rhs will be traversed after anyway if isinstance(p, Relational): continue den = denom(p) if den is S.One: continue for d in Mul.make_args(den): dens.add(d) if not symbols: return dens elif len(symbols) == 1: if iterable(symbols[0]): symbols = symbols[0] rv = [] for d in dens: free = d.free_symbols if any(s in free for s in symbols): rv.append(d) return set(rv) def checksol(f, symbol, sol=None, **flags): """ Checks whether sol is a solution of equation f == 0. Explanation =========== Input can be either a single symbol and corresponding value or a dictionary of symbols and values. When given as a dictionary and flag ``simplify=True``, the values in the dictionary will be simplified. *f* can be a single equation or an iterable of equations. A solution must satisfy all equations in *f* to be considered valid; if a solution does not satisfy any equation, False is returned; if one or more checks are inconclusive (and none are False) then None is returned. 
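Since ``None`` is falsy, test the result with ``is False`` or ``is None`` when the distinction between a failed check and an inconclusive one matters.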
Examples ======== >>> from sympy import symbols >>> from sympy.solvers import checksol >>> x, y = symbols('x,y') >>> checksol(x**4 - 1, x, 1) True >>> checksol(x**4 - 1, x, 0) False >>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4}) True To check if an expression is zero using ``checksol()``, pass it as *f* and send an empty dictionary for *symbol*: >>> checksol(x**2 + x - x*(x + 1), {}) True None is returned if ``checksol()`` could not conclude. flags: 'numerical=True (default)' do a fast numerical check if ``f`` has only one symbol. 'minimal=True (default is False)' a very fast, minimal testing. 'warn=True (default is False)' show a warning if checksol() could not conclude. 'simplify=True (default)' simplify solution before substituting into function and simplify the function before trying specific simplifications 'force=True (default is False)' make positive all symbols without assumptions regarding sign. """ from sympy.physics.units import Unit minimal = flags.get('minimal', False) if sol is not None: sol = {symbol: sol} elif isinstance(symbol, dict): sol = symbol else: msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)' raise ValueError(msg % (symbol, sol)) if iterable(f): if not f: raise ValueError('no functions to check') rv = True for fi in f: check = checksol(fi, sol, **flags) if check: continue if check is False: return False rv = None # don't return, wait to see if there's a False return rv if isinstance(f, Poly): f = f.as_expr() elif isinstance(f, (Equality, Unequality)): if f.rhs in (S.true, S.false): f = f.reversed B, E = f.args if B in (S.true, S.false): f = f.subs(sol) if f not in (S.true, S.false): return else: f = f.rewrite(Add, evaluate=False) if isinstance(f, BooleanAtom): return bool(f) elif not f.is_Relational and not f: return True if sol and not f.free_symbols & set(sol.keys()): # if f(y) == 0, x=3 does not set f(y) to zero...nor does it not return None illegal = set([S.NaN, S.ComplexInfinity, S.Infinity, S.NegativeInfinity]) if any(sympify(v).atoms() & illegal for k, v in sol.items()): return False was = f attempt = -1 numerical = flags.get('numerical', True) while 1: attempt += 1 if attempt == 0: val = f.subs(sol) if isinstance(val, Mul): val = val.as_independent(Unit)[0] if val.atoms() & illegal: return False elif attempt == 1: if not val.is_number: if not val.is_constant(*list(sol.keys()), simplify=not minimal): return False # there are free symbols -- simple expansion might work _, val = val.as_content_primitive() val = _mexpand(val.as_numer_denom()[0], recursive=True) elif attempt == 2: if minimal: return if flags.get('simplify', True): for k in sol: sol[k] = simplify(sol[k]) # start over without the failed expanded form, possibly # with a simplified solution val = simplify(f.subs(sol)) if flags.get('force', True): val, reps = posify(val) # expansion may work now, so try again and check exval = _mexpand(val, recursive=True) if exval.is_number: # we can decide now val = exval else: # if there are no radicals and no functions then this can't be # zero anymore -- can it? 
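# The traversal below looks for non-integer powers (radicals), Function
# instances and UndefinedFunction types in the expanded value; if none are
# present the expression contains no radicals or functions, so the nonzero
# conclusion stands and False is returned.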
pot = preorder_traversal(expand_mul(val)) seen = set() saw_pow_func = False for p in pot: if p in seen: continue seen.add(p) if p.is_Pow and not p.exp.is_Integer: saw_pow_func = True elif p.is_Function: saw_pow_func = True elif isinstance(p, UndefinedFunction): saw_pow_func = True if saw_pow_func: break if saw_pow_func is False: return False if flags.get('force', True): # don't do a zero check with the positive assumptions in place val = val.subs(reps) nz = fuzzy_not(val.is_zero) if nz is not None: # issue 5673: nz may be True even when False # so these are just hacks to keep a false positive # from being returned # HACK 1: LambertW (issue 5673) if val.is_number and val.has(LambertW): # don't eval this to verify solution since if we got here, # numerical must be False return None # add other HACKs here if necessary, otherwise we assume # the nz value is correct return not nz break if val == was: continue elif val.is_Rational: return val == 0 if numerical and val.is_number: if val in (S.true, S.false): return bool(val) return (abs(val.n(18).n(12, chop=True)) < 1e-9) is S.true was = val if flags.get('warn', False): warnings.warn("\n\tWarning: could not verify solution %s." % sol) # returns None if it can't conclude # TODO: improve solution testing def failing_assumptions(expr, **assumptions): """ Return a dictionary containing assumptions with values not matching those of the passed assumptions. Examples ======== >>> from sympy import failing_assumptions, Symbol >>> x = Symbol('x', real=True, positive=True) >>> y = Symbol('y') >>> failing_assumptions(6*x + y, real=True, positive=True) {'positive': None, 'real': None} >>> failing_assumptions(x**2 - 1, positive=True) {'positive': None} If *expr* satisfies all of the assumptions, an empty dictionary is returned. >>> failing_assumptions(x**2, positive=True) {} """ expr = sympify(expr) failed = {} for key in list(assumptions.keys()): test = getattr(expr, 'is_%s' % key, None) if test is not assumptions[key]: failed[key] = test return failed # {} or {assumption: value != desired} def check_assumptions(expr, against=None, **assumptions): """ Checks whether expression *expr* satisfies all assumptions. Explanation =========== *assumptions* is a dict of assumptions: {'assumption': True|False, ...}. Examples ======== >>> from sympy import Symbol, pi, I, exp, check_assumptions >>> check_assumptions(-5, integer=True) True >>> check_assumptions(pi, real=True, integer=False) True >>> check_assumptions(pi, real=True, negative=True) False >>> check_assumptions(exp(I*pi/7), real=False) True >>> x = Symbol('x', real=True, positive=True) >>> check_assumptions(2*x + 1, real=True, positive=True) True >>> check_assumptions(-2*x - 5, real=True, positive=True) False To check assumptions of *expr* against another variable or expression, pass the expression or variable as ``against``. >>> check_assumptions(2*x + 1, x) True ``None`` is returned if ``check_assumptions()`` could not conclude. 
>>> check_assumptions(2*x - 1, real=True, positive=True) >>> z = Symbol('z') >>> check_assumptions(z, real=True) See Also ======== failing_assumptions """ expr = sympify(expr) if against: if not isinstance(against, Symbol): raise TypeError('against should be of type Symbol') if assumptions: raise AssertionError('No assumptions should be specified') assumptions = against.assumptions0 def _test(key): v = getattr(expr, 'is_' + key, None) if v is not None: return assumptions[key] is v return fuzzy_and(_test(key) for key in assumptions) def solve(f, *symbols, **flags): r""" Algebraically solves equations and systems of equations. Explanation =========== Currently supported: - polynomial - transcendental - piecewise combinations of the above - systems of linear and polynomial equations - systems containing relational expressions Examples ======== The output varies according to the input and can be seen by example: >>> from sympy import solve, Poly, Eq, Function, exp >>> from sympy.abc import x, y, z, a, b >>> f = Function('f') Boolean or univariate Relational: >>> solve(x < 3) (-oo < x) & (x < 3) To always get a list of solution mappings, use flag dict=True: >>> solve(x - 3, dict=True) [{x: 3}] >>> sol = solve([x - 3, y - 1], dict=True) >>> sol [{x: 3, y: 1}] >>> sol[0][x] 3 >>> sol[0][y] 1 To get a list of *symbols* and set of solution(s) use flag set=True: >>> solve([x**2 - 3, y - 1], set=True) ([x, y], {(-sqrt(3), 1), (sqrt(3), 1)}) Single expression and single symbol that is in the expression: >>> solve(x - y, x) [y] >>> solve(x - 3, x) [3] >>> solve(Eq(x, 3), x) [3] >>> solve(Poly(x - 3), x) [3] >>> solve(x**2 - y**2, x, set=True) ([x], {(-y,), (y,)}) >>> solve(x**4 - 1, x, set=True) ([x], {(-1,), (1,), (-I,), (I,)}) Single expression with no symbol that is in the expression: >>> solve(3, x) [] >>> solve(x - 3, y) [] Single expression with no symbol given. In this case, all free *symbols* will be selected as potential *symbols* to solve for. If the equation is univariate then a list of solutions is returned; otherwise - as is the case when *symbols* are given as an iterable of length greater than 1 - a list of mappings will be returned: >>> solve(x - 3) [3] >>> solve(x**2 - y**2) [{x: -y}, {x: y}] >>> solve(z**2*x**2 - z**2*y**2) [{x: -y}, {x: y}, {z: 0}] >>> solve(z**2*x - z**2*y**2) [{x: y**2}, {z: 0}] When an object other than a Symbol is given as a symbol, it is isolated algebraically and an implicit solution may be obtained. This is mostly provided as a convenience to save you from replacing the object with a Symbol and solving for that Symbol. 
It will only work if the specified object can be replaced with a Symbol using the subs method: >>> solve(f(x) - x, f(x)) [x] >>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x)) [x + f(x)] >>> solve(f(x).diff(x) - f(x) - x, f(x)) [-x + Derivative(f(x), x)] >>> solve(x + exp(x)**2, exp(x), set=True) ([exp(x)], {(-sqrt(-x),), (sqrt(-x),)}) >>> from sympy import Indexed, IndexedBase, Tuple, sqrt >>> A = IndexedBase('A') >>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1) >>> solve(eqs, eqs.atoms(Indexed)) {A[1]: 1, A[2]: 2} * To solve for a symbol implicitly, use implicit=True: >>> solve(x + exp(x), x) [-LambertW(1)] >>> solve(x + exp(x), x, implicit=True) [-exp(x)] * It is possible to solve for anything that can be targeted with subs: >>> solve(x + 2 + sqrt(3), x + 2) [-sqrt(3)] >>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2) {y: -2 + sqrt(3), x + 2: -sqrt(3)} * Nothing heroic is done in this implicit solving so you may end up with a symbol still in the solution: >>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y) >>> solve(eqs, y, x + 2) {y: -sqrt(3)/(x + 3), x + 2: (-2*x - 6 + sqrt(3))/(x + 3)} >>> solve(eqs, y*x, x) {x: -y - 4, x*y: -3*y - sqrt(3)} * If you attempt to solve for a number remember that the number you have obtained does not necessarily mean that the value is equivalent to the expression obtained: >>> solve(sqrt(2) - 1, 1) [sqrt(2)] >>> solve(x - y + 1, 1) # /!\ -1 is targeted, too [x/(y - 1)] >>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)] [-x + y] * To solve for a function within a derivative, use ``dsolve``. Single expression and more than one symbol: * When there is a linear solution: >>> solve(x - y**2, x, y) [(y**2, y)] >>> solve(x**2 - y, x, y) [(x, x**2)] >>> solve(x**2 - y, x, y, dict=True) [{y: x**2}] * When undetermined coefficients are identified: * That are linear: >>> solve((a + b)*x - b + 2, a, b) {a: -2, b: 2} * That are nonlinear: >>> solve((a + b)*x - b**2 + 2, a, b, set=True) ([a, b], {(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))}) * If there is no linear solution, then the first successful attempt for a nonlinear solution will be returned: >>> solve(x**2 - y**2, x, y, dict=True) [{x: -y}, {x: y}] >>> solve(x**2 - y**2/exp(x), x, y, dict=True) [{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}] >>> solve(x**2 - y**2/exp(x), y, x) [(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)] Iterable of one or more of the above: * Involving relationals or bools: >>> solve([x < 3, x - 2]) Eq(x, 2) >>> solve([x > 3, x - 2]) False * When the system is linear: * With a solution: >>> solve([x - 3], x) {x: 3} >>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y) {x: -3, y: 1} >>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z) {x: -3, y: 1} >>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y) {x: 2 - 5*y, z: 21*y - 6} * Without a solution: >>> solve([x + 3, x - 3]) [] * When the system is not linear: >>> solve([x**2 + y -2, y**2 - 4], x, y, set=True) ([x, y], {(-2, -2), (0, 2), (2, -2)}) * If no *symbols* are given, all free *symbols* will be selected and a list of mappings returned: >>> solve([x - 2, x**2 + y]) [{x: 2, y: -4}] >>> solve([x - 2, x**2 + f(x)], {f(x), x}) [{x: 2, f(x): -4}] * If any equation does not depend on the symbol(s) given, it will be eliminated from the equation set and an answer may be given implicitly in terms of variables that were not of interest: >>> solve([x - y, y - 3], x) {x: y} **Additional Examples** ``solve()`` with check=True (default) will run through the symbol tags to elimate unwanted solutions. 
If no assumptions are included, all possible solutions will be returned: >>> from sympy import Symbol, solve >>> x = Symbol("x") >>> solve(x**2 - 1) [-1, 1] By using the positive tag, only one solution will be returned: >>> pos = Symbol("pos", positive=True) >>> solve(pos**2 - 1) [1] Assumptions are not checked when ``solve()`` input involves relationals or bools. When the solutions are checked, those that make any denominator zero are automatically excluded. If you do not want to exclude such solutions, then use the check=False option: >>> from sympy import sin, limit >>> solve(sin(x)/x) # 0 is excluded [pi] If check=False, then a solution to the numerator being zero is found: x = 0. In this case, this is a spurious solution since $\sin(x)/x$ has the well known limit (without dicontinuity) of 1 at x = 0: >>> solve(sin(x)/x, check=False) [0, pi] In the following case, however, the limit exists and is equal to the value of x = 0 that is excluded when check=True: >>> eq = x**2*(1/x - z**2/x) >>> solve(eq, x) [] >>> solve(eq, x, check=False) [0] >>> limit(eq, x, 0, '-') 0 >>> limit(eq, x, 0, '+') 0 **Disabling High-Order Explicit Solutions** When solving polynomial expressions, you might not want explicit solutions (which can be quite long). If the expression is univariate, ``CRootOf`` instances will be returned instead: >>> solve(x**3 - x + 1) [-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) - (-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3, -(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 - 1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)), -(3*sqrt(69)/2 + 27/2)**(1/3)/3 - 1/(3*sqrt(69)/2 + 27/2)**(1/3)] >>> solve(x**3 - x + 1, cubics=False) [CRootOf(x**3 - x + 1, 0), CRootOf(x**3 - x + 1, 1), CRootOf(x**3 - x + 1, 2)] If the expression is multivariate, no solution might be returned: >>> solve(x**3 - x + a, x, cubics=False) [] Sometimes solutions will be obtained even when a flag is False because the expression could be factored. In the following example, the equation can be factored as the product of a linear and a quadratic factor so explicit solutions (which did not require solving a cubic expression) are obtained: >>> eq = x**3 + 3*x**2 + x - 1 >>> solve(eq, cubics=False) [-1, -1 + sqrt(2), -sqrt(2) - 1] **Solving Equations Involving Radicals** Because of SymPy's use of the principle root, some solutions to radical equations will be missed unless check=False: >>> from sympy import root >>> eq = root(x**3 - 3*x**2, 3) + 1 - x >>> solve(eq) [] >>> solve(eq, check=False) [1/3] In the above example, there is only a single solution to the equation. Other expressions will yield spurious roots which must be checked manually; roots which give a negative argument to odd-powered radicals will also need special checking: >>> from sympy import real_root, S >>> eq = root(x, 3) - root(x, 5) + S(1)/7 >>> solve(eq) # this gives 2 solutions but misses a 3rd [CRootOf(7*_p**5 - 7*_p**3 + 1, 1)**15, CRootOf(7*_p**5 - 7*_p**3 + 1, 2)**15] >>> sol = solve(eq, check=False) >>> [abs(eq.subs(x,i).n(2)) for i in sol] [0.48, 0.e-110, 0.e-110, 0.052, 0.052] The first solution is negative so ``real_root`` must be used to see that it satisfies the expression: >>> abs(real_root(eq.subs(x, sol[0])).n(2)) 0.e-110 If the roots of the equation are not real then more care will be necessary to find the roots, especially for higher order equations. 
Consider the following expression: >>> expr = root(x, 3) - root(x, 5) We will construct a known value for this expression at x = 3 by selecting the 1-th root for each radical: >>> expr1 = root(x, 3, 1) - root(x, 5, 1) >>> v = expr1.subs(x, -3) The ``solve`` function is unable to find any exact roots to this equation: >>> eq = Eq(expr, v); eq1 = Eq(expr1, v) >>> solve(eq, check=False), solve(eq1, check=False) ([], []) The function ``unrad``, however, can be used to get a form of the equation for which numerical roots can be found: >>> from sympy.solvers.solvers import unrad >>> from sympy import nroots >>> e, (p, cov) = unrad(eq) >>> pvals = nroots(e) >>> inversion = solve(cov, x)[0] >>> xvals = [inversion.subs(p, i) for i in pvals] Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the solution can only be verified with ``expr1``: >>> z = expr - v >>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9] [] >>> z1 = expr1 - v >>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9] [-3.0] Parameters ========== f : - a single Expr or Poly that must be zero - an Equality - a Relational expression - a Boolean - iterable of one or more of the above symbols : (object(s) to solve for) specified as - none given (other non-numeric objects will be used) - single symbol - denested list of symbols (e.g., ``solve(f, x, y)``) - ordered iterable of symbols (e.g., ``solve(f, [x, y])``) flags : dict=True (default is False) Return list (perhaps empty) of solution mappings. set=True (default is False) Return list of symbols and set of tuple(s) of solution(s). exclude=[] (default) Do not try to solve for any of the free symbols in exclude; if expressions are given, the free symbols in them will be extracted automatically. check=True (default) If False, do not do any testing of solutions. This can be useful if you want to include solutions that make any denominator zero. numerical=True (default) Do a fast numerical check if *f* has only one symbol. minimal=True (default is False) A very fast, minimal testing. warn=True (default is False) Show a warning if ``checksol()`` could not conclude. simplify=True (default) Simplify all but polynomials of order 3 or greater before returning them and (if check is not False) use the general simplify function on the solutions and the expression obtained when they are substituted into the function which should be zero. force=True (default is False) Make positive all symbols without assumptions regarding sign. rational=True (default) Recast Floats as Rational; if this option is not used, the system containing Floats may fail to solve because of issues with polys. If rational=None, Floats will be recast as rationals but the answer will be recast as Floats. If the flag is False then nothing will be done to the Floats. manual=True (default is False) Do not use the polys/matrix method to solve a system of equations, solve them one at a time as you might "manually." implicit=True (default is False) Allows ``solve`` to return a solution for a pattern in terms of other functions that contain that pattern; this is only needed if the pattern is inside of some invertible function like cos, exp, ect. particular=True (default is False) Instructs ``solve`` to try to find a particular solution to a linear system with as many zeros as possible; this is very expensive. 
quick=True (default is False) When using particular=True, use a fast heuristic to find a solution with many zeros (instead of using the very slow method guaranteed to find the largest number of zeros possible). cubics=True (default) Return explicit solutions when cubic expressions are encountered. quartics=True (default) Return explicit solutions when quartic expressions are encountered. quintics=True (default) Return explicit solutions (if possible) when quintic expressions are encountered. See Also ======== rsolve: For solving recurrence relationships dsolve: For solving differential equations """ # keeping track of how f was passed since if it is a list # a dictionary of results will be returned. ########################################################################### def _sympified_list(w): return list(map(sympify, w if iterable(w) else [w])) bare_f = not iterable(f) ordered_symbols = (symbols and symbols[0] and (isinstance(symbols[0], Symbol) or is_sequence(symbols[0], include=GeneratorType) ) ) f, symbols = (_sympified_list(w) for w in [f, symbols]) if isinstance(f, list): f = [s for s in f if s is not S.true and s is not True] implicit = flags.get('implicit', False) # preprocess symbol(s) ########################################################################### if not symbols: # get symbols from equations symbols = set().union(*[fi.free_symbols for fi in f]) if len(symbols) < len(f): for fi in f: pot = preorder_traversal(fi) for p in pot: if isinstance(p, AppliedUndef): flags['dict'] = True # better show symbols symbols.add(p) pot.skip() # don't go any deeper symbols = list(symbols) ordered_symbols = False elif len(symbols) == 1 and iterable(symbols[0]): symbols = symbols[0] # remove symbols the user is not interested in exclude = flags.pop('exclude', set()) if exclude: if isinstance(exclude, Expr): exclude = [exclude] exclude = set().union(*[e.free_symbols for e in sympify(exclude)]) symbols = [s for s in symbols if s not in exclude] # preprocess equation(s) ########################################################################### for i, fi in enumerate(f): if isinstance(fi, (Equality, Unequality)): if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]: fi = fi.lhs - fi.rhs else: args = fi.args if args[1] in (S.true, S.false): args = args[1], args[0] L, R = args if L in (S.false, S.true): if isinstance(fi, Unequality): L = ~L if R.is_Relational: fi = ~R if L is S.false else R elif R.is_Symbol: return L elif R.is_Boolean and (~R).is_Symbol: return ~L else: raise NotImplementedError(filldedent(''' Unanticipated argument of Eq when other arg is True or False. 
''')) else: fi = fi.rewrite(Add, evaluate=False) f[i] = fi if fi.is_Relational: return reduce_inequalities(f, symbols=symbols) if isinstance(fi, Poly): f[i] = fi.as_expr() # rewrite hyperbolics in terms of exp f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction), lambda w: w.rewrite(exp)) # if we have a Matrix, we need to iterate over its elements again if f[i].is_Matrix: bare_f = False f.extend(list(f[i])) f[i] = S.Zero # if we can split it into real and imaginary parts then do so freei = f[i].free_symbols if freei and all(s.is_extended_real or s.is_imaginary for s in freei): fr, fi = f[i].as_real_imag() # accept as long as new re, im, arg or atan2 are not introduced had = f[i].atoms(re, im, arg, atan2) if fr and fi and fr != fi and not any( i.atoms(re, im, arg, atan2) - had for i in (fr, fi)): if bare_f: bare_f = False f[i: i + 1] = [fr, fi] # real/imag handling ----------------------------- if any(isinstance(fi, (bool, BooleanAtom)) for fi in f): if flags.get('set', False): return [], set() return [] for i, fi in enumerate(f): # Abs while True: was = fi fi = fi.replace(Abs, lambda arg: separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols) else Abs(arg)) if was == fi: break for e in fi.find(Abs): if e.has(*symbols): raise NotImplementedError('solving %s when the argument ' 'is not real or imaginary.' % e) # arg _arg = [a for a in fi.atoms(arg) if a.has(*symbols)] fi = fi.xreplace(dict(list(zip(_arg, [atan(im(a.args[0])/re(a.args[0])) for a in _arg])))) # save changes f[i] = fi # see if re(s) or im(s) appear irf = [] for s in symbols: if s.is_extended_real or s.is_imaginary: continue # neither re(x) nor im(x) will appear # if re(s) or im(s) appear, the auxiliary equation must be present if any(fi.has(re(s), im(s)) for fi in f): irf.append((s, re(s) + S.ImaginaryUnit*im(s))) if irf: for s, rhs in irf: for i, fi in enumerate(f): f[i] = fi.xreplace({s: rhs}) f.append(s - rhs) symbols.extend([re(s), im(s)]) if bare_f: bare_f = False flags['dict'] = True # end of real/imag handling ----------------------------- symbols = list(uniq(symbols)) if not ordered_symbols: # we do this to make the results returned canonical in case f # contains a system of nonlinear equations; all other cases should # be unambiguous symbols = sorted(symbols, key=default_sort_key) # we can solve for non-symbol entities by replacing them with Dummy symbols f, symbols, swap_sym = recast_to_symbols(f, symbols) # this is needed in the next two events symset = set(symbols) # get rid of equations that have no symbols of interest; we don't # try to solve them because the user didn't ask and they might be # hard to solve; this means that solutions may be given in terms # of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y} newf = [] for fi in f: # let the solver handle equations that.. 
# - have no symbols but are expressions # - have symbols of interest # - have no symbols of interest but are constant # but when an expression is not constant and has no symbols of # interest, it can't change what we obtain for a solution from # the remaining equations so we don't include it; and if it's # zero it can be removed and if it's not zero, there is no # solution for the equation set as a whole # # The reason for doing this filtering is to allow an answer # to be obtained to queries like solve((x - y, y), x); without # this mod the return value is [] ok = False if fi.has(*symset): ok = True else: if fi.is_number: if fi.is_Number: if fi.is_zero: continue return [] ok = True else: if fi.is_constant(): ok = True if ok: newf.append(fi) if not newf: return [] f = newf del newf # mask off any Object that we aren't going to invert: Derivative, # Integral, etc... so that solving for anything that they contain will # give an implicit solution seen = set() non_inverts = set() for fi in f: pot = preorder_traversal(fi) for p in pot: if not isinstance(p, Expr) or isinstance(p, Piecewise): pass elif (isinstance(p, bool) or not p.args or p in symset or p.is_Add or p.is_Mul or p.is_Pow and not implicit or p.is_Function and not implicit) and p.func not in (re, im): continue elif not p in seen: seen.add(p) if p.free_symbols & symset: non_inverts.add(p) else: continue pot.skip() del seen non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts]))) f = [fi.subs(non_inverts) for fi in f] # Both xreplace and subs are needed below: xreplace to force substitution # inside Derivative, subs to handle non-straightforward substitutions non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()] # rationalize Floats floats = False if flags.get('rational', True) is not False: for i, fi in enumerate(f): if fi.has(Float): floats = True f[i] = nsimplify(fi, rational=True) # capture any denominators before rewriting since # they may disappear after the rewrite, e.g. issue 14779 flags['_denominators'] = _simple_dens(f[0], symbols) # Any embedded piecewise functions need to be brought out to the # top level so that the appropriate strategy gets selected. # However, this is necessary only if one of the piecewise # functions depends on one of the symbols we are solving for. def _has_piecewise(e): if e.is_Piecewise: return e.has(*symbols) return any([_has_piecewise(a) for a in e.args]) for i, fi in enumerate(f): if _has_piecewise(fi): f[i] = piecewise_fold(fi) # # try to get a solution ########################################################################### if bare_f: solution = _solve(f[0], *symbols, **flags) else: solution = _solve_system(f, symbols, **flags) # # postprocessing ########################################################################### # Restore masked-off objects if non_inverts: def _do_dict(solution): return {k: v.subs(non_inverts) for k, v in solution.items()} for i in range(1): if isinstance(solution, dict): solution = _do_dict(solution) break elif solution and isinstance(solution, list): if isinstance(solution[0], dict): solution = [_do_dict(s) for s in solution] break elif isinstance(solution[0], tuple): solution = [tuple([v.subs(non_inverts) for v in s]) for s in solution] break else: solution = [v.subs(non_inverts) for v in solution] break elif not solution: break else: raise NotImplementedError(filldedent(''' no handling of %s was implemented''' % solution)) # Restore original "symbols" if a dictionary is returned. 
# This is not necessary for # - the single univariate equation case # since the symbol will have been removed from the solution; # - the nonlinear poly_system since that only supports zero-dimensional # systems and those results come back as a list # # ** unless there were Derivatives with the symbols, but those were handled # above. if swap_sym: symbols = [swap_sym.get(k, k) for k in symbols] if isinstance(solution, dict): solution = {swap_sym.get(k, k): v.subs(swap_sym) for k, v in solution.items()} elif solution and isinstance(solution, list) and isinstance(solution[0], dict): for i, sol in enumerate(solution): solution[i] = {swap_sym.get(k, k): v.subs(swap_sym) for k, v in sol.items()} # undo the dictionary solutions returned when the system was only partially # solved with poly-system if all symbols are present if ( not flags.get('dict', False) and solution and ordered_symbols and not isinstance(solution, dict) and all(isinstance(sol, dict) for sol in solution) ): solution = [tuple([r.get(s, s).subs(r) for s in symbols]) for r in solution] # Get assumptions about symbols, to filter solutions. # Note that if assumptions about a solution can't be verified, it is still # returned. check = flags.get('check', True) # restore floats if floats and solution and flags.get('rational', None) is None: solution = nfloat(solution, exponent=False) if check and solution: # assumption checking warn = flags.get('warn', False) got_None = [] # solutions for which one or more symbols gave None no_False = [] # solutions for which no symbols gave False if isinstance(solution, tuple): # this has already been checked and is in as_set form return solution elif isinstance(solution, list): if isinstance(solution[0], tuple): for sol in solution: for symb, val in zip(symbols, sol): test = check_assumptions(val, **symb.assumptions0) if test is False: break if test is None: got_None.append(sol) else: no_False.append(sol) elif isinstance(solution[0], dict): for sol in solution: a_None = False for symb, val in sol.items(): test = check_assumptions(val, **symb.assumptions0) if test: continue if test is False: break a_None = True else: no_False.append(sol) if a_None: got_None.append(sol) else: # list of expressions for sol in solution: test = check_assumptions(sol, **symbols[0].assumptions0) if test is False: continue no_False.append(sol) if test is None: got_None.append(sol) elif isinstance(solution, dict): a_None = False for symb, val in solution.items(): test = check_assumptions(val, **symb.assumptions0) if test: continue if test is False: no_False = None break a_None = True else: no_False = solution if a_None: got_None.append(solution) elif isinstance(solution, (Relational, And, Or)): if len(symbols) != 1: raise ValueError("Length should be 1") if warn and symbols[0].assumptions0: warnings.warn(filldedent(""" \tWarning: assumptions about variable '%s' are not handled currently.""" % symbols[0])) # TODO: check also variable assumptions for inequalities else: raise TypeError('Unrecognized solution') # improve the checker solution = no_False if warn and got_None: warnings.warn(filldedent(""" \tWarning: assumptions concerning following solution(s) can't be checked:""" + '\n\t' + ', '.join(str(s) for s in got_None))) # # done ########################################################################### as_dict = flags.get('dict', False) as_set = flags.get('set', False) if not as_set and isinstance(solution, list): # Make sure that a list of solutions is ordered in a canonical way. 
solution.sort(key=default_sort_key) if not as_dict and not as_set: return solution or [] # return a list of mappings or [] if not solution: solution = [] else: if isinstance(solution, dict): solution = [solution] elif iterable(solution[0]): solution = [dict(list(zip(symbols, s))) for s in solution] elif isinstance(solution[0], dict): pass else: if len(symbols) != 1: raise ValueError("Length should be 1") solution = [{symbols[0]: s} for s in solution] if as_dict: return solution assert as_set if not solution: return [], set() k = list(ordered(solution[0].keys())) return k, {tuple([s[ki] for ki in k]) for s in solution} def _solve(f, *symbols, **flags): """ Return a checked solution for *f* in terms of one or more of the symbols. A list should be returned except for the case when a linear undetermined-coefficients equation is encountered (in which case a dictionary is returned). If no method is implemented to solve the equation, a NotImplementedError will be raised. In the case that conversion of an expression to a Poly gives None a ValueError will be raised. """ not_impl_msg = "No algorithms are implemented to solve equation %s" if len(symbols) != 1: soln = None free = f.free_symbols ex = free - set(symbols) if len(ex) != 1: ind, dep = f.as_independent(*symbols) ex = ind.free_symbols & dep.free_symbols if len(ex) == 1: ex = ex.pop() try: # soln may come back as dict, list of dicts or tuples, or # tuple of symbol list and set of solution tuples soln = solve_undetermined_coeffs(f, symbols, ex, **flags) except NotImplementedError: pass if soln: if flags.get('simplify', True): if isinstance(soln, dict): for k in soln: soln[k] = simplify(soln[k]) elif isinstance(soln, list): if isinstance(soln[0], dict): for d in soln: for k in d: d[k] = simplify(d[k]) elif isinstance(soln[0], tuple): soln = [tuple(simplify(i) for i in j) for j in soln] else: raise TypeError('unrecognized args in list') elif isinstance(soln, tuple): sym, sols = soln soln = sym, {tuple(simplify(i) for i in j) for j in sols} else: raise TypeError('unrecognized solution type') return soln # find first successful solution failed = [] got_s = set([]) result = [] for s in symbols: xi, v = solve_linear(f, symbols=[s]) if xi == s: # no need to check but we should simplify if desired if flags.get('simplify', True): v = simplify(v) vfree = v.free_symbols if got_s and any([ss in vfree for ss in got_s]): # sol depends on previously solved symbols: discard it continue got_s.add(xi) result.append({xi: v}) elif xi: # there might be a non-linear solution if xi is not 0 failed.append(s) if not failed: return result for s in failed: try: soln = _solve(f, s, **flags) for sol in soln: if got_s and any([ss in sol.free_symbols for ss in got_s]): # sol depends on previously solved symbols: discard it continue got_s.add(s) result.append({s: sol}) except NotImplementedError: continue if got_s: return result else: raise NotImplementedError(not_impl_msg % f) symbol = symbols[0] # /!\ capture this flag then set it to False so that no checking in # recursive calls will be done; only the final answer is checked flags['check'] = checkdens = check = flags.pop('check', True) # build up solutions if f is a Mul if f.is_Mul: result = set() for m in f.args: if m in set([S.NegativeInfinity, S.ComplexInfinity, S.Infinity]): result = set() break soln = _solve(m, symbol, **flags) result.update(set(soln)) result = list(result) if check: # all solutions have been checked but now we must # check that the solutions do not set denominators # in any factor to zero dens = 
flags.get('_denominators', _simple_dens(f, symbols)) result = [s for s in result if all(not checksol(den, {symbol: s}, **flags) for den in dens)] # set flags for quick exit at end; solutions for each # factor were already checked and simplified check = False flags['simplify'] = False elif f.is_Piecewise: result = set() for i, (expr, cond) in enumerate(f.args): if expr.is_zero: raise NotImplementedError( 'solve cannot represent interval solutions') candidates = _solve(expr, symbol, **flags) # the explicit condition for this expr is the current cond # and none of the previous conditions args = [~c for _, c in f.args[:i]] + [cond] cond = And(*args) for candidate in candidates: if candidate in result: # an unconditional value was already there continue try: v = cond.subs(symbol, candidate) _eval_simplify = getattr(v, '_eval_simplify', None) if _eval_simplify is not None: # unconditionally take the simpification of v v = _eval_simplify(ratio=2, measure=lambda x: 1) except TypeError: # incompatible type with condition(s) continue if v == False: continue if v == True: result.add(candidate) else: result.add(Piecewise( (candidate, v), (S.NaN, True))) # set flags for quick exit at end; solutions for each # piece were already checked and simplified check = False flags['simplify'] = False else: # first see if it really depends on symbol and whether there # is only a linear solution f_num, sol = solve_linear(f, symbols=symbols) if f_num.is_zero or sol is S.NaN: return [] elif f_num.is_Symbol: # no need to check but simplify if desired if flags.get('simplify', True): sol = simplify(sol) return [sol] result = False # no solution was obtained msg = '' # there is no failure message # Poly is generally robust enough to convert anything to # a polynomial and tell us the different generators that it # contains, so we will inspect the generators identified by # polys to figure out what to do. # try to identify a single generator that will allow us to solve this # as a polynomial, followed (perhaps) by a change of variables if the # generator is not a symbol try: poly = Poly(f_num) if poly is None: raise ValueError('could not convert %s to Poly' % f_num) except GeneratorsNeeded: simplified_f = simplify(f_num) if simplified_f != f_num: return _solve(simplified_f, symbol, **flags) raise ValueError('expression appears to be a constant') gens = [g for g in poly.gens if g.has(symbol)] def _as_base_q(x): """Return (b**e, q) for x = b**(p*e/q) where p/q is the leading Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3) """ b, e = x.as_base_exp() if e.is_Rational: return b, e.q if not e.is_Mul: return x, 1 c, ee = e.as_coeff_Mul() if c.is_Rational and c is not S.One: # c could be a Float return b**ee, c.q return x, 1 if len(gens) > 1: # If there is more than one generator, it could be that the # generators have the same base but different powers, e.g. 
# >>> Poly(exp(x) + 1/exp(x)) # Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ') # # If unrad was not disabled then there should be no rational # exponents appearing as in # >>> Poly(sqrt(x) + sqrt(sqrt(x))) # Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ') bases, qs = list(zip(*[_as_base_q(g) for g in gens])) bases = set(bases) if len(bases) > 1 or not all(q == 1 for q in qs): funcs = set(b for b in bases if b.is_Function) trig = set([_ for _ in funcs if isinstance(_, TrigonometricFunction)]) other = funcs - trig if not other and len(funcs.intersection(trig)) > 1: newf = TR1(f_num).rewrite(tan) if newf != f_num: # don't check the rewritten form --check # solutions in the un-rewritten form below flags['check'] = False result = _solve(newf, symbol, **flags) flags['check'] = check # just a simple case - see if replacement of single function # clears all symbol-dependent functions, e.g. # log(x) - log(log(x) - 1) - 3 can be solved even though it has # two generators. if result is False and funcs: funcs = list(ordered(funcs)) # put shallowest function first f1 = funcs[0] t = Dummy('t') # perform the substitution ftry = f_num.subs(f1, t) # if no Functions left, we can proceed with usual solve if not ftry.has(symbol): cv_sols = _solve(ftry, t, **flags) cv_inv = _solve(t - f1, symbol, **flags)[0] sols = list() for sol in cv_sols: sols.append(cv_inv.subs(t, sol)) result = list(ordered(sols)) if result is False: msg = 'multiple generators %s' % gens else: # e.g. case where gens are exp(x), exp(-x) u = bases.pop() t = Dummy('t') inv = _solve(u - t, symbol, **flags) if isinstance(u, (Pow, exp)): # this will be resolved by factor in _tsolve but we might # as well try a simple expansion here to get things in # order so something like the following will work now without # having to factor: # # >>> eq = (exp(I*(-x-2))+exp(I*(x+2))) # >>> eq.subs(exp(x),y) # fails # exp(I*(-x - 2)) + exp(I*(x + 2)) # >>> eq.expand().subs(exp(x),y) # works # y**I*exp(2*I) + y**(-I)*exp(-2*I) def _expand(p): b, e = p.as_base_exp() e = expand_mul(e) return expand_power_exp(b**e) ftry = f_num.replace( lambda w: w.is_Pow or isinstance(w, exp), _expand).subs(u, t) if not ftry.has(symbol): soln = _solve(ftry, t, **flags) sols = list() for sol in soln: for i in inv: sols.append(i.subs(t, sol)) result = list(ordered(sols)) elif len(gens) == 1: # There is only one generator that we are interested in, but # there may have been more than one generator identified by # polys (e.g. for symbols other than the one we are interested # in) so recast the poly in terms of our generator of interest. # Also use composite=True with f_num since Poly won't update # poly as documented in issue 8810. poly = Poly(f_num, gens[0], composite=True) # if we aren't on the tsolve-pass, use roots if not flags.pop('tsolve', False): soln = None deg = poly.degree() flags['tsolve'] = True solvers = {k: flags.get(k, True) for k in ('cubics', 'quartics', 'quintics')} soln = roots(poly, **solvers) if sum(soln.values()) < deg: # e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 + # 5000*x**2 + 6250*x + 3189) -> {} # so all_roots is used and RootOf instances are # returned *unless* the system is multivariate # or high-order EX domain. try: soln = poly.all_roots() except NotImplementedError: if not flags.get('incomplete', True): raise NotImplementedError( filldedent(''' Neither high-order multivariate polynomials nor sorting of EX-domain polynomials is supported. 
If you want to see any results, pass keyword incomplete=True to solve; to see numerical values of roots for univariate expressions, use nroots. ''')) else: pass else: soln = list(soln.keys()) if soln is not None: u = poly.gen if u != symbol: try: t = Dummy('t') iv = _solve(u - t, symbol, **flags) soln = list(ordered({i.subs(t, s) for i in iv for s in soln})) except NotImplementedError: # perhaps _tsolve can handle f_num soln = None else: check = False # only dens need to be checked if soln is not None: if len(soln) > 2: # if the flag wasn't set then unset it since high-order # results are quite long. Perhaps one could base this # decision on a certain critical length of the # roots. In addition, wester test M2 has an expression # whose roots can be shown to be real with the # unsimplified form of the solution whereas only one of # the simplified forms appears to be real. flags['simplify'] = flags.get('simplify', False) result = soln # fallback if above fails # ----------------------- if result is False: # try unrad if flags.pop('_unrad', True): try: u = unrad(f_num, symbol) except (ValueError, NotImplementedError): u = False if u: eq, cov = u if cov: isym, ieq = cov inv = _solve(ieq, symbol, **flags)[0] rv = {inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)} else: try: rv = set(_solve(eq, symbol, **flags)) except NotImplementedError: rv = None if rv is not None: result = list(ordered(rv)) # if the flag wasn't set then unset it since unrad results # can be quite long or of very high order flags['simplify'] = flags.get('simplify', False) else: pass # for coverage # try _tsolve if result is False: flags.pop('tsolve', None) # allow tsolve to be used on next pass try: soln = _tsolve(f_num, symbol, **flags) if soln is not None: result = soln except PolynomialError: pass # ----------- end of fallback ---------------------------- if result is False: raise NotImplementedError('\n'.join([msg, not_impl_msg % f])) if flags.get('simplify', True): result = list(map(simplify, result)) # we just simplified the solution so we now set the flag to # False so the simplification doesn't happen again in checksol() flags['simplify'] = False if checkdens: # reject any result that makes any denom. 
affirmatively 0; # if in doubt, keep it dens = _simple_dens(f, symbols) result = [s for s in result if all(not checksol(d, {symbol: s}, **flags) for d in dens)] if check: # keep only results if the check is not False result = [r for r in result if checksol(f_num, {symbol: r}, **flags) is not False] return result def _solve_system(exprs, symbols, **flags): if not exprs: return [] polys = [] dens = set() failed = [] result = False linear = False manual = flags.get('manual', False) checkdens = check = flags.get('check', True) for j, g in enumerate(exprs): dens.update(_simple_dens(g, symbols)) i, d = _invert(g, *symbols) g = d - i g = g.as_numer_denom()[0] if manual: failed.append(g) continue poly = g.as_poly(*symbols, extension=True) if poly is not None: polys.append(poly) else: failed.append(g) if not polys: solved_syms = [] else: if all(p.is_linear for p in polys): n, m = len(polys), len(symbols) matrix = zeros(n, m + 1) for i, poly in enumerate(polys): for monom, coeff in poly.terms(): try: j = monom.index(1) matrix[i, j] = coeff except ValueError: matrix[i, m] = -coeff # returns a dictionary ({symbols: values}) or None if flags.pop('particular', False): result = minsolve_linear_system(matrix, *symbols, **flags) else: result = solve_linear_system(matrix, *symbols, **flags) if failed: if result: solved_syms = list(result.keys()) else: solved_syms = [] else: linear = True else: if len(symbols) > len(polys): from sympy.utilities.iterables import subsets free = set().union(*[p.free_symbols for p in polys]) free = list(ordered(free.intersection(symbols))) got_s = set() result = [] for syms in subsets(free, len(polys)): try: # returns [] or list of tuples of solutions for syms res = solve_poly_system(polys, *syms) if res: for r in res: skip = False for r1 in r: if got_s and any([ss in r1.free_symbols for ss in got_s]): # sol depends on previously # solved symbols: discard it skip = True if not skip: got_s.update(syms) result.extend([dict(list(zip(syms, r)))]) except NotImplementedError: pass if got_s: solved_syms = list(got_s) else: raise NotImplementedError('no valid subset found') else: try: result = solve_poly_system(polys, *symbols) if result: solved_syms = symbols # we don't know here if the symbols provided # were given or not, so let solve resolve that. # A list of dictionaries is going to always be # returned from here. result = [dict(list(zip(solved_syms, r))) for r in result] except NotImplementedError: failed.extend([g.as_expr() for g in polys]) solved_syms = [] result = None if result: if isinstance(result, dict): result = [result] else: result = [{}] if failed: # For each failed equation, see if we can solve for one of the # remaining symbols from that equation. If so, we update the # solution set and continue with the next failed equation, # repeating until we are done or we get an equation that can't # be solved. 
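# Illustrative walk-through (a hypothetical run, not from the tests):
# suppose the polynomial pass gave result = [{x: 2}] and failed contains
# x + y - 5.  Then eq.subs(r) leaves y - 3, _solve returns [3], and the
# expanded mapping {x: 2, y: 3} is appended to newresult; y is recorded
# in got_s so that later candidate solutions still containing y are
# discarded rather than re-solved inconsistently.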
def _ok_syms(e, sort=False): rv = (e.free_symbols - solved_syms) & legal if sort: rv = list(rv) rv.sort(key=default_sort_key) return rv solved_syms = set(solved_syms) # set of symbols we have solved for legal = set(symbols) # what we are interested in # sort so equation with the fewest potential symbols is first u = Dummy() # used in solution checking for eq in ordered(failed, lambda _: len(_ok_syms(_))): newresult = [] bad_results = [] got_s = set() hit = False for r in result: # update eq with everything that is known so far eq2 = eq.subs(r) # if check is True then we see if it satisfies this # equation, otherwise we just accept it if check and r: b = checksol(u, u, eq2, minimal=True) if b is not None: # this solution is sufficient to know whether # it is valid or not so we either accept or # reject it, then continue if b: newresult.append(r) else: bad_results.append(r) continue # search for a symbol amongst those available that # can be solved for ok_syms = _ok_syms(eq2, sort=True) if not ok_syms: if r: newresult.append(r) break # skip as it's independent of desired symbols for s in ok_syms: try: soln = _solve(eq2, s, **flags) except NotImplementedError: continue # put each solution in r and append the now-expanded # result in the new result list; use copy since the # solution for s in being added in-place for sol in soln: if got_s and any([ss in sol.free_symbols for ss in got_s]): # sol depends on previously solved symbols: discard it continue rnew = r.copy() for k, v in r.items(): rnew[k] = v.subs(s, sol) # and add this new solution rnew[s] = sol newresult.append(rnew) hit = True got_s.add(s) if not hit: raise NotImplementedError('could not solve %s' % eq2) else: result = newresult for b in bad_results: if b in result: result.remove(b) default_simplify = bool(failed) # rely on system-solvers to simplify if flags.get('simplify', default_simplify): for r in result: for k in r: r[k] = simplify(r[k]) flags['simplify'] = False # don't need to do so in checksol now if checkdens: result = [r for r in result if not any(checksol(d, r, **flags) for d in dens)] if check and not linear: result = [r for r in result if not any(checksol(e, r, **flags) is False for e in exprs)] result = [r for r in result if r] if linear and result: result = result[0] return result def solve_linear(lhs, rhs=0, symbols=[], exclude=[]): r""" Return a tuple derived from ``f = lhs - rhs`` that is one of the following: ``(0, 1)``, ``(0, 0)``, ``(symbol, solution)``, ``(n, d)``. Explanation =========== ``(0, 1)`` meaning that ``f`` is independent of the symbols in *symbols* that are not in *exclude*. ``(0, 0)`` meaning that there is no solution to the equation amongst the symbols given. If the first element of the tuple is not zero, then the function is guaranteed to be dependent on a symbol in *symbols*. ``(symbol, solution)`` where symbol appears linearly in the numerator of ``f``, is in *symbols* (if given), and is not in *exclude* (if given). No simplification is done to ``f`` other than a ``mul=True`` expansion, so the solution will correspond strictly to a unique solution. ``(n, d)`` where ``n`` and ``d`` are the numerator and denominator of ``f`` when the numerator was not linear in any symbol of interest; ``n`` will never be a symbol unless a solution for that symbol was found (in which case the second element is the solution, not the denominator). 
Examples ======== >>> from sympy.core.power import Pow >>> from sympy.polys.polytools import cancel ``f`` is independent of the symbols in *symbols* that are not in *exclude*: >>> from sympy.solvers.solvers import solve_linear >>> from sympy.abc import x, y, z >>> from sympy import cos, sin >>> eq = y*cos(x)**2 + y*sin(x)**2 - y # = y*(1 - 1) = 0 >>> solve_linear(eq) (0, 1) >>> eq = cos(x)**2 + sin(x)**2 # = 1 >>> solve_linear(eq) (0, 1) >>> solve_linear(x, exclude=[x]) (0, 1) The variable ``x`` appears as a linear variable in each of the following: >>> solve_linear(x + y**2) (x, -y**2) >>> solve_linear(1/x - y**2) (x, y**(-2)) When not linear in ``x`` or ``y`` then the numerator and denominator are returned: >>> solve_linear(x**2/y**2 - 3) (x**2 - 3*y**2, y**2) If the numerator of the expression is a symbol, then ``(0, 0)`` is returned if the solution for that symbol would have set any denominator to 0: >>> eq = 1/(1/x - 2) >>> eq.as_numer_denom() (x, 1 - 2*x) >>> solve_linear(eq) (0, 0) But automatic rewriting may cause a symbol in the denominator to appear in the numerator so a solution will be returned: >>> (1/x)**-1 x >>> solve_linear((1/x)**-1) (x, 0) Use an unevaluated expression to avoid this: >>> solve_linear(Pow(1/x, -1, evaluate=False)) (0, 0) If ``x`` is allowed to cancel in the following expression, then it appears to be linear in ``x``, but this sort of cancellation is not done by ``solve_linear`` so the solution will always satisfy the original expression without causing a division by zero error. >>> eq = x**2*(1/x - z**2/x) >>> solve_linear(cancel(eq)) (x, 0) >>> solve_linear(eq) (x**2*(1 - z**2), x) A list of symbols for which a solution is desired may be given: >>> solve_linear(x + y + z, symbols=[y]) (y, -x - z) A list of symbols to ignore may also be given: >>> solve_linear(x + y + z, exclude=[x]) (y, -x - z) (A solution for ``y`` is obtained because it is the first variable from the canonically sorted list of symbols that had a linear solution.) """ if isinstance(lhs, Equality): if rhs: raise ValueError(filldedent(''' If lhs is an Equality, rhs must be 0 but was %s''' % rhs)) rhs = lhs.rhs lhs = lhs.lhs dens = None eq = lhs - rhs n, d = eq.as_numer_denom() if not n: return S.Zero, S.One free = n.free_symbols if not symbols: symbols = free else: bad = [s for s in symbols if not s.is_Symbol] if bad: if len(bad) == 1: bad = bad[0] if len(symbols) == 1: eg = 'solve(%s, %s)' % (eq, symbols[0]) else: eg = 'solve(%s, *%s)' % (eq, list(symbols)) raise ValueError(filldedent(''' solve_linear only handles symbols, not %s. To isolate non-symbols use solve, e.g. >>> %s <<<. 
''' % (bad, eg))) symbols = free.intersection(symbols) symbols = symbols.difference(exclude) if not symbols: return S.Zero, S.One # derivatives are easy to do but tricky to analyze to see if they # are going to disallow a linear solution, so for simplicity we # just evaluate the ones that have the symbols of interest derivs = defaultdict(list) for der in n.atoms(Derivative): csym = der.free_symbols & symbols for c in csym: derivs[c].append(der) all_zero = True for xi in sorted(symbols, key=default_sort_key): # canonical order # if there are derivatives in this var, calculate them now if isinstance(derivs[xi], list): derivs[xi] = {der: der.doit() for der in derivs[xi]} newn = n.subs(derivs[xi]) dnewn_dxi = newn.diff(xi) # dnewn_dxi can be nonzero if it survives differentation by any # of its free symbols free = dnewn_dxi.free_symbols if dnewn_dxi and (not free or any(dnewn_dxi.diff(s) for s in free)): all_zero = False if dnewn_dxi is S.NaN: break if xi not in dnewn_dxi.free_symbols: vi = -1/dnewn_dxi*(newn.subs(xi, 0)) if dens is None: dens = _simple_dens(eq, symbols) if not any(checksol(di, {xi: vi}, minimal=True) is True for di in dens): # simplify any trivial integral irep = [(i, i.doit()) for i in vi.atoms(Integral) if i.function.is_number] # do a slight bit of simplification vi = expand_mul(vi.subs(irep)) return xi, vi if all_zero: return S.Zero, S.One if n.is_Symbol: # no solution for this symbol was found return S.Zero, S.Zero return n, d def minsolve_linear_system(system, *symbols, **flags): r""" Find a particular solution to a linear system. Explanation =========== In particular, try to find a solution with the minimal possible number of non-zero variables using a naive algorithm with exponential complexity. If ``quick=True``, a heuristic is used. """ quick = flags.get('quick', False) # Check if there are any non-zero solutions at all s0 = solve_linear_system(system, *symbols, **flags) if not s0 or all(v == 0 for v in s0.values()): return s0 if quick: # We just solve the system and try to heuristically find a nice # solution. s = solve_linear_system(system, *symbols) def update(determined, solution): delete = [] for k, v in solution.items(): solution[k] = v.subs(determined) if not solution[k].free_symbols: delete.append(k) determined[k] = solution[k] for k in delete: del solution[k] determined = {} update(determined, s) while s: # NOTE sort by default_sort_key to get deterministic result k = max((k for k in s.values()), key=lambda x: (len(x.free_symbols), default_sort_key(x))) x = max(k.free_symbols, key=default_sort_key) if len(k.free_symbols) != 1: determined[x] = S.Zero else: val = solve(k)[0] if val == 0 and all(v.subs(x, val) == 0 for v in s.values()): determined[x] = S.One else: determined[x] = val update(determined, s) return determined else: # We try to select n variables which we want to be non-zero. # All others will be assumed zero. We try to solve the modified system. # If there is a non-trivial solution, just set the free variables to # one. If we do this for increasing n, trying all combinations of # variables, we will find an optimal solution. # We speed up slightly by starting at one less than the number of # variables the quick method manages. 
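# Cost sketch (an estimate, not a measurement): for N symbols and a
# target of n non-zero variables, the loop below solves up to C(N, n)
# reduced systems, one per subset from combinations(range(N), n) --
# e.g. C(6, 2) = 15.  n is decreased starting one below the count found
# by the quick heuristic, and the search stops at the first n for which
# no subset admits a non-trivial solution, returning the smallest
# solution found so far.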
from itertools import combinations from sympy.utilities.misc import debug N = len(symbols) bestsol = minsolve_linear_system(system, *symbols, quick=True) n0 = len([x for x in bestsol.values() if x != 0]) for n in range(n0 - 1, 1, -1): debug('minsolve: %s' % n) thissol = None for nonzeros in combinations(list(range(N)), n): subm = Matrix([system.col(i).T for i in nonzeros] + [system.col(-1).T]).T s = solve_linear_system(subm, *[symbols[i] for i in nonzeros]) if s and not all(v == 0 for v in s.values()): subs = [(symbols[v], S.One) for v in nonzeros] for k, v in s.items(): s[k] = v.subs(subs) for sym in symbols: if sym not in s: if symbols.index(sym) in nonzeros: s[sym] = S.One else: s[sym] = S.Zero thissol = s break if thissol is None: break bestsol = thissol return bestsol def solve_linear_system(system, *symbols, **flags): r""" Solve system of $N$ linear equations with $M$ variables, which means both under- and overdetermined systems are supported. Explanation =========== The possible number of solutions is zero, one, or infinite. Respectively, this procedure will return None or a dictionary with solutions. In the case of underdetermined systems, all arbitrary parameters are skipped. This may cause a situation in which an empty dictionary is returned. In that case, all symbols can be assigned arbitrary values. Input to this function is a $N\times M + 1$ matrix, which means it has to be in augmented form. If you prefer to enter $N$ equations and $M$ unknowns then use ``solve(Neqs, *Msymbols)`` instead. Note: a local copy of the matrix is made by this routine so the matrix that is passed will not be modified. The algorithm used here is fraction-free Gaussian elimination, which results, after elimination, in an upper-triangular matrix. Then solutions are found using back-substitution. This approach is more efficient and compact than the Gauss-Jordan method. Examples ======== >>> from sympy import Matrix, solve_linear_system >>> from sympy.abc import x, y Solve the following system:: x + 4 y == 2 -2 x + y == 14 >>> system = Matrix(( (1, 4, 2), (-2, 1, 14))) >>> solve_linear_system(system, x, y) {x: -6, y: 2} A degenerate system returns an empty dictionary: >>> system = Matrix(( (0,0,0), (0,0,0) )) >>> solve_linear_system(system, x, y) {} """ do_simplify = flags.get('simplify', True) if system.rows == system.cols - 1 == len(symbols): try: # well behaved n-equations and n-unknowns inv = inv_quick(system[:, :-1]) rv = dict(zip(symbols, inv*system[:, -1])) if do_simplify: for k, v in rv.items(): rv[k] = simplify(v) if not all(i.is_zero for i in rv.values()): # non-trivial solution return rv except ValueError: pass matrix = system[:, :] syms = list(symbols) i, m = 0, matrix.cols - 1 # don't count augmentation while i < matrix.rows: if i == m: # an overdetermined system if any(matrix[i:, m]): return None # no solutions else: # remove trailing rows matrix = matrix[:i, :] break if not matrix[i, i]: # there is no pivot in current column # so try to find one in other columns for k in range(i + 1, m): if matrix[i, k]: break else: if matrix[i, m]: # We need to know if this is always zero or not. We # assume that if there are free symbols that it is not # identically zero (or that there is more than one way # to make this zero). Otherwise, if there are none, this # is a constant and we assume that it does not simplify # to zero XXX are there better (fast) ways to test this? # The .equals(0) method could be used but that can be # slow; numerical testing is prone to errors of scaling. 
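# Concrete illustration (hypothetical rows): a pivotless row [0, 0, 2]
# has a constant non-zero rhs, so the system is inconsistent and None is
# returned just below; a row such as [0, 0, x - y] still has free
# symbols, so it is instead compared with the later rows and dropped as
# redundant only when an equivalent row is found -- otherwise None is
# returned as well.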
if not matrix[i, m].free_symbols: return None # no solution # A row of zeros with a non-zero rhs can only be accepted # if there is another equivalent row. Any such rows will # be deleted. nrows = matrix.rows rowi = matrix.row(i) ip = None j = i + 1 while j < matrix.rows: # do we need to see if the rhs of j # is a constant multiple of i's rhs? rowj = matrix.row(j) if rowj == rowi: matrix.row_del(j) elif rowj[:-1] == rowi[:-1]: if ip is None: _, ip = rowi[-1].as_content_primitive() _, jp = rowj[-1].as_content_primitive() if not (simplify(jp - ip) or simplify(jp + ip)): matrix.row_del(j) j += 1 if nrows == matrix.rows: # no solution return None # zero row or was a linear combination of # other rows or was a row with a symbolic # expression that matched other rows, e.g. [0, 0, x - y] # so now we can safely skip it matrix.row_del(i) if not matrix: # every choice of variable values is a solution # so we return an empty dict instead of None return dict() continue # we want to change the order of columns so # the order of variables must also change syms[i], syms[k] = syms[k], syms[i] matrix.col_swap(i, k) pivot_inv = S.One/matrix[i, i] # divide all elements in the current row by the pivot matrix.row_op(i, lambda x, _: x * pivot_inv) for k in range(i + 1, matrix.rows): if matrix[k, i]: coeff = matrix[k, i] # subtract from the current row the row containing # pivot and multiplied by extracted coefficient matrix.row_op(k, lambda x, j: simplify(x - matrix[i, j]*coeff)) i += 1 # if there weren't any problems, augmented matrix is now # in row-echelon form so we can check how many solutions # there are and extract them using back substitution if len(syms) == matrix.rows: # this system is Cramer equivalent so there is # exactly one solution to this system of equations k, solutions = i - 1, {} while k >= 0: content = matrix[k, m] # run back-substitution for variables for j in range(k + 1, m): content -= matrix[k, j]*solutions[syms[j]] if do_simplify: solutions[syms[k]] = simplify(content) else: solutions[syms[k]] = content k -= 1 return solutions elif len(syms) > matrix.rows: # this system will have infinite number of solutions # dependent on exactly len(syms) - i parameters k, solutions = i - 1, {} while k >= 0: content = matrix[k, m] # run back-substitution for variables for j in range(k + 1, i): content -= matrix[k, j]*solutions[syms[j]] # run back-substitution for parameters for j in range(i, m): content -= matrix[k, j]*syms[j] if do_simplify: solutions[syms[k]] = simplify(content) else: solutions[syms[k]] = content k -= 1 return solutions else: return [] # no solutions def solve_undetermined_coeffs(equ, coeffs, sym, **flags): r""" Solve equation of a type $p(x; a_1, \ldots, a_k) = q(x)$ where both $p$ and $q$ are univariate polynomials that depend on $k$ parameters. Explanation =========== The result of this function is a dictionary with symbolic values of those parameters with respect to coefficients in $q$. This function accepts both equations class instances and ordinary SymPy expressions. Specification of parameters and variables is obligatory for efficiency and simplicity reasons. 
Examples ======== >>> from sympy import Eq >>> from sympy.abc import a, b, c, x >>> from sympy.solvers import solve_undetermined_coeffs >>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x) {a: 1/2, b: -1/2} >>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x) {a: 1/c, b: -1/c} """ if isinstance(equ, Equality): # got equation, so move all the # terms to the left hand side equ = equ.lhs - equ.rhs equ = cancel(equ).as_numer_denom()[0] system = list(collect(equ.expand(), sym, evaluate=False).values()) if not any(equ.has(sym) for equ in system): # consecutive powers in the input expressions have # been successfully collected, so solve remaining # system using Gaussian elimination algorithm return solve(system, *coeffs, **flags) else: return None # no solutions def solve_linear_system_LU(matrix, syms): """ Solves the augmented matrix system using ``LUsolve`` and returns a dictionary in which solutions are keyed to the symbols of *syms* as ordered. Explanation =========== The matrix must be invertible. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y, z >>> from sympy.solvers.solvers import solve_linear_system_LU >>> solve_linear_system_LU(Matrix([ ... [1, 2, 0, 1], ... [3, 2, 2, 1], ... [2, 0, 0, 1]]), [x, y, z]) {x: 1/2, y: 1/4, z: -1/2} See Also ======== LUsolve """ if matrix.rows != matrix.cols - 1: raise ValueError("Rows should be equal to columns - 1") A = matrix[:matrix.rows, :matrix.rows] b = matrix[:, matrix.cols - 1:] soln = A.LUsolve(b) solutions = {} for i in range(soln.rows): solutions[syms[i]] = soln[i, 0] return solutions def det_perm(M): """ Return the determinant of *M* by using permutations to select factors. Explanation =========== For sizes larger than 8 the number of permutations becomes prohibitively large, or if there are no symbols in the matrix, it is better to use the standard determinant routines (e.g., ``M.det()``.) See Also ======== det_minor det_quick """ args = [] s = True n = M.rows list_ = getattr(M, '_mat', None) if list_ is None: list_ = flatten(M.tolist()) for perm in generate_bell(n): fac = [] idx = 0 for j in perm: fac.append(list_[idx + j]) idx += n term = Mul(*fac) # disaster with unevaluated Mul -- takes forever for n=7 args.append(term if s else -term) s = not s return Add(*args) def det_minor(M): """ Return the ``det(M)`` computed from minors without introducing new nesting in products. See Also ======== det_perm det_quick """ n = M.rows if n == 2: return M[0, 0]*M[1, 1] - M[1, 0]*M[0, 1] else: return sum([(1, -1)[i % 2]*Add(*[M[0, i]*d for d in Add.make_args(det_minor(M.minor_submatrix(0, i)))]) if M[0, i] else S.Zero for i in range(n)]) def det_quick(M, method=None): """ Return ``det(M)`` assuming that either there are lots of zeros or the size of the matrix is small. If this assumption is not met, then the normal Matrix.det function will be used with method = ``method``. See Also ======== det_minor det_perm """ if any(i.has(Symbol) for i in M): if M.rows < 8 and all(i.has(Symbol) for i in M): return det_perm(M) return det_minor(M) else: return M.det(method=method) if method else M.det() def inv_quick(M): """Return the inverse of ``M``, assuming that either there are lots of zeros or the size of the matrix is small. 
""" from sympy.matrices import zeros if not all(i.is_Number for i in M): if not any(i.is_Number for i in M): det = lambda _: det_perm(_) else: det = lambda _: det_minor(_) else: return M.inv() n = M.rows d = det(M) if d == S.Zero: raise ValueError("Matrix det == 0; not invertible.") ret = zeros(n) s1 = -1 for i in range(n): s = s1 = -s1 for j in range(n): di = det(M.minor_submatrix(i, j)) ret[j, i] = s*di/d s = -s return ret # these are functions that have multiple inverse values per period multi_inverses = { sin: lambda x: (asin(x), S.Pi - asin(x)), cos: lambda x: (acos(x), 2*S.Pi - acos(x)), } def _tsolve(eq, sym, **flags): """ Helper for ``_solve`` that solves a transcendental equation with respect to the given symbol. Various equations containing powers and logarithms, can be solved. There is currently no guarantee that all solutions will be returned or that a real solution will be favored over a complex one. Either a list of potential solutions will be returned or None will be returned (in the case that no method was known to get a solution for the equation). All other errors (like the inability to cast an expression as a Poly) are unhandled. Examples ======== >>> from sympy import log >>> from sympy.solvers.solvers import _tsolve as tsolve >>> from sympy.abc import x >>> tsolve(3**(2*x + 5) - 4, x) [-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)] >>> tsolve(log(x) + 2*x, x) [LambertW(2)/2] """ if 'tsolve_saw' not in flags: flags['tsolve_saw'] = [] if eq in flags['tsolve_saw']: return None else: flags['tsolve_saw'].append(eq) rhs, lhs = _invert(eq, sym) if lhs == sym: return [rhs] try: if lhs.is_Add: # it's time to try factoring; powdenest is used # to try get powers in standard form for better factoring f = factor(powdenest(lhs - rhs)) if f.is_Mul: return _solve(f, sym, **flags) if rhs: f = logcombine(lhs, force=flags.get('force', True)) if f.count(log) != lhs.count(log): if isinstance(f, log): return _solve(f.args[0] - exp(rhs), sym, **flags) return _tsolve(f - rhs, sym, **flags) elif lhs.is_Pow: if lhs.exp.is_Integer: if lhs - rhs != eq: return _solve(lhs - rhs, sym, **flags) if sym not in lhs.exp.free_symbols: return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags) # _tsolve calls this with Dummy before passing the actual number in. if any(t.is_Dummy for t in rhs.free_symbols): raise NotImplementedError # _tsolve will call here again... # a ** g(x) == 0 if not rhs: # f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at # the same place sol_base = _solve(lhs.base, sym, **flags) return [s for s in sol_base if lhs.exp.subs(sym, s) != 0] # a ** g(x) == b if not lhs.base.has(sym): if lhs.base == 0: return _solve(lhs.exp, sym, **flags) if rhs != 0 else [] # Gets most solutions... 
if lhs.base == rhs.as_base_exp()[0]: # handles case when bases are equal sol = _solve(lhs.exp - rhs.as_base_exp()[1], sym, **flags) else: # handles cases when bases are not equal and exp # may or may not be equal sol = _solve(exp(log(lhs.base)*lhs.exp)-exp(log(rhs)), sym, **flags) # Check for duplicate solutions def equal(expr1, expr2): _ = Dummy() eq = checksol(expr1 - _, _, expr2) if eq is None: if nsimplify(expr1) != nsimplify(expr2): return False # they might be coincidentally the same # so check more rigorously eq = expr1.equals(expr2) return eq # Guess a rational exponent e_rat = nsimplify(log(abs(rhs))/log(abs(lhs.base))) e_rat = simplify(posify(e_rat)[0]) n, d = fraction(e_rat) if expand(lhs.base**n - rhs**d) == 0: sol = [s for s in sol if not equal(lhs.exp.subs(sym, s), e_rat)] sol.extend(_solve(lhs.exp - e_rat, sym, **flags)) return list(ordered(set(sol))) # f(x) ** g(x) == c else: sol = [] logform = lhs.exp*log(lhs.base) - log(rhs) if logform != lhs - rhs: try: sol.extend(_solve(logform, sym, **flags)) except NotImplementedError: pass # Collect possible solutions and check with substitution later. check = [] if rhs == 1: # f(x) ** g(x) = 1 -- g(x)=0 or f(x)=+-1 check.extend(_solve(lhs.exp, sym, **flags)) check.extend(_solve(lhs.base - 1, sym, **flags)) check.extend(_solve(lhs.base + 1, sym, **flags)) elif rhs.is_Rational: for d in (i for i in divisors(abs(rhs.p)) if i != 1): e, t = integer_log(rhs.p, d) if not t: continue # rhs.p != d**b for s in divisors(abs(rhs.q)): if s**e== rhs.q: r = Rational(d, s) check.extend(_solve(lhs.base - r, sym, **flags)) check.extend(_solve(lhs.base + r, sym, **flags)) check.extend(_solve(lhs.exp - e, sym, **flags)) elif rhs.is_irrational: b_l, e_l = lhs.base.as_base_exp() n, d = (e_l*lhs.exp).as_numer_denom() b, e = sqrtdenest(rhs).as_base_exp() check = [sqrtdenest(i) for i in (_solve(lhs.base - b, sym, **flags))] check.extend([sqrtdenest(i) for i in (_solve(lhs.exp - e, sym, **flags))]) if e_l*d != 1: check.extend(_solve(b_l**n - rhs**(e_l*d), sym, **flags)) for s in check: ok = checksol(eq, sym, s) if ok is None: ok = eq.subs(sym, s).equals(0) if ok: sol.append(s) return list(ordered(set(sol))) elif lhs.is_Function and len(lhs.args) == 1: if lhs.func in multi_inverses: # sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3)) soln = [] for i in multi_inverses[lhs.func](rhs): soln.extend(_solve(lhs.args[0] - i, sym, **flags)) return list(ordered(soln)) elif lhs.func == LambertW: return _solve(lhs.args[0] - rhs*exp(rhs), sym, **flags) rewrite = lhs.rewrite(exp) if rewrite != lhs: return _solve(rewrite - rhs, sym, **flags) except NotImplementedError: pass # maybe it is a lambert pattern if flags.pop('bivariate', True): # lambert forms may need some help being recognized, e.g. 
changing # 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1 # to 2**(3*x) + (x*log(2) + 1)**3 g = _filtered_gens(eq.as_poly(), sym) up_or_log = set() for gi in g: if isinstance(gi, exp) or isinstance(gi, log): up_or_log.add(gi) elif gi.is_Pow: gisimp = powdenest(expand_power_exp(gi)) if gisimp.is_Pow and sym in gisimp.exp.free_symbols: up_or_log.add(gi) eq_down = expand_log(expand_power_exp(eq)).subs( dict(list(zip(up_or_log, [0]*len(up_or_log))))) eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down)) rhs, lhs = _invert(eq, sym) if lhs.has(sym): try: poly = lhs.as_poly() g = _filtered_gens(poly, sym) _eq = lhs - rhs sols = _solve_lambert(_eq, sym, g) # use a simplified form if it satisfies eq # and has fewer operations for n, s in enumerate(sols): ns = nsimplify(s) if ns != s and ns.count_ops() <= s.count_ops(): ok = checksol(_eq, sym, ns) if ok is None: ok = _eq.subs(sym, ns).equals(0) if ok: sols[n] = ns return sols except NotImplementedError: # maybe it's a convoluted function if len(g) == 2: try: gpu = bivariate_type(lhs - rhs, *g) if gpu is None: raise NotImplementedError g, p, u = gpu flags['bivariate'] = False inversion = _tsolve(g - u, sym, **flags) if inversion: sol = _solve(p, u, **flags) return list(ordered(set([i.subs(u, s) for i in inversion for s in sol]))) except NotImplementedError: pass else: pass if flags.pop('force', True): flags['force'] = False pos, reps = posify(lhs - rhs) if rhs == S.ComplexInfinity: return [] for u, s in reps.items(): if s == sym: break else: u = sym if pos.has(u): try: soln = _solve(pos, u, **flags) return list(ordered([s.subs(reps) for s in soln])) except NotImplementedError: pass else: pass # here for coverage return # here for coverage # TODO: option for calculating J numerically @conserve_mpmath_dps def nsolve(*args, **kwargs): r""" Solve a nonlinear equation system numerically: ``nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)``. Explanation =========== ``f`` is a vector function of symbolic expressions representing the system. *args* are the variables. If there is only one variable, this argument can be omitted. ``x0`` is a starting vector close to a solution. Use the modules keyword to specify which modules should be used to evaluate the function and the Jacobian matrix. Make sure to use a module that supports matrices. For more information on the syntax, please see the docstring of ``lambdify``. If the keyword arguments contain ``dict=True`` (default is False) ``nsolve`` will return a list (perhaps empty) of solution mappings. This might be especially useful if you want to use ``nsolve`` as a fallback to solve since using the dict argument for both methods produces return values of consistent type structure. Please note: to keep this consistent with ``solve``, the solution will be returned in a list even though ``nsolve`` (currently at least) only finds one solution at a time. Overdetermined systems are supported. 
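For instance, assuming mpmath's default 15-digit precision, the ``dict``
form wraps the single root found for ``sin(x)`` near 2 in a list
containing one mapping:

>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2, dict=True)
[{x: 3.14159265358979}]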
Examples ======== >>> from sympy import Symbol, nsolve >>> import sympy >>> import mpmath >>> mpmath.mp.dps = 15 >>> x1 = Symbol('x1') >>> x2 = Symbol('x2') >>> f1 = 3 * x1**2 - 2 * x2**2 - 1 >>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8 >>> print(nsolve((f1, f2), (x1, x2), (-1, 1))) Matrix([[-1.19287309935246], [1.27844411169911]]) For one-dimensional functions the syntax is simplified: >>> from sympy import sin, nsolve >>> from sympy.abc import x >>> nsolve(sin(x), x, 2) 3.14159265358979 >>> nsolve(sin(x), 2) 3.14159265358979 To solve with higher precision than the default, use the prec argument: >>> from sympy import cos >>> nsolve(cos(x) - x, 1) 0.739085133215161 >>> nsolve(cos(x) - x, 1, prec=50) 0.73908513321516064165531208767387340401341175890076 >>> cos(_) 0.73908513321516064165531208767387340401341175890076 To solve for complex roots of real functions, a nonreal initial point must be specified: >>> from sympy import I >>> nsolve(x**2 + 2, I) 1.4142135623731*I ``mpmath.findroot`` is used and you can find their more extensive documentation, especially concerning keyword parameters and available solvers. Note, however, that functions which are very steep near the root, the verification of the solution may fail. In this case you should use the flag ``verify=False`` and independently verify the solution. >>> from sympy import cos, cosh >>> from sympy.abc import i >>> f = cos(x)*cosh(x) - 1 >>> nsolve(f, 3.14*100) Traceback (most recent call last): ... ValueError: Could not find root within given tolerance. (1.39267e+230 > 2.1684e-19) >>> ans = nsolve(f, 3.14*100, verify=False); ans 312.588469032184 >>> f.subs(x, ans).n(2) 2.1e+121 >>> (f/f.diff(x)).subs(x, ans).n(2) 7.4e-15 One might safely skip the verification if bounds of the root are known and a bisection method is used: >>> bounds = lambda i: (3.14*i, 3.14*(i + 1)) >>> nsolve(f, bounds(100), solver='bisect', verify=False) 315.730061685774 Alternatively, a function may be better behaved when the denominator is ignored. Since this is not always the case, however, the decision of what function to use is left to the discretion of the user. >>> eq = x**2/(1 - x)/(1 - 2*x)**2 - 100 >>> nsolve(eq, 0.46) Traceback (most recent call last): ... ValueError: Could not find root within given tolerance. (10000 > 2.1684e-19) Try another starting point or tweak arguments. >>> nsolve(eq.as_numer_denom()[0], 0.46) 0.46792545969349058 """ # there are several other SymPy functions that use method= so # guard against that here if 'method' in kwargs: raise ValueError(filldedent(''' Keyword "method" should not be used in this context. 
When using some mpmath solvers directly, the keyword "method" is used, but when using nsolve (and findroot) the keyword to use is "solver".''')) if 'prec' in kwargs: prec = kwargs.pop('prec') import mpmath mpmath.mp.dps = prec else: prec = None # keyword argument to return result as a dictionary as_dict = kwargs.pop('dict', False) # interpret arguments if len(args) == 3: f = args[0] fargs = args[1] x0 = args[2] if iterable(fargs) and iterable(x0): if len(x0) != len(fargs): raise TypeError('nsolve expected exactly %i guess vectors, got %i' % (len(fargs), len(x0))) elif len(args) == 2: f = args[0] fargs = None x0 = args[1] if iterable(f): raise TypeError('nsolve expected 3 arguments, got 2') elif len(args) < 2: raise TypeError('nsolve expected at least 2 arguments, got %i' % len(args)) else: raise TypeError('nsolve expected at most 3 arguments, got %i' % len(args)) modules = kwargs.get('modules', ['mpmath']) if iterable(f): f = list(f) for i, fi in enumerate(f): if isinstance(fi, Equality): f[i] = fi.lhs - fi.rhs f = Matrix(f).T if iterable(x0): x0 = list(x0) if not isinstance(f, Matrix): # assume it's a sympy expression if isinstance(f, Equality): f = f.lhs - f.rhs syms = f.free_symbols if fargs is None: fargs = syms.copy().pop() if not (len(syms) == 1 and (fargs in syms or fargs[0] in syms)): raise ValueError(filldedent(''' expected a one-dimensional and numerical function''')) # the function is much better behaved if there is no denominator # but sending the numerator is left to the user since sometimes # the function is better behaved when the denominator is present # e.g., issue 11768 f = lambdify(fargs, f, modules) x = sympify(findroot(f, x0, **kwargs)) if as_dict: return [{fargs: x}] return x if len(fargs) > f.cols: raise NotImplementedError(filldedent(''' need at least as many equations as variables''')) verbose = kwargs.get('verbose', False) if verbose: print('f(x):') print(f) # derive Jacobian J = f.jacobian(fargs) if verbose: print('J(x):') print(J) # create functions f = lambdify(fargs, f.T, modules) J = lambdify(fargs, J, modules) # solve the system numerically x = findroot(f, x0, J=J, **kwargs) if as_dict: return [dict(zip(fargs, [sympify(xi) for xi in x]))] return Matrix(x) def _invert(eq, *symbols, **kwargs): """ Return tuple (i, d) where ``i`` is independent of *symbols* and ``d`` contains symbols. Explanation =========== ``i`` and ``d`` are obtained after recursively using algebraic inversion until an uninvertible ``d`` remains. If there are no free symbols then ``d`` will be zero. Some (but not necessarily all) solutions to the expression ``i - d`` will be related to the solutions of the original expression. 
Examples ======== >>> from sympy.solvers.solvers import _invert as invert >>> from sympy import sqrt, cos >>> from sympy.abc import x, y >>> invert(x - 3) (3, x) >>> invert(3) (3, 0) >>> invert(2*cos(x) - 1) (1/2, cos(x)) >>> invert(sqrt(x) - 3) (3, sqrt(x)) >>> invert(sqrt(x) + y, x) (-y, sqrt(x)) >>> invert(sqrt(x) + y, y) (-sqrt(x), y) >>> invert(sqrt(x) + y, x, y) (0, sqrt(x) + y) If there is more than one symbol in a power's base and the exponent is not an Integer, then the principal root will be used for the inversion: >>> invert(sqrt(x + y) - 2) (4, x + y) >>> invert(sqrt(x + y) - 2) (4, x + y) If the exponent is an Integer, setting ``integer_power`` to True will force the principal root to be selected: >>> invert(x**2 - 4, integer_power=True) (2, x) """ eq = sympify(eq) if eq.args: # make sure we are working with flat eq eq = eq.func(*eq.args) free = eq.free_symbols if not symbols: symbols = free if not free & set(symbols): return eq, S.Zero dointpow = bool(kwargs.get('integer_power', False)) lhs = eq rhs = S.Zero while True: was = lhs while True: indep, dep = lhs.as_independent(*symbols) # dep + indep == rhs if lhs.is_Add: # this indicates we have done it all if indep.is_zero: break lhs = dep rhs -= indep # dep * indep == rhs else: # this indicates we have done it all if indep is S.One: break lhs = dep rhs /= indep # collect like-terms in symbols if lhs.is_Add: terms = {} for a in lhs.args: i, d = a.as_independent(*symbols) terms.setdefault(d, []).append(i) if any(len(v) > 1 for v in terms.values()): args = [] for d, i in terms.items(): if len(i) > 1: args.append(Add(*i)*d) else: args.append(i[0]*d) lhs = Add(*args) # if it's a two-term Add with rhs = 0 and two powers we can get the # dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3 if lhs.is_Add and not rhs and len(lhs.args) == 2 and \ not lhs.is_polynomial(*symbols): a, b = ordered(lhs.args) ai, ad = a.as_independent(*symbols) bi, bd = b.as_independent(*symbols) if any(_ispow(i) for i in (ad, bd)): a_base, a_exp = ad.as_base_exp() b_base, b_exp = bd.as_base_exp() if a_base == b_base: # a = -b lhs = powsimp(powdenest(ad/bd)) rhs = -bi/ai else: rat = ad/bd _lhs = powsimp(ad/bd) if _lhs != rat: lhs = _lhs rhs = -bi/ai elif ai == -bi: if isinstance(ad, Function) and ad.func == bd.func: if len(ad.args) == len(bd.args) == 1: lhs = ad.args[0] - bd.args[0] elif len(ad.args) == len(bd.args): # should be able to solve # f(x, y) - f(2 - x, 0) == 0 -> x == 1 raise NotImplementedError( 'equal function with more than 1 argument') else: raise ValueError( 'function with different numbers of args') elif lhs.is_Mul and any(_ispow(a) for a in lhs.args): lhs = powsimp(powdenest(lhs)) if lhs.is_Function: if hasattr(lhs, 'inverse') and len(lhs.args) == 1: # -1 # f(x) = g -> x = f (g) # # /!\ inverse should not be defined if there are multiple values # for the function -- these are handled in _tsolve # rhs = lhs.inverse()(rhs) lhs = lhs.args[0] elif isinstance(lhs, atan2): y, x = lhs.args lhs = 2*atan(y/(sqrt(x**2 + y**2) + x)) elif lhs.func == rhs.func: if len(lhs.args) == len(rhs.args) == 1: lhs = lhs.args[0] rhs = rhs.args[0] elif len(lhs.args) == len(rhs.args): # should be able to solve # f(x, y) == f(2, 3) -> x == 2 # f(x, x + y) == f(2, 3) -> x == 2 raise NotImplementedError( 'equal function with more than 1 argument') else: raise ValueError( 'function with different numbers of args') if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0: lhs = 1/lhs rhs = 1/rhs # base**a = b -> base = b**(1/a) if # a is an Integer and 
dointpow=True (this gives real branch of root) # a is not an Integer and the equation is multivariate and the # base has more than 1 symbol in it # The rationale for this is that right now the multi-system solvers # doesn't try to resolve generators to see, for example, if the whole # system is written in terms of sqrt(x + y) so it will just fail, so we # do that step here. if lhs.is_Pow and ( lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1): rhs = rhs**(1/lhs.exp) lhs = lhs.base if lhs == was: break return rhs, lhs def unrad(eq, *syms, **flags): """ Remove radicals with symbolic arguments and return (eq, cov), None, or raise an error. Explanation =========== None is returned if there are no radicals to remove. NotImplementedError is raised if there are radicals and they cannot be removed or if the relationship between the original symbols and the change of variable needed to rewrite the system as a polynomial cannot be solved. Otherwise the tuple, ``(eq, cov)``, is returned where: *eq*, ``cov`` *eq* is an equation without radicals (in the symbol(s) of interest) whose solutions are a superset of the solutions to the original expression. *eq* might be rewritten in terms of a new variable; the relationship to the original variables is given by ``cov`` which is a list containing ``v`` and ``v**p - b`` where ``p`` is the power needed to clear the radical and ``b`` is the radical now expressed as a polynomial in the symbols of interest. For example, for sqrt(2 - x) the tuple would be ``(c, c**2 - 2 + x)``. The solutions of *eq* will contain solutions to the original equation (if there are any). *syms* An iterable of symbols which, if provided, will limit the focus of radical removal: only radicals with one or more of the symbols of interest will be cleared. All free symbols are used if *syms* is not set. *flags* are used internally for communication during recursive calls. Two options are also recognized: ``take``, when defined, is interpreted as a single-argument function that returns True if a given Pow should be handled. Radicals can be removed from an expression if: * All bases of the radicals are the same; a change of variables is done in this case. * If all radicals appear in one term of the expression. * There are only four terms with sqrt() factors or there are less than four terms having sqrt() factors. * There are only two terms with radicals. Examples ======== >>> from sympy.solvers.solvers import unrad >>> from sympy.abc import x >>> from sympy import sqrt, Rational, root, real_roots, solve >>> unrad(sqrt(x)*x**Rational(1, 3) + 2) (x**5 - 64, []) >>> unrad(sqrt(x) + root(x + 1, 3)) (x**3 - x**2 - 2*x - 1, []) >>> eq = sqrt(x) + root(x, 3) - 2 >>> unrad(eq) (_p**3 + _p**2 - 2, [_p, _p**6 - x]) """ uflags = dict(check=False, simplify=False) def _cov(p, e): if cov: # XXX - uncovered oldp, olde = cov if Poly(e, p).degree(p) in (1, 2): cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])] else: raise NotImplementedError else: cov[:] = [p, e] def _canonical(eq, cov): if cov: # change symbol to vanilla so no solutions are eliminated p, e = cov rep = {p: Dummy(p.name)} eq = eq.xreplace(rep) cov = [p.xreplace(rep), e.xreplace(rep)] # remove constants and powers of factors since these don't change # the location of the root; XXX should factor or factor_terms be used? 
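# For instance (a hypothetical Mul reaching the loop below): in
# 2*sqrt(x)*(x**2 - 2*x + 1) the numeric factor 2 is skipped, the Pow
# factor sqrt(x) is replaced by its base x, and the polynomial factor is
# kept as-is; none of these changes moves the location of the roots.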
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True) if eq.is_Mul: args = [] for f in eq.args: if f.is_number: continue if f.is_Pow and _take(f, True): args.append(f.base) else: args.append(f) eq = Mul(*args) # leave as Mul for more efficient solving # make the sign canonical free = eq.free_symbols if len(free) == 1: if eq.coeff(free.pop()**degree(eq)).could_extract_minus_sign(): eq = -eq elif eq.could_extract_minus_sign(): eq = -eq return eq, cov def _Q(pow): # return leading Rational of denominator of Pow's exponent c = pow.as_base_exp()[1].as_coeff_Mul()[0] if not c.is_Rational: return S.One return c.q # define the _take method that will determine whether a term is of interest def _take(d, take_int_pow): # return True if coefficient of any factor's exponent's den is not 1 for pow in Mul.make_args(d): if not (pow.is_Symbol or pow.is_Pow): continue b, e = pow.as_base_exp() if not b.has(*syms): continue if not take_int_pow and _Q(pow) == 1: continue free = pow.free_symbols if free.intersection(syms): return True return False _take = flags.setdefault('_take', _take) cov, nwas, rpt = [flags.setdefault(k, v) for k, v in sorted(dict(cov=[], n=None, rpt=0).items())] # preconditioning eq = powdenest(factor_terms(eq, radical=True, clear=True)) if isinstance(eq, Relational): eq, d = eq, 1 else: eq, d = eq.as_numer_denom() eq = _mexpand(eq, recursive=True) if eq.is_number: return syms = set(syms) or eq.free_symbols poly = eq.as_poly() gens = [g for g in poly.gens if _take(g, True)] if not gens: return # check for trivial case # - already a polynomial in integer powers if all(_Q(g) == 1 for g in gens): return # - an exponent has a symbol of interest (don't handle) if any(g.as_base_exp()[1].has(*syms) for g in gens): return def _rads_bases_lcm(poly): # if all the bases are the same or all the radicals are in one # term, `lcm` will be the lcm of the denominators of the # exponents of the radicals lcm = 1 rads = set() bases = set() for g in poly.gens: if not _take(g, False): continue q = _Q(g) if q != 1: rads.add(g) lcm = ilcm(lcm, q) bases.add(g.base) return rads, bases, lcm rads, bases, lcm = _rads_bases_lcm(poly) if not rads: return covsym = Dummy('p', nonnegative=True) # only keep in syms symbols that actually appear in radicals; # and update gens newsyms = set() for r in rads: newsyms.update(syms & r.free_symbols) if newsyms != syms: syms = newsyms gens = [g for g in gens if g.free_symbols & syms] # get terms together that have common generators drad = dict(list(zip(rads, list(range(len(rads)))))) rterms = {(): []} args = Add.make_args(poly.as_expr()) for t in args: if _take(t, False): common = set(t.as_poly().gens).intersection(rads) key = tuple(sorted([drad[i] for i in common])) else: key = () rterms.setdefault(key, []).append(t) others = Add(*rterms.pop(())) rterms = [Add(*rterms[k]) for k in rterms.keys()] # the output will depend on the order terms are processed, so # make it canonical quickly rterms = list(reversed(list(ordered(rterms)))) ok = False # we don't have a solution yet depth = sqrt_depth(eq) if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2): eq = rterms[0]**lcm - ((-others)**lcm) ok = True else: if len(rterms) == 1 and rterms[0].is_Add: rterms = list(rterms[0].args) if len(bases) == 1: b = bases.pop() if len(syms) > 1: free = b.free_symbols x = {g for g in gens if g.is_Symbol} & free if not x: x = free x = ordered(x) else: x = syms x = list(x)[0] try: inv = _solve(covsym**lcm - b, x, **uflags) if not inv: raise NotImplementedError eq = 
poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0]) _cov(covsym, covsym**lcm - b) return _canonical(eq, cov) except NotImplementedError: pass else: # no longer consider integer powers as generators gens = [g for g in gens if _Q(g) != 1] if len(rterms) == 2: if not others: eq = rterms[0]**lcm - (-rterms[1])**lcm ok = True elif not log(lcm, 2).is_Integer: # the lcm-is-power-of-two case is handled below r0, r1 = rterms if flags.get('_reverse', False): r1, r0 = r0, r1 i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly()) i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly()) for reverse in range(2): if reverse: i0, i1 = i1, i0 r0, r1 = r1, r0 _rads1, _, lcm1 = i1 _rads1 = Mul(*_rads1) t1 = _rads1**lcm1 c = covsym**lcm1 - t1 for x in syms: try: sol = _solve(c, x, **uflags) if not sol: raise NotImplementedError neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \ others tmp = unrad(neweq, covsym) if tmp: eq, newcov = tmp if newcov: newp, newc = newcov _cov(newp, c.subs(covsym, _solve(newc, covsym, **uflags)[0])) else: _cov(covsym, c) else: eq = neweq _cov(covsym, c) ok = True break except NotImplementedError: if reverse: raise NotImplementedError( 'no successful change of variable found') else: pass if ok: break elif len(rterms) == 3: # two cube roots and another with order less than 5 # (so an analytical solution can be found) or a base # that matches one of the cube root bases info = [_rads_bases_lcm(i.as_poly()) for i in rterms] RAD = 0 BASES = 1 LCM = 2 if info[0][LCM] != 3: info.append(info.pop(0)) rterms.append(rterms.pop(0)) elif info[1][LCM] != 3: info.append(info.pop(1)) rterms.append(rterms.pop(1)) if info[0][LCM] == info[1][LCM] == 3: if info[1][BASES] != info[2][BASES]: info[0], info[1] = info[1], info[0] rterms[0], rterms[1] = rterms[1], rterms[0] if info[1][BASES] == info[2][BASES]: eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3 ok = True elif info[2][LCM] < 5: # a*root(A, 3) + b*root(B, 3) + others = c a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB'] # zz represents the unraded expression into which the # specifics for this case are substituted zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 - 3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 + 3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 - 63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 - 21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d + 45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 - 18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 + 9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 + 3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 - 60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 + 3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 - 126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 - 9*c*d**8 + d**9) def _t(i): b = Mul(*info[i][RAD]) return cancel(rterms[i]/b), Mul(*info[i][BASES]) aa, AA = _t(0) bb, BB = _t(1) cc = -rterms[2] dd = others eq = zz.xreplace(dict(zip( (a, A, b, B, c, d), (aa, AA, bb, BB, cc, dd)))) ok = True # handle power-of-2 cases if not ok: if log(lcm, 2).is_Integer and (not others and len(rterms) == 4 or len(rterms) < 4): def _norm2(a, b): return a**2 + b**2 + 2*a*b if len(rterms) == 4: # (r0+r1)**2 - (r2+r3)**2 r0, r1, r2, r3 = rterms eq = _norm2(r0, r1) - _norm2(r2, r3) ok = True elif len(rterms) == 3: # (r1+r2)**2 - (r0+others)**2 r0, r1, r2 = rterms eq = _norm2(r1, r2) - _norm2(r0, others) ok = True elif len(rterms) == 2: # r0**2 - (r1+others)**2 r0, r1 = rterms eq = r0**2 - 
_norm2(r1, others) ok = True new_depth = sqrt_depth(eq) if ok else depth rpt += 1 # XXX how many repeats with others unchanging is enough? if not ok or ( nwas is not None and len(rterms) == nwas and new_depth is not None and new_depth == depth and rpt > 3): raise NotImplementedError('Cannot remove all radicals') flags.update(dict(cov=cov, n=len(rterms), rpt=rpt)) neq = unrad(eq, *syms, **flags) if neq: eq, cov = neq eq, cov = _canonical(eq, cov) return eq, cov from sympy.solvers.bivariate import ( bivariate_type, _solve_lambert, _filtered_gens)
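# Illustrative sketch (assumed usage, shown as doctest-style comments):
# how the (eq, cov) pair returned by unrad can be used to recover a
# solution of the original radical equation.
#
# >>> from sympy import sqrt, root
# >>> from sympy.abc import x
# >>> from sympy.solvers.solvers import unrad
# >>> unrad(sqrt(x) + root(x, 3) - 2)
# (_p**3 + _p**2 - 2, [_p, _p**6 - x])
#
# The real root of _p**3 + _p**2 - 2 is _p = 1, and the change-of-variable
# relation _p**6 - x gives x = _p**6 = 1, which satisfies the original
# equation sqrt(x) + root(x, 3) - 2 = 0.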
792d0bcd731ea0efdaf92d9961bee7f692cbdd386db76a47bd3abb31f9190207
"""Base class for all the objects in SymPy""" from __future__ import print_function, division from collections import defaultdict from itertools import chain from .assumptions import BasicMeta, ManagedProperties from .cache import cacheit from .sympify import _sympify, sympify, SympifyError from .compatibility import (iterable, Iterator, ordered, string_types, with_metaclass, zip_longest, range, PY3, Mapping) from .singleton import S from inspect import getmro def as_Basic(expr): """Return expr as a Basic instance using strict sympify or raise a TypeError; this is just a wrapper to _sympify, raising a TypeError instead of a SympifyError.""" from sympy.utilities.misc import func_name try: return _sympify(expr) except SympifyError: raise TypeError( 'Argument must be a Basic object, not `%s`' % func_name( expr)) class Basic(with_metaclass(ManagedProperties)): """ Base class for all objects in SymPy. Conventions: 1) Always use ``.args``, when accessing parameters of some instance: >>> from sympy import cot >>> from sympy.abc import x, y >>> cot(x).args (x,) >>> cot(x).args[0] x >>> (x*y).args (x, y) >>> (x*y).args[1] y 2) Never use internal methods or variables (the ones prefixed with ``_``): >>> cot(x)._args # do not use this, use cot(x).args instead (x,) """ __slots__ = ['_mhash', # hash value '_args', # arguments '_assumptions' ] # To be overridden with True in the appropriate subclasses is_number = False is_Atom = False is_Symbol = False is_symbol = False is_Indexed = False is_Dummy = False is_Wild = False is_Function = False is_Add = False is_Mul = False is_Pow = False is_Number = False is_Float = False is_Rational = False is_Integer = False is_NumberSymbol = False is_Order = False is_Derivative = False is_Piecewise = False is_Poly = False is_AlgebraicNumber = False is_Relational = False is_Equality = False is_Boolean = False is_Not = False is_Matrix = False is_Vector = False is_Point = False is_MatAdd = False is_MatMul = False def __new__(cls, *args): obj = object.__new__(cls) obj._assumptions = cls.default_assumptions obj._mhash = None # will be set by __hash__ method. obj._args = args # all items in args must be Basic objects return obj def copy(self): return self.func(*self.args) def __reduce_ex__(self, proto): """ Pickling support.""" return type(self), self.__getnewargs__(), self.__getstate__() def __getnewargs__(self): return self.args def __getstate__(self): return {} def __setstate__(self, state): for k, v in state.items(): setattr(self, k, v) def __hash__(self): # hash cannot be cached using cache_it because infinite recurrence # occurs as hash is needed for setting cache dictionary keys h = self._mhash if h is None: h = hash((type(self).__name__,) + self._hashable_content()) self._mhash = h return h def _hashable_content(self): """Return a tuple of information about self that can be used to compute the hash. If a class defines additional attributes, like ``name`` in Symbol, then this method should be updated accordingly to return such relevant attributes. Defining more than _hashable_content is necessary if __eq__ has been defined by a class. See note about this in Basic.__eq__.""" return self._args @property def assumptions0(self): """ Return object `type` assumptions. For example: Symbol('x', real=True) Symbol('x', integer=True) are different objects. In other words, besides Python type (Symbol in this case), the initial assumptions are also forming their typeinfo. 
Examples ======== >>> from sympy import Symbol >>> from sympy.abc import x >>> x.assumptions0 {'commutative': True} >>> x = Symbol("x", positive=True) >>> x.assumptions0 {'commutative': True, 'complex': True, 'extended_negative': False, 'extended_nonnegative': True, 'extended_nonpositive': False, 'extended_nonzero': True, 'extended_positive': True, 'extended_real': True, 'finite': True, 'hermitian': True, 'imaginary': False, 'infinite': False, 'negative': False, 'nonnegative': True, 'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True, 'zero': False} """ return {} def compare(self, other): """ Return -1, 0, 1 if the object is smaller, equal, or greater than other. Not in the mathematical sense. If the object is of a different type from the "other" then their classes are ordered according to the sorted_classes list. Examples ======== >>> from sympy.abc import x, y >>> x.compare(y) -1 >>> x.compare(x) 0 >>> y.compare(x) 1 """ # all redefinitions of __cmp__ method should start with the # following lines: if self is other: return 0 n1 = self.__class__ n2 = other.__class__ c = (n1 > n2) - (n1 < n2) if c: return c # st = self._hashable_content() ot = other._hashable_content() c = (len(st) > len(ot)) - (len(st) < len(ot)) if c: return c for l, r in zip(st, ot): l = Basic(*l) if isinstance(l, frozenset) else l r = Basic(*r) if isinstance(r, frozenset) else r if isinstance(l, Basic): c = l.compare(r) else: c = (l > r) - (l < r) if c: return c return 0 @staticmethod def _compare_pretty(a, b): from sympy.series.order import Order if isinstance(a, Order) and not isinstance(b, Order): return 1 if not isinstance(a, Order) and isinstance(b, Order): return -1 if a.is_Rational and b.is_Rational: l = a.p * b.q r = b.p * a.q return (l > r) - (l < r) else: from sympy.core.symbol import Wild p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3") r_a = a.match(p1 * p2**p3) if r_a and p3 in r_a: a3 = r_a[p3] r_b = b.match(p1 * p2**p3) if r_b and p3 in r_b: b3 = r_b[p3] c = Basic.compare(a3, b3) if c != 0: return c return Basic.compare(a, b) @classmethod def fromiter(cls, args, **assumptions): """ Create a new object from an iterable. This is a convenience function that allows one to create objects from any iterable, without having to convert to a list or tuple first. Examples ======== >>> from sympy import Tuple >>> Tuple.fromiter(i for i in range(5)) (0, 1, 2, 3, 4) """ return cls(*tuple(args), **assumptions) @classmethod def class_key(cls): """Nice order of classes. """ return 5, 0, cls.__name__ @cacheit def sort_key(self, order=None): """ Return a sort key. Examples ======== >>> from sympy.core import S, I >>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key()) [1/2, -I, I] >>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]") [x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)] >>> sorted(_, key=lambda x: x.sort_key()) [x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2] """ # XXX: remove this when issue 5169 is fixed def inner_key(arg): if isinstance(arg, Basic): return arg.sort_key(order) else: return arg args = self._sorted_args args = len(args), tuple([inner_key(arg) for arg in args]) return self.class_key(), args, S.One.sort_key(), S.One def __eq__(self, other): """Return a boolean indicating whether a == b on the basis of their symbolic trees. This is the same as a.compare(b) == 0 but faster. 
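The comparison is structural rather than mathematical, so expressions
that are equal only after rewriting compare unequal:

>>> from sympy.abc import x
>>> (x + 1)**2 == x**2 + 2*x + 1
False
>>> (x + 1)**2 == (x + 1)**2
True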
Notes ===== If a class that overrides __eq__() needs to retain the implementation of __hash__() from a parent class, the interpreter must be told this explicitly by setting __hash__ = <ParentClass>.__hash__. Otherwise the inheritance of __hash__() will be blocked, just as if __hash__ had been explicitly set to None. References ========== from http://docs.python.org/dev/reference/datamodel.html#object.__hash__ """ if self is other: return True tself = type(self) tother = type(other) if tself is not tother: try: other = _sympify(other) tother = type(other) except SympifyError: return NotImplemented # As long as we have the ordering of classes (sympy.core), # comparing types will be slow in Python 2, because it uses # __cmp__. Until we can remove it # (https://github.com/sympy/sympy/issues/4269), we only compare # types in Python 2 directly if they actually have __ne__. if PY3 or type(tself).__ne__ is not type.__ne__: if tself != tother: return False elif tself is not tother: return False return self._hashable_content() == other._hashable_content() def __ne__(self, other): """``a != b`` -> Compare two symbolic trees and see whether they are different this is the same as: ``a.compare(b) != 0`` but faster """ return not self == other def dummy_eq(self, other, symbol=None): """ Compare two expressions and handle dummy symbols. Examples ======== >>> from sympy import Dummy >>> from sympy.abc import x, y >>> u = Dummy('u') >>> (u**2 + 1).dummy_eq(x**2 + 1) True >>> (u**2 + 1) == (x**2 + 1) False >>> (u**2 + y).dummy_eq(x**2 + y, x) True >>> (u**2 + y).dummy_eq(x**2 + y, y) False """ s = self.as_dummy() o = _sympify(other) o = o.as_dummy() dummy_symbols = [i for i in s.free_symbols if i.is_Dummy] if len(dummy_symbols) == 1: dummy = dummy_symbols.pop() else: return s == o if symbol is None: symbols = o.free_symbols if len(symbols) == 1: symbol = symbols.pop() else: return s == o tmp = dummy.__class__() return s.subs(dummy, tmp) == o.subs(symbol, tmp) # Note, we always use the default ordering (lex) in __str__ and __repr__, # regardless of the global setting. See issue 5487. def __repr__(self): """Method to return the string representation. Return the expression as a string. """ from sympy.printing import sstr return sstr(self, order=None) def __str__(self): from sympy.printing import sstr return sstr(self, order=None) # We don't define _repr_png_ here because it would add a large amount of # data to any notebook containing SymPy expressions, without adding # anything useful to the notebook. It can still enabled manually, e.g., # for the qtconsole, with init_printing(). def _repr_latex_(self): """ IPython/Jupyter LaTeX printing To change the behavior of this (e.g., pass in some settings to LaTeX), use init_printing(). init_printing() will also enable LaTeX printing for built in numeric types like ints and container types that contain SymPy objects, like lists and dictionaries of expressions. """ from sympy.printing.latex import latex s = latex(self, mode='plain') return "$\\displaystyle %s$" % s _repr_latex_orig = _repr_latex_ def atoms(self, *types): """Returns the atoms that form the current object. By default, only objects that are truly atomic and can't be divided into smaller pieces are returned: symbols, numbers, and number symbols like I and pi. It is possible to request atoms of any type, however, as demonstrated below. 
Examples ======== >>> from sympy import I, pi, sin >>> from sympy.abc import x, y >>> (1 + x + 2*sin(y + I*pi)).atoms() {1, 2, I, pi, x, y} If one or more types are given, the results will contain only those types of atoms. >>> from sympy import Number, NumberSymbol, Symbol >>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol) {x, y} >>> (1 + x + 2*sin(y + I*pi)).atoms(Number) {1, 2} >>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol) {1, 2, pi} >>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I) {1, 2, I, pi} Note that I (imaginary unit) and zoo (complex infinity) are special types of number symbols and are not part of the NumberSymbol class. The type can be given implicitly, too: >>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol {x, y} Be careful to check your assumptions when using the implicit option since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all integers in an expression: >>> from sympy import S >>> (1 + x + 2*sin(y + I*pi)).atoms(S(1)) {1} >>> (1 + x + 2*sin(y + I*pi)).atoms(S(2)) {1, 2} Finally, arguments to atoms() can select more than atomic atoms: any sympy type (loaded in core/__init__.py) can be listed as an argument and those types of "atoms" as found in scanning the arguments of the expression recursively: >>> from sympy import Function, Mul >>> from sympy.core.function import AppliedUndef >>> f = Function('f') >>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function) {f(x), sin(y + I*pi)} >>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef) {f(x)} >>> (1 + x + 2*sin(y + I*pi)).atoms(Mul) {I*pi, 2*sin(y + I*pi)} """ if types: types = tuple( [t if isinstance(t, type) else type(t) for t in types]) else: types = (Atom,) result = set() for expr in preorder_traversal(self): if isinstance(expr, types): result.add(expr) return result @property def free_symbols(self): """Return from the atoms of self those which are free symbols. For most expressions, all symbols are free symbols. For some classes this is not true. e.g. Integrals use Symbols for the dummy variables which are bound variables, so Integral has a method to return all symbols except those. Derivative keeps track of symbols with respect to which it will perform a derivative; those are bound variables, too, so it has its own free_symbols method. Any other method that uses bound variables should implement a free_symbols method.""" return set().union(*[a.free_symbols for a in self.args]) @property def expr_free_symbols(self): return set([]) def as_dummy(self): """Return the expression with any objects having structurally bound symbols replaced with unique, canonical symbols within the object in which they appear and having only the default assumption for commutativity being True. Examples ======== >>> from sympy import Integral, Symbol >>> from sympy.abc import x, y >>> r = Symbol('r', real=True) >>> Integral(r, (r, x)).as_dummy() Integral(_0, (_0, x)) >>> _.variables[0].is_real is None True Notes ===== Any object that has structural dummy variables should have a property, `bound_symbols` that returns a list of structural dummy symbols of the object itself. 
Lambda and Subs have bound symbols, but because of how they are cached, they already compare the same regardless of their bound symbols: >>> from sympy import Lambda >>> Lambda(x, x + 1) == Lambda(y, y + 1) True """ def can(x): d = {i: i.as_dummy() for i in x.bound_symbols} # mask free that shadow bound x = x.subs(d) c = x.canonical_variables # replace bound x = x.xreplace(c) # undo masking x = x.xreplace(dict((v, k) for k, v in d.items())) return x return self.replace( lambda x: hasattr(x, 'bound_symbols'), lambda x: can(x)) @property def canonical_variables(self): """Return a dictionary mapping any variable defined in ``self.bound_symbols`` to Symbols that do not clash with any existing symbol in the expression. Examples ======== >>> from sympy import Lambda >>> from sympy.abc import x >>> Lambda(x, 2*x).canonical_variables {x: _0} """ from sympy.core.symbol import Symbol from sympy.utilities.iterables import numbered_symbols if not hasattr(self, 'bound_symbols'): return {} dums = numbered_symbols('_') reps = {} v = self.bound_symbols # this free will include bound symbols that are not part of # self's bound symbols free = set([i.name for i in self.atoms(Symbol) - set(v)]) for v in v: d = next(dums) if v.is_Symbol: while v.name == d.name or d.name in free: d = next(dums) reps[v] = d return reps def rcall(self, *args): """Apply on the argument recursively through the expression tree. This method is used to simulate a common abuse of notation for operators. For instance in SymPy the the following will not work: ``(x+Lambda(y, 2*y))(z) == x+2*z``, however you can use >>> from sympy import Lambda >>> from sympy.abc import x, y, z >>> (x + Lambda(y, 2*y)).rcall(z) x + 2*z """ return Basic._recursive_call(self, args) @staticmethod def _recursive_call(expr_to_call, on_args): """Helper for rcall method.""" from sympy import Symbol def the_call_method_is_overridden(expr): for cls in getmro(type(expr)): if '__call__' in cls.__dict__: return cls != Basic if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call): if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is return expr_to_call # transformed into an UndefFunction else: return expr_to_call(*on_args) elif expr_to_call.args: args = [Basic._recursive_call( sub, on_args) for sub in expr_to_call.args] return type(expr_to_call)(*args) else: return expr_to_call def is_hypergeometric(self, k): from sympy.simplify import hypersimp return hypersimp(self, k) is not None @property def is_comparable(self): """Return True if self can be computed to a real number (or already is a real number) with precision, else False. Examples ======== >>> from sympy import exp_polar, pi, I >>> (I*exp_polar(I*pi/2)).is_comparable True >>> (I*exp_polar(I*pi*2)).is_comparable False A False result does not mean that `self` cannot be rewritten into a form that would be comparable. 
For example, the difference computed below is zero but without simplification it does not evaluate to a zero with precision: >>> e = 2**pi*(1 + 2**pi) >>> dif = e - e.expand() >>> dif.is_comparable False >>> dif.n(2)._prec 1 """ is_extended_real = self.is_extended_real if is_extended_real is False: return False if not self.is_number: return False # don't re-eval numbers that are already evaluated since # this will create spurious precision n, i = [p.evalf(2) if not p.is_Number else p for p in self.as_real_imag()] if not (i.is_Number and n.is_Number): return False if i: # if _prec = 1 we can't decide and if not, # the answer is False because numbers with # imaginary parts can't be compared # so return False return False else: return n._prec != 1 @property def func(self): """ The top-level function in an expression. The following should hold for all objects:: >> x == x.func(*x.args) Examples ======== >>> from sympy.abc import x >>> a = 2*x >>> a.func <class 'sympy.core.mul.Mul'> >>> a.args (2, x) >>> a.func(*a.args) 2*x >>> a == a.func(*a.args) True """ return self.__class__ @property def args(self): """Returns a tuple of arguments of 'self'. Examples ======== >>> from sympy import cot >>> from sympy.abc import x, y >>> cot(x).args (x,) >>> cot(x).args[0] x >>> (x*y).args (x, y) >>> (x*y).args[1] y Notes ===== Never use self._args, always use self.args. Only use _args in __new__ when creating a new function. Don't override .args() from Basic (so that it's easy to change the interface in the future if needed). """ return self._args @property def _sorted_args(self): """ The same as ``args``. Derived classes which don't fix an order on their arguments should override this method to produce the sorted representation. """ return self.args def as_content_primitive(self, radical=False, clear=True): """A stub to allow Basic args (like Tuple) to be skipped when computing the content and primitive components of an expression. See Also ======== sympy.core.expr.Expr.as_content_primitive """ return S.One, self def subs(self, *args, **kwargs): """ Substitutes old for new in an expression after sympifying args. `args` is either: - two arguments, e.g. foo.subs(old, new) - one iterable argument, e.g. foo.subs(iterable). The iterable may be o an iterable container with (old, new) pairs. In this case the replacements are processed in the order given with successive patterns possibly affecting replacements already made. o a dict or set whose key/value items correspond to old/new pairs. In this case the old/new pairs will be sorted by op count and in case of a tie, by number of args and the default_sort_key. The resulting sorted list is then processed as an iterable container (see previous). If the keyword ``simultaneous`` is True, the subexpressions will not be evaluated until all the substitutions have been made. 
Examples ======== >>> from sympy import pi, exp, limit, oo >>> from sympy.abc import x, y >>> (1 + x*y).subs(x, pi) pi*y + 1 >>> (1 + x*y).subs({x:pi, y:2}) 1 + 2*pi >>> (1 + x*y).subs([(x, pi), (y, 2)]) 1 + 2*pi >>> reps = [(y, x**2), (x, 2)] >>> (x + y).subs(reps) 6 >>> (x + y).subs(reversed(reps)) x**2 + 2 >>> (x**2 + x**4).subs(x**2, y) y**2 + y To replace only the x**2 but not the x**4, use xreplace: >>> (x**2 + x**4).xreplace({x**2: y}) x**4 + y To delay evaluation until all substitutions have been made, set the keyword ``simultaneous`` to True: >>> (x/y).subs([(x, 0), (y, 0)]) 0 >>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True) nan This has the added feature of not allowing subsequent substitutions to affect those already made: >>> ((x + y)/y).subs({x + y: y, y: x + y}) 1 >>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True) y/(x + y) In order to obtain a canonical result, unordered iterables are sorted by count_op length, number of arguments and by the default_sort_key to break any ties. All other iterables are left unsorted. >>> from sympy import sqrt, sin, cos >>> from sympy.abc import a, b, c, d, e >>> A = (sqrt(sin(2*x)), a) >>> B = (sin(2*x), b) >>> C = (cos(2*x), c) >>> D = (x, d) >>> E = (exp(x), e) >>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x) >>> expr.subs(dict([A, B, C, D, E])) a*c*sin(d*e) + b The resulting expression represents a literal replacement of the old arguments with the new arguments. This may not reflect the limiting behavior of the expression: >>> (x**3 - 3*x).subs({x: oo}) nan >>> limit(x**3 - 3*x, x, oo) oo If the substitution will be followed by numerical evaluation, it is better to pass the substitution to evalf as >>> (1/x).evalf(subs={x: 3.0}, n=21) 0.333333333333333333333 rather than >>> (1/x).subs({x: 3.0}).evalf(21) 0.333333333333333314830 as the former will ensure that the desired level of precision is obtained. 
See Also ======== replace: replacement capable of doing wildcard-like matching, parsing of match, and conditional replacements xreplace: exact node replacement in expr tree; also capable of using matching rules sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision """ from sympy.core.containers import Dict from sympy.utilities import default_sort_key from sympy import Dummy, Symbol unordered = False if len(args) == 1: sequence = args[0] if isinstance(sequence, set): unordered = True elif isinstance(sequence, (Dict, Mapping)): unordered = True sequence = sequence.items() elif not iterable(sequence): from sympy.utilities.misc import filldedent raise ValueError(filldedent(""" When a single argument is passed to subs it should be a dictionary of old: new pairs or an iterable of (old, new) tuples.""")) elif len(args) == 2: sequence = [args] else: raise ValueError("subs accepts either 1 or 2 arguments") sequence = list(sequence) for i, s in enumerate(sequence): if isinstance(s[0], string_types): # when old is a string we prefer Symbol s = Symbol(s[0]), s[1] try: s = [sympify(_, strict=not isinstance(_, string_types)) for _ in s] except SympifyError: # if it can't be sympified, skip it sequence[i] = None continue # skip if there is no change sequence[i] = None if _aresame(*s) else tuple(s) sequence = list(filter(None, sequence)) if unordered: sequence = dict(sequence) if not all(k.is_Atom for k in sequence): d = {} for o, n in sequence.items(): try: ops = o.count_ops(), len(o.args) except TypeError: ops = (0, 0) d.setdefault(ops, []).append((o, n)) newseq = [] for k in sorted(d.keys(), reverse=True): newseq.extend( sorted([v[0] for v in d[k]], key=default_sort_key)) sequence = [(k, sequence[k]) for k in newseq] del newseq, d else: sequence = sorted([(k, v) for (k, v) in sequence.items()], key=default_sort_key) if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs? reps = {} rv = self kwargs['hack2'] = True m = Dummy('subs_m') for old, new in sequence: com = new.is_commutative if com is None: com = True d = Dummy('subs_d', commutative=com) # using d*m so Subs will be used on dummy variables # in things like Derivative(f(x, y), x) in which x # is both free and bound rv = rv._subs(old, d*m, **kwargs) if not isinstance(rv, Basic): break reps[d] = new reps[m] = S.One # get rid of m return rv.xreplace(reps) else: rv = self for old, new in sequence: rv = rv._subs(old, new, **kwargs) if not isinstance(rv, Basic): break return rv @cacheit def _subs(self, old, new, **hints): """Substitutes an expression old -> new. If self is not equal to old then _eval_subs is called. If _eval_subs doesn't want to make any special replacement then a None is received which indicates that the fallback should be applied wherein a search for replacements is made amongst the arguments of self. >>> from sympy import Add >>> from sympy.abc import x, y, z Examples ======== Add's _eval_subs knows how to target x + y in the following so it makes the change: >>> (x + y + z).subs(x + y, 1) z + 1 Add's _eval_subs doesn't need to know how to find x + y in the following: >>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None True The returned None will cause the fallback routine to traverse the args and pass the z*(x + y) arg to Mul where the change will take place and the substitution will succeed: >>> (z*(x + y) + 3).subs(x + y, 1) z + 3 ** Developers Notes ** An _eval_subs routine for a class should be written if: 1) any arguments are not instances of Basic (e.g. 
bool, tuple); 2) some arguments should not be targeted (as in integration variables); 3) if there is something other than a literal replacement that should be attempted (as in Piecewise where the condition may be updated without doing a replacement). If it is overridden, here are some special cases that might arise: 1) If it turns out that no special change was made and all the original sub-arguments should be checked for replacements then None should be returned. 2) If it is necessary to do substitutions on a portion of the expression then _subs should be called. _subs will handle the case of any sub-expression being equal to old (which usually would not be the case) while its fallback will handle the recursion into the sub-arguments. For example, after Add's _eval_subs removes some matching terms it must process the remaining terms so it calls _subs on each of the un-matched terms and then adds them onto the terms previously obtained. 3) If the initial expression should remain unchanged then the original expression should be returned. (Whenever an expression is returned, modified or not, no further substitution of old -> new is attempted.) Sum's _eval_subs routine uses this strategy when a substitution is attempted on any of its summation variables. """ def fallback(self, old, new): """ Try to replace old with new in any of self's arguments. """ hit = False args = list(self.args) for i, arg in enumerate(args): if not hasattr(arg, '_eval_subs'): continue arg = arg._subs(old, new, **hints) if not _aresame(arg, args[i]): hit = True args[i] = arg if hit: rv = self.func(*args) hack2 = hints.get('hack2', False) if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack coeff = S.One nonnumber = [] for i in args: if i.is_Number: coeff *= i else: nonnumber.append(i) nonnumber = self.func(*nonnumber) if coeff is S.One: return nonnumber else: return self.func(coeff, nonnumber, evaluate=False) return rv return self if _aresame(self, old): return new rv = self._eval_subs(old, new) if rv is None: rv = fallback(self, old, new) return rv def _eval_subs(self, old, new): """Override this stub if you want to do anything more than attempt a replacement of old with new in the arguments of self. See also ======== _subs """ return None def xreplace(self, rule): """ Replace occurrences of objects within the expression. Parameters ========== rule : dict-like Expresses a replacement rule Returns ======= xreplace : the result of the replacement Examples ======== >>> from sympy import symbols, pi, exp >>> x, y, z = symbols('x y z') >>> (1 + x*y).xreplace({x: pi}) pi*y + 1 >>> (1 + x*y).xreplace({x: pi, y: 2}) 1 + 2*pi Replacements occur only if an entire node in the expression tree is matched: >>> (x*y + z).xreplace({x*y: pi}) z + pi >>> (x*y*z).xreplace({x*y: pi}) x*y*z >>> (2*x).xreplace({2*x: y, x: z}) y >>> (2*2*x).xreplace({2*x: y, x: z}) 4*z >>> (x + y + 2).xreplace({x + y: 2}) x + y + 2 >>> (x + 2 + exp(x + 2)).xreplace({x + 2: y}) x + exp(y) + 2 xreplace doesn't differentiate between free and bound symbols. 
In the following, subs(x, y) would not change x since it is a bound symbol, but xreplace does: >>> from sympy import Integral >>> Integral(x, (x, 1, 2*x)).xreplace({x: y}) Integral(y, (y, 1, 2*y)) Trying to replace x with an expression raises an error: >>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP ValueError: Invalid limits given: ((2*y, 1, 4*y),) See Also ======== replace: replacement capable of doing wildcard-like matching, parsing of match, and conditional replacements subs: substitution of subexpressions as defined by the objects themselves. """ value, _ = self._xreplace(rule) return value def _xreplace(self, rule): """ Helper for xreplace. Tracks whether a replacement actually occurred. """ if self in rule: return rule[self], True elif rule: args = [] changed = False for a in self.args: _xreplace = getattr(a, '_xreplace', None) if _xreplace is not None: a_xr = _xreplace(rule) args.append(a_xr[0]) changed |= a_xr[1] else: args.append(a) args = tuple(args) if changed: return self.func(*args), True return self, False @cacheit def has(self, *patterns): """ Test whether any subexpression matches any of the patterns. Examples ======== >>> from sympy import sin >>> from sympy.abc import x, y, z >>> (x**2 + sin(x*y)).has(z) False >>> (x**2 + sin(x*y)).has(x, y, z) True >>> x.has(x) True Note ``has`` is a structural algorithm with no knowledge of mathematics. Consider the following half-open interval: >>> from sympy.sets import Interval >>> i = Interval.Lopen(0, 5); i Interval.Lopen(0, 5) >>> i.args (0, 5, True, False) >>> i.has(4) # there is no "4" in the arguments False >>> i.has(0) # there *is* a "0" in the arguments True Instead, use ``contains`` to determine whether a number is in the interval or not: >>> i.contains(4) True >>> i.contains(0) False Note that ``expr.has(*patterns)`` is exactly equivalent to ``any(expr.has(p) for p in patterns)``. In particular, ``False`` is returned when the list of patterns is empty. >>> x.has() False """ return any(self._has(pattern) for pattern in patterns) def _has(self, pattern): """Helper for .has()""" from sympy.core.function import UndefinedFunction, Function if isinstance(pattern, UndefinedFunction): return any(f.func == pattern or f == pattern for f in self.atoms(Function, UndefinedFunction)) pattern = sympify(pattern) if isinstance(pattern, BasicMeta): return any(isinstance(arg, pattern) for arg in preorder_traversal(self)) _has_matcher = getattr(pattern, '_has_matcher', None) if _has_matcher is not None: match = _has_matcher() return any(match(arg) for arg in preorder_traversal(self)) else: return any(arg == pattern for arg in preorder_traversal(self)) def _has_matcher(self): """Helper for .has()""" return lambda other: self == other def replace(self, query, value, map=False, simultaneous=True, exact=None): """ Replace matching subexpressions of ``self`` with ``value``. If ``map = True`` then also return the mapping {old: new} where ``old`` was a sub-expression found with query and ``new`` is the replacement value for it. If the expression itself doesn't match the query, then the returned value will be ``self.xreplace(map)`` otherwise it should be ``self.subs(ordered(map.items()))``. Traverses an expression tree and performs replacement of matching subexpressions from the bottom to the top of the tree. The default approach is to do the replacement in a simultaneous fashion so changes made are targeted only once. If this is not desired or causes problems, ``simultaneous`` can be set to False. 
In addition, if an expression containing more than one Wild symbol is being used to match subexpressions and the ``exact`` flag is None it will be set to True so the match will only succeed if all non-zero values are received for each Wild that appears in the match pattern. Setting this to False accepts a match of 0; while setting it True accepts all matches that have a 0 in them. See example below for cautions. The list of possible combinations of queries and replacement values is listed below: Examples ======== Initial setup >>> from sympy import log, sin, cos, tan, Wild, Mul, Add >>> from sympy.abc import x, y >>> f = log(sin(x)) + tan(sin(x**2)) 1.1. type -> type obj.replace(type, newtype) When object of type ``type`` is found, replace it with the result of passing its argument(s) to ``newtype``. >>> f.replace(sin, cos) log(cos(x)) + tan(cos(x**2)) >>> sin(x).replace(sin, cos, map=True) (cos(x), {sin(x): cos(x)}) >>> (x*y).replace(Mul, Add) x + y 1.2. type -> func obj.replace(type, func) When object of type ``type`` is found, apply ``func`` to its argument(s). ``func`` must be written to handle the number of arguments of ``type``. >>> f.replace(sin, lambda arg: sin(2*arg)) log(sin(2*x)) + tan(sin(2*x**2)) >>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args))) sin(2*x*y) 2.1. pattern -> expr obj.replace(pattern(wild), expr(wild)) Replace subexpressions matching ``pattern`` with the expression written in terms of the Wild symbols in ``pattern``. >>> a, b = map(Wild, 'ab') >>> f.replace(sin(a), tan(a)) log(tan(x)) + tan(tan(x**2)) >>> f.replace(sin(a), tan(a/2)) log(tan(x/2)) + tan(tan(x**2/2)) >>> f.replace(sin(a), a) log(x) + tan(x**2) >>> (x*y).replace(a*x, a) y Matching is exact by default when more than one Wild symbol is used: matching fails unless the match gives non-zero values for all Wild symbols: >>> (2*x + y).replace(a*x + b, b - a) y - 2 >>> (2*x).replace(a*x + b, b - a) 2*x When set to False, the results may be non-intuitive: >>> (2*x).replace(a*x + b, b - a, exact=False) 2/x 2.2. pattern -> func obj.replace(pattern(wild), lambda wild: expr(wild)) All behavior is the same as in 2.1 but now a function in terms of pattern variables is used rather than an expression: >>> f.replace(sin(a), lambda a: sin(2*a)) log(sin(2*x)) + tan(sin(2*x**2)) 3.1. func -> func obj.replace(filter, func) Replace subexpression ``e`` with ``func(e)`` if ``filter(e)`` is True. >>> g = 2*sin(x**3) >>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2) 4*sin(x**9) The expression itself is also targeted by the query but is done in such a fashion that changes are not made twice. 
>>> e = x*(x*y + 1) >>> e.replace(lambda x: x.is_Mul, lambda x: 2*x) 2*x*(2*x*y + 1) When matching a single symbol, `exact` will default to True, but this may or may not be the behavior that is desired: Here, we want `exact=False`: >>> from sympy import Function >>> f = Function('f') >>> e = f(1) + f(0) >>> q = f(a), lambda a: f(a + 1) >>> e.replace(*q, exact=False) f(1) + f(2) >>> e.replace(*q, exact=True) f(0) + f(2) But here, the nature of matching makes selecting the right setting tricky: >>> e = x**(1 + y) >>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False) 1 >>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True) x**(-x - y + 1) >>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False) 1 >>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True) x**(1 - y) It is probably better to use a different form of the query that describes the target expression more precisely: >>> (1 + x**(1 + y)).replace( ... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1, ... lambda x: x.base**(1 - (x.exp - 1))) ... x**(1 - y) + 1 See Also ======== subs: substitution of subexpressions as defined by the objects themselves. xreplace: exact node replacement in expr tree; also capable of using matching rules """ from sympy.core.symbol import Dummy, Wild from sympy.simplify.simplify import bottom_up try: query = _sympify(query) except SympifyError: pass try: value = _sympify(value) except SympifyError: pass if isinstance(query, type): _query = lambda expr: isinstance(expr, query) if isinstance(value, type): _value = lambda expr, result: value(*expr.args) elif callable(value): _value = lambda expr, result: value(*expr.args) else: raise TypeError( "given a type, replace() expects another " "type or a callable") elif isinstance(query, Basic): _query = lambda expr: expr.match(query) if exact is None: exact = (len(query.atoms(Wild)) > 1) if isinstance(value, Basic): if exact: _value = lambda expr, result: (value.subs(result) if all(result.values()) else expr) else: _value = lambda expr, result: value.subs(result) elif callable(value): # match dictionary keys get the trailing underscore stripped # from them and are then passed as keywords to the callable; # if ``exact`` is True, only accept match if there are no null # values amongst those matched. if exact: _value = lambda expr, result: (value(** {str(k)[:-1]: v for k, v in result.items()}) if all(val for val in result.values()) else expr) else: _value = lambda expr, result: value(** {str(k)[:-1]: v for k, v in result.items()}) else: raise TypeError( "given an expression, replace() expects " "another expression or a callable") elif callable(query): _query = query if callable(value): _value = lambda expr, result: value(expr) else: raise TypeError( "given a callable, replace() expects " "another callable") else: raise TypeError( "first argument to replace() must be a " "type, an expression or a callable") mapping = {} # changes that took place mask = [] # the dummies that were used as change placeholders def rec_replace(expr): result = _query(expr) if result or result == {}: new = _value(expr, result) if new is not None and new != expr: mapping[expr] = new if simultaneous: # don't let this change during rebuilding; # XXX this may fail if the object being replaced # cannot be represented as a Dummy in the expression # tree, e.g. 
an ExprConditionPair in Piecewise # cannot be represented with a Dummy com = getattr(new, 'is_commutative', True) if com is None: com = True d = Dummy('rec_replace', commutative=com) mask.append((d, new)) expr = d else: expr = new return expr rv = bottom_up(self, rec_replace, atoms=True) # restore original expressions for Dummy symbols if simultaneous: mask = list(reversed(mask)) for o, n in mask: r = {o: n} # if a sub-expression could not be replaced with # a Dummy then this will fail; either filter # against such sub-expressions or figure out a # way to carry out simultaneous replacement # in this situation. rv = rv.xreplace(r) # if this fails, see above if not map: return rv else: if simultaneous: # restore subexpressions in mapping for o, n in mask: r = {o: n} mapping = {k.xreplace(r): v.xreplace(r) for k, v in mapping.items()} return rv, mapping def find(self, query, group=False): """Find all subexpressions matching a query. """ query = _make_find_query(query) results = list(filter(query, preorder_traversal(self))) if not group: return set(results) else: groups = {} for result in results: if result in groups: groups[result] += 1 else: groups[result] = 1 return groups def count(self, query): """Count the number of matching subexpressions. """ query = _make_find_query(query) return sum(bool(query(sub)) for sub in preorder_traversal(self)) def matches(self, expr, repl_dict={}, old=False): """ Helper method for match() that looks for a match between Wild symbols in self and expressions in expr. Examples ======== >>> from sympy import symbols, Wild, Basic >>> a, b, c = symbols('a b c') >>> x = Wild('x') >>> Basic(a + x, x).matches(Basic(a + b, c)) is None True >>> Basic(a + x, x).matches(Basic(a + b + c, b + c)) {x_: b + c} """ expr = sympify(expr) if not isinstance(expr, self.__class__): return None if self == expr: return repl_dict if len(self.args) != len(expr.args): return None d = repl_dict.copy() for arg, other_arg in zip(self.args, expr.args): if arg == other_arg: continue d = arg.xreplace(d).matches(other_arg, d, old=old) if d is None: return None return d def match(self, pattern, old=False): """ Pattern matching. Wild symbols match all. Return ``None`` when expression (self) does not match with pattern. Otherwise return a dictionary such that:: pattern.xreplace(self.match(pattern)) == self Examples ======== >>> from sympy import Wild >>> from sympy.abc import x, y >>> p = Wild("p") >>> q = Wild("q") >>> r = Wild("r") >>> e = (x+y)**(x+y) >>> e.match(p**p) {p_: x + y} >>> e.match(p**q) {p_: x + y, q_: x + y} >>> e = (2*x)**2 >>> e.match(p*q**r) {p_: 4, q_: x, r_: 2} >>> (p*q**r).xreplace(e.match(p*q**r)) 4*x**2 The ``old`` flag will give the old-style pattern matching where expressions and patterns are essentially solved to give the match. Both of the following give None unless ``old=True``: >>> (x - 2).match(p - x, old=True) {p_: 2*x - 2} >>> (2/x).match(p*x, old=True) {p_: 2/x**2} """ pattern = sympify(pattern) return pattern.matches(self, old=old) def count_ops(self, visual=None): """wrapper for count_ops that returns the operation count.""" from sympy import count_ops return count_ops(self, visual) def doit(self, **hints): """Evaluate objects that are not evaluated by default like limits, integrals, sums and products. All objects of this kind will be evaluated recursively, unless some species were excluded via 'hints' or unless the 'deep' hint was set to 'False'. 
>>> from sympy import Integral >>> from sympy.abc import x >>> 2*Integral(x, x) 2*Integral(x, x) >>> (2*Integral(x, x)).doit() x**2 >>> (2*Integral(x, x)).doit(deep=False) 2*Integral(x, x) """ if hints.get('deep', True): terms = [term.doit(**hints) if isinstance(term, Basic) else term for term in self.args] return self.func(*terms) else: return self def simplify(self, **kwargs): """See the simplify function in sympy.simplify""" from sympy.simplify import simplify return simplify(self, **kwargs) def _eval_rewrite(self, pattern, rule, **hints): if self.is_Atom: if hasattr(self, rule): return getattr(self, rule)() return self if hints.get('deep', True): args = [a._eval_rewrite(pattern, rule, **hints) if isinstance(a, Basic) else a for a in self.args] else: args = self.args if pattern is None or isinstance(self, pattern): if hasattr(self, rule): rewritten = getattr(self, rule)(*args, **hints) if rewritten is not None: return rewritten return self.func(*args) if hints.get('evaluate', True) else self def _accept_eval_derivative(self, s): # This method needs to be overridden by array-like objects return s._visit_eval_derivative_scalar(self) def _visit_eval_derivative_scalar(self, base): # Base is a scalar # Types are (base: scalar, self: scalar) return base._eval_derivative(self) def _visit_eval_derivative_array(self, base): # Types are (base: array/matrix, self: scalar) # Base is some kind of array/matrix, # it should have `.applyfunc(lambda x: x.diff(self)` implemented: return base._eval_derivative_array(self) def _eval_derivative_n_times(self, s, n): # This is the default evaluator for derivatives (as called by `diff` # and `Derivative`), it will attempt a loop to derive the expression # `n` times by calling the corresponding `_eval_derivative` method, # while leaving the derivative unevaluated if `n` is symbolic. This # method should be overridden if the object has a closed form for its # symbolic n-th derivative. from sympy import Integer if isinstance(n, (int, Integer)): obj = self for i in range(n): obj2 = obj._accept_eval_derivative(s) if obj == obj2 or obj2 is None: break obj = obj2 return obj2 else: return None def rewrite(self, *args, **hints): """ Rewrite functions in terms of other functions. Rewrites expression containing applications of functions of one kind in terms of functions of different kind. For example you can rewrite trigonometric functions as complex exponentials or combinatorial functions as gamma function. As a pattern this function accepts a list of functions to to rewrite (instances of DefinedFunction class). As rule you can use string or a destination function instance (in this case rewrite() will use the str() function). There is also the possibility to pass hints on how to rewrite the given expressions. For now there is only one such hint defined called 'deep'. When 'deep' is set to False it will forbid functions to rewrite their contents. 
Examples ======== >>> from sympy import sin, exp >>> from sympy.abc import x Unspecified pattern: >>> sin(x).rewrite(exp) -I*(exp(I*x) - exp(-I*x))/2 Pattern as a single function: >>> sin(x).rewrite(sin, exp) -I*(exp(I*x) - exp(-I*x))/2 Pattern as a list of functions: >>> sin(x).rewrite([sin, ], exp) -I*(exp(I*x) - exp(-I*x))/2 """ if not args: return self else: pattern = args[:-1] if isinstance(args[-1], string_types): rule = '_eval_rewrite_as_' + args[-1] else: try: rule = '_eval_rewrite_as_' + args[-1].__name__ except: rule = '_eval_rewrite_as_' + args[-1].__class__.__name__ if not pattern: return self._eval_rewrite(None, rule, **hints) else: if iterable(pattern[0]): pattern = pattern[0] pattern = [p for p in pattern if self.has(p)] if pattern: return self._eval_rewrite(tuple(pattern), rule, **hints) else: return self _constructor_postprocessor_mapping = {} @classmethod def _exec_constructor_postprocessors(cls, obj): # WARNING: This API is experimental. # This is an experimental API that introduces constructor # postprosessors for SymPy Core elements. If an argument of a SymPy # expression has a `_constructor_postprocessor_mapping` attribute, it will # be interpreted as a dictionary containing lists of postprocessing # functions for matching expression node names. clsname = obj.__class__.__name__ postprocessors = defaultdict(list) for i in obj.args: try: postprocessor_mappings = ( Basic._constructor_postprocessor_mapping[cls].items() for cls in type(i).mro() if cls in Basic._constructor_postprocessor_mapping ) for k, v in chain.from_iterable(postprocessor_mappings): postprocessors[k].extend([j for j in v if j not in postprocessors[k]]) except TypeError: pass for f in postprocessors.get(clsname, []): obj = f(obj) return obj class Atom(Basic): """ A parent class for atomic things. An atom is an expression with no subexpressions. Examples ======== Symbol, Number, Rational, Integer, ... But not: Add, Mul, Pow, ... """ is_Atom = True __slots__ = [] def matches(self, expr, repl_dict={}, old=False): if self == expr: return repl_dict def xreplace(self, rule, hack2=False): return rule.get(self, self) def doit(self, **hints): return self @classmethod def class_key(cls): return 2, 0, cls.__name__ @cacheit def sort_key(self, order=None): return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One def _eval_simplify(self, **kwargs): return self @property def _sorted_args(self): # this is here as a safeguard against accidentally using _sorted_args # on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args) # since there are no args. So the calling routine should be checking # to see that this property is not called for Atoms. raise AttributeError('Atoms have no args. It might be necessary' ' to make a check for Atoms in the calling code.') def _aresame(a, b): """Return True if a and b are structurally the same, else False. 
Examples ======== In SymPy (as in Python) two numbers compare the same if they have the same underlying base-2 representation even though they may not be the same type: >>> from sympy import S >>> 2.0 == S(2) True >>> 0.5 == S.Half True This routine was written to provide a query for such cases that would give false when the types do not match: >>> from sympy.core.basic import _aresame >>> _aresame(S(2.0), S(2)) False """ from .numbers import Number from .function import AppliedUndef, UndefinedFunction as UndefFunc if isinstance(a, Number) and isinstance(b, Number): return a == b and a.__class__ == b.__class__ for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)): if i != j or type(i) != type(j): if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))): if i.class_key() != j.class_key(): return False else: return False return True def _atomic(e, recursive=False): """Return atom-like quantities as far as substitution is concerned: Derivatives, Functions and Symbols. Don't return any 'atoms' that are inside such quantities unless they also appear outside, too, unless `recursive` is True. Examples ======== >>> from sympy import Derivative, Function, cos >>> from sympy.abc import x, y >>> from sympy.core.basic import _atomic >>> f = Function('f') >>> _atomic(x + y) {x, y} >>> _atomic(x + f(y)) {x, f(y)} >>> _atomic(Derivative(f(x), x) + cos(x) + y) {y, cos(x), Derivative(f(x), x)} """ from sympy import Derivative, Function, Symbol pot = preorder_traversal(e) seen = set() if isinstance(e, Basic): free = getattr(e, "free_symbols", None) if free is None: return {e} else: return set() atoms = set() for p in pot: if p in seen: pot.skip() continue seen.add(p) if isinstance(p, Symbol) and p in free: atoms.add(p) elif isinstance(p, (Derivative, Function)): if not recursive: pot.skip() atoms.add(p) return atoms class preorder_traversal(Iterator): """ Do a pre-order traversal of a tree. This iterator recursively yields nodes that it has visited in a pre-order fashion. That is, it yields the current node then descends through the tree breadth-first to yield all of a node's children's pre-order traversal. For an expression, the order of the traversal depends on the order of .args, which in many cases can be arbitrary. Parameters ========== node : sympy expression The expression to traverse. keys : (default None) sort key(s) The key(s) used to sort args of Basic objects. When None, args of Basic objects are processed in arbitrary order. If key is defined, it will be passed along to ordered() as the only key(s) to use to sort the arguments; if ``key`` is simply True then the default keys of ordered will be used. Yields ====== subtree : sympy expression All of the subtrees in the tree. Examples ======== >>> from sympy import symbols >>> from sympy.core.basic import preorder_traversal >>> x, y, z = symbols('x y z') The nodes are returned in the order that they are encountered unless key is given; simply passing key=True will guarantee that the traversal is unique. 
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP [z*(x + y), z, x + y, y, x] >>> list(preorder_traversal((x + y)*z, keys=True)) [z*(x + y), z, x + y, x, y] """ def __init__(self, node, keys=None): self._skip_flag = False self._pt = self._preorder_traversal(node, keys) def _preorder_traversal(self, node, keys): yield node if self._skip_flag: self._skip_flag = False return if isinstance(node, Basic): if not keys and hasattr(node, '_argset'): # LatticeOp keeps args as a set. We should use this if we # don't care about the order, to prevent unnecessary sorting. args = node._argset else: args = node.args if keys: if keys != True: args = ordered(args, keys, default=False) else: args = ordered(args) for arg in args: for subtree in self._preorder_traversal(arg, keys): yield subtree elif iterable(node): for item in node: for subtree in self._preorder_traversal(item, keys): yield subtree def skip(self): """ Skip yielding current node's (last yielded node's) subtrees. Examples ======== >>> from sympy.core import symbols >>> from sympy.core.basic import preorder_traversal >>> x, y, z = symbols('x y z') >>> pt = preorder_traversal((x+y*z)*z) >>> for i in pt: ... print(i) ... if i == x+y*z: ... pt.skip() z*(x + y*z) z x + y*z """ self._skip_flag = True def __next__(self): return next(self._pt) def __iter__(self): return self def _make_find_query(query): """Convert the argument of Basic.find() into a callable""" try: query = sympify(query) except SympifyError: pass if isinstance(query, type): return lambda expr: isinstance(expr, query) elif isinstance(query, Basic): return lambda expr: expr.match(query) is not None return query
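# ---------------------------------------------------------------------------
# A minimal usage sketch of the Basic tree protocol documented above:
# the func/args rebuild invariant, structural substitution (xreplace vs subs),
# and pre-order traversal.  This is an illustrative sketch, assuming only a
# standard SymPy installation; it is not part of the module itself.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sympy import symbols, sin
    from sympy.core.basic import preorder_traversal

    x, y = symbols("x y")
    expr = x**2 + sin(x*y)

    # Every node can be rebuilt from its head (func) and its children (args).
    assert expr == expr.func(*expr.args)

    # xreplace rewrites only whole matching nodes; subs sympifies its
    # arguments and recurses through the tree.
    assert expr.xreplace({x*y: y}) == x**2 + sin(y)
    assert expr.subs(x, 1) == 1 + sin(y)

    # Pre-order walk over every subtree (the order of args may be arbitrary).
    for node in preorder_traversal(expr):
        print(node)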
from __future__ import print_function, division from math import log as _log from .sympify import _sympify from .cache import cacheit from .singleton import S from .expr import Expr from .evalf import PrecisionExhausted from .function import (_coeff_isneg, expand_complex, expand_multinomial, expand_mul) from .logic import fuzzy_bool, fuzzy_not, fuzzy_and from .compatibility import as_int, range from .parameters import global_parameters from sympy.utilities.iterables import sift from mpmath.libmp import sqrtrem as mpmath_sqrtrem from math import sqrt as _sqrt def isqrt(n): """Return the largest integer less than or equal to sqrt(n).""" if n < 0: raise ValueError("n must be nonnegative") n = int(n) # Fast path: with IEEE 754 binary64 floats and a correctly-rounded # math.sqrt, int(math.sqrt(n)) works for any integer n satisfying 0 <= n < # 4503599761588224 = 2**52 + 2**27. But Python doesn't guarantee either # IEEE 754 format floats *or* correct rounding of math.sqrt, so check the # answer and fall back to the slow method if necessary. if n < 4503599761588224: s = int(_sqrt(n)) if 0 <= n - s*s <= 2*s: return s return integer_nthroot(n, 2)[0] def integer_nthroot(y, n): """ Return a tuple containing x = floor(y**(1/n)) and a boolean indicating whether the result is exact (that is, whether x**n == y). Examples ======== >>> from sympy import integer_nthroot >>> integer_nthroot(16, 2) (4, True) >>> integer_nthroot(26, 2) (5, False) To simply determine if a number is a perfect square, the is_square function should be used: >>> from sympy.ntheory.primetest import is_square >>> is_square(26) False See Also ======== sympy.ntheory.primetest.is_square integer_log """ y, n = as_int(y), as_int(n) if y < 0: raise ValueError("y must be nonnegative") if n < 1: raise ValueError("n must be positive") if y in (0, 1): return y, True if n == 1: return y, True if n == 2: x, rem = mpmath_sqrtrem(y) return int(x), not rem if n > y: return 1, False # Get initial estimate for Newton's method. Care must be taken to # avoid overflow try: guess = int(y**(1./n) + 0.5) except OverflowError: exp = _log(y, 2)/n if exp > 53: shift = int(exp - 53) guess = int(2.0**(exp - shift) + 1) << shift else: guess = int(2.0**exp) if guess > 2**50: # Newton iteration xprev, x = -1, guess while 1: t = x**(n - 1) xprev, x = x, ((n - 1)*x + y//t)//n if abs(x - xprev) < 2: break else: x = guess # Compensate t = x**n while t < y: x += 1 t = x**n while t > y: x -= 1 t = x**n return int(x), t == y # int converts long to int if possible def integer_log(y, x): r""" Returns ``(e, bool)`` where e is the largest nonnegative integer such that :math:`|y| \geq |x^e|` and ``bool`` is True if $y = x^e$. 
Examples ======== >>> from sympy import integer_log >>> integer_log(125, 5) (3, True) >>> integer_log(17, 9) (1, False) >>> integer_log(4, -2) (2, True) >>> integer_log(-125,-5) (3, True) See Also ======== integer_nthroot sympy.ntheory.primetest.is_square sympy.ntheory.factor_.multiplicity sympy.ntheory.factor_.perfect_power """ if x == 1: raise ValueError('x cannot take value as 1') if y == 0: raise ValueError('y cannot take value as 0') if x in (-2, 2): x = int(x) y = as_int(y) e = y.bit_length() - 1 return e, x**e == y if x < 0: n, b = integer_log(y if y > 0 else -y, -x) return n, b and bool(n % 2 if y < 0 else not n % 2) x = as_int(x) y = as_int(y) r = e = 0 while y >= x: d = x m = 1 while y >= d: y, rem = divmod(y, d) r = r or rem e += m if y > d: d *= d m *= 2 return e, r == 0 and y == 1 class Pow(Expr): """ Defines the expression x**y as "x raised to a power y" Singleton definitions involving (0, 1, -1, oo, -oo, I, -I): +--------------+---------+-----------------------------------------------+ | expr | value | reason | +==============+=========+===============================================+ | z**0 | 1 | Although arguments over 0**0 exist, see [2]. | +--------------+---------+-----------------------------------------------+ | z**1 | z | | +--------------+---------+-----------------------------------------------+ | (-oo)**(-1) | 0 | | +--------------+---------+-----------------------------------------------+ | (-1)**-1 | -1 | | +--------------+---------+-----------------------------------------------+ | S.Zero**-1 | zoo | This is not strictly true, as 0**-1 may be | | | | undefined, but is convenient in some contexts | | | | where the base is assumed to be positive. | +--------------+---------+-----------------------------------------------+ | 1**-1 | 1 | | +--------------+---------+-----------------------------------------------+ | oo**-1 | 0 | | +--------------+---------+-----------------------------------------------+ | 0**oo | 0 | Because for all complex numbers z near | | | | 0, z**oo -> 0. | +--------------+---------+-----------------------------------------------+ | 0**-oo | zoo | This is not strictly true, as 0**oo may be | | | | oscillating between positive and negative | | | | values or rotating in the complex plane. | | | | It is convenient, however, when the base | | | | is positive. | +--------------+---------+-----------------------------------------------+ | 1**oo | nan | Because there are various cases where | | 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), | | | | but lim( x(t)**y(t), t) != 1. See [3]. | +--------------+---------+-----------------------------------------------+ | b**zoo | nan | Because b**z has no limit as z -> zoo | +--------------+---------+-----------------------------------------------+ | (-1)**oo | nan | Because of oscillations in the limit. | | (-1)**(-oo) | | | +--------------+---------+-----------------------------------------------+ | oo**oo | oo | | +--------------+---------+-----------------------------------------------+ | oo**-oo | 0 | | +--------------+---------+-----------------------------------------------+ | (-oo)**oo | nan | | | (-oo)**-oo | | | +--------------+---------+-----------------------------------------------+ | oo**I | nan | oo**e could probably be best thought of as | | (-oo)**I | | the limit of x**e for real x as x tends to | | | | oo. If e is I, then the limit does not exist | | | | and nan is used to indicate that. 
| +--------------+---------+-----------------------------------------------+ | oo**(1+I) | zoo | If the real part of e is positive, then the | | (-oo)**(1+I) | | limit of abs(x**e) is oo. So the limit value | | | | is zoo. | +--------------+---------+-----------------------------------------------+ | oo**(-1+I) | 0 | If the real part of e is negative, then the | | -oo**(-1+I) | | limit is 0. | +--------------+---------+-----------------------------------------------+ Because symbolic computations are more flexible that floating point calculations and we prefer to never return an incorrect answer, we choose not to conform to all IEEE 754 conventions. This helps us avoid extra test-case code in the calculation of limits. See Also ======== sympy.core.numbers.Infinity sympy.core.numbers.NegativeInfinity sympy.core.numbers.NaN References ========== .. [1] https://en.wikipedia.org/wiki/Exponentiation .. [2] https://en.wikipedia.org/wiki/Exponentiation#Zero_to_the_power_of_zero .. [3] https://en.wikipedia.org/wiki/Indeterminate_forms """ is_Pow = True __slots__ = ['is_commutative'] @cacheit def __new__(cls, b, e, evaluate=None): if evaluate is None: evaluate = global_parameters.evaluate from sympy.functions.elementary.exponential import exp_polar b = _sympify(b) e = _sympify(e) # XXX: Maybe only Expr should be allowed... from sympy.core.relational import Relational if isinstance(b, Relational) or isinstance(e, Relational): raise TypeError('Relational can not be used in Pow') if evaluate: if e is S.ComplexInfinity: return S.NaN if e is S.Zero: return S.One elif e is S.One: return b elif e == -1 and not b: return S.ComplexInfinity # Only perform autosimplification if exponent or base is a Symbol or number elif (b.is_Symbol or b.is_number) and (e.is_Symbol or e.is_number) and\ e.is_integer and _coeff_isneg(b): if e.is_even: b = -b elif e.is_odd: return -Pow(-b, e) if S.NaN in (b, e): # XXX S.NaN**x -> S.NaN under assumption that x != 0 return S.NaN elif b is S.One: if abs(e).is_infinite: return S.NaN return S.One else: # recognize base as E if not e.is_Atom and b is not S.Exp1 and not isinstance(b, exp_polar): from sympy import numer, denom, log, sign, im, factor_terms c, ex = factor_terms(e, sign=False).as_coeff_Mul() den = denom(ex) if isinstance(den, log) and den.args[0] == b: return S.Exp1**(c*numer(ex)) elif den.is_Add: s = sign(im(b)) if s.is_Number and s and den == \ log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi: return S.Exp1**(c*numer(ex)) obj = b._eval_power(e) if obj is not None: return obj obj = Expr.__new__(cls, b, e) obj = cls._exec_constructor_postprocessors(obj) if not isinstance(obj, Pow): return obj obj.is_commutative = (b.is_commutative and e.is_commutative) return obj @property def base(self): return self._args[0] @property def exp(self): return self._args[1] @classmethod def class_key(cls): return 3, 2, cls.__name__ def _eval_refine(self, assumptions): from sympy.assumptions.ask import ask, Q b, e = self.as_base_exp() if ask(Q.integer(e), assumptions) and _coeff_isneg(b): if ask(Q.even(e), assumptions): return Pow(-b, e) elif ask(Q.odd(e), assumptions): return -Pow(-b, e) def _eval_power(self, other): from sympy import Abs, arg, exp, floor, im, log, re, sign b, e = self.as_base_exp() if b is S.NaN: return (b**e)**other # let __new__ handle it s = None if other.is_integer: s = 1 elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)... 
s = 1 elif e.is_extended_real is not None: # helper functions =========================== def _half(e): """Return True if the exponent has a literal 2 as the denominator, else None.""" if getattr(e, 'q', None) == 2: return True n, d = e.as_numer_denom() if n.is_integer and d == 2: return True def _n2(e): """Return ``e`` evaluated to a Number with 2 significant digits, else None.""" try: rv = e.evalf(2, strict=True) if rv.is_Number: return rv except PrecisionExhausted: pass # =================================================== if e.is_extended_real: # we need _half(other) with constant floor or # floor(S.Half - e*arg(b)/2/pi) == 0 # handle -1 as special case if e == -1: # floor arg. is 1/2 + arg(b)/2/pi if _half(other): if b.is_negative is True: return S.NegativeOne**other*Pow(-b, e*other) if b.is_extended_real is False: return Pow(b.conjugate()/Abs(b)**2, other) elif e.is_even: if b.is_extended_real: b = abs(b) if b.is_imaginary: b = abs(im(b))*S.ImaginaryUnit if (abs(e) < 1) == True or e == 1: s = 1 # floor = 0 elif b.is_extended_nonnegative: s = 1 # floor = 0 elif re(b).is_extended_nonnegative and (abs(e) < 2) == True: s = 1 # floor = 0 elif fuzzy_not(im(b).is_zero) and abs(e) == 2: s = 1 # floor = 0 elif _half(other): s = exp(2*S.Pi*S.ImaginaryUnit*other*floor( S.Half - e*arg(b)/(2*S.Pi))) if s.is_extended_real and _n2(sign(s) - s) == 0: s = sign(s) else: s = None else: # e.is_extended_real is False requires: # _half(other) with constant floor or # floor(S.Half - im(e*log(b))/2/pi) == 0 try: s = exp(2*S.ImaginaryUnit*S.Pi*other* floor(S.Half - im(e*log(b))/2/S.Pi)) # be careful to test that s is -1 or 1 b/c sign(I) == I: # so check that s is real if s.is_extended_real and _n2(sign(s) - s) == 0: s = sign(s) else: s = None except PrecisionExhausted: s = None if s is not None: return s*Pow(b, e*other) def _eval_Mod(self, q): r"""A dispatched function to compute `b^e \bmod q`, dispatched by ``Mod``. Notes ===== Algorithms: 1. For unevaluated integer power, use built-in ``pow`` function with 3 arguments, if powers are not too large wrt base. 2. For very large powers, use totient reduction if e >= lg(m). Bound on m, is for safe factorization memory wise ie m^(1/4). For pollard-rho to be faster than built-in pow lg(e) > m^(1/4) check is added. 3. For any unevaluated power found in `b` or `e`, the step 2 will be recursed down to the base and the exponent such that the `b \bmod q` becomes the new base and ``\phi(q) + e \bmod \phi(q)`` becomes the new exponent, and then the computation for the reduced expression can be done. """ from sympy.ntheory import totient from .mod import Mod base, exp = self.base, self.exp if exp.is_integer and exp.is_positive: if q.is_integer and base % q == 0: return S.Zero if base.is_Integer and exp.is_Integer and q.is_Integer: b, e, m = int(base), int(exp), int(q) mb = m.bit_length() if mb <= 80 and e >= mb and e.bit_length()**4 >= m: phi = totient(m) return Integer(pow(b, phi + e%phi, m)) return Integer(pow(b, e, m)) if isinstance(base, Pow) and base.is_integer and base.is_number: base = Mod(base, q) return Mod(Pow(base, exp, evaluate=False), q) if isinstance(exp, Pow) and exp.is_integer and exp.is_number: bit_length = int(q).bit_length() # XXX Mod-Pow actually attempts to do a hanging evaluation # if this dispatched function returns None. # May need some fixes in the dispatcher itself. 
if bit_length <= 80: phi = totient(q) exp = phi + Mod(exp, phi) return Mod(Pow(base, exp, evaluate=False), q) def _eval_is_even(self): if self.exp.is_integer and self.exp.is_positive: return self.base.is_even def _eval_is_negative(self): ext_neg = Pow._eval_is_extended_negative(self) if ext_neg is True: return self.is_finite return ext_neg def _eval_is_positive(self): ext_pos = Pow._eval_is_extended_positive(self) if ext_pos is True: return self.is_finite return ext_pos def _eval_is_extended_positive(self): from sympy import log if self.base == self.exp: if self.base.is_extended_nonnegative: return True elif self.base.is_positive: if self.exp.is_extended_real: return True elif self.base.is_extended_negative: if self.exp.is_even: return True if self.exp.is_odd: return False elif self.base.is_zero: if self.exp.is_extended_real: return self.exp.is_zero elif self.base.is_extended_nonpositive: if self.exp.is_odd: return False elif self.base.is_imaginary: if self.exp.is_integer: m = self.exp % 4 if m.is_zero: return True if m.is_integer and m.is_zero is False: return False if self.exp.is_imaginary: return log(self.base).is_imaginary def _eval_is_extended_negative(self): if self.base.is_extended_negative: if self.exp.is_odd and self.base.is_finite: return True if self.exp.is_even: return False elif self.base.is_extended_positive: if self.exp.is_extended_real: return False elif self.base.is_zero: if self.exp.is_extended_real: return False elif self.base.is_extended_nonnegative: if self.exp.is_extended_nonnegative: return False elif self.base.is_extended_nonpositive: if self.exp.is_even: return False elif self.base.is_extended_real: if self.exp.is_even: return False def _eval_is_zero(self): if self.base.is_zero: if self.exp.is_extended_positive: return True elif self.exp.is_extended_nonpositive: return False elif self.base.is_zero is False: if self.exp.is_negative: return self.base.is_infinite elif self.exp.is_nonnegative: return False elif self.exp.is_infinite: if (1 - abs(self.base)).is_extended_positive: return self.exp.is_extended_positive elif (1 - abs(self.base)).is_extended_negative: return self.exp.is_extended_negative else: # when self.base.is_zero is None return None def _eval_is_integer(self): b, e = self.args if b.is_rational: if b.is_integer is False and e.is_positive: return False # rat**nonneg if b.is_integer and e.is_integer: if b is S.NegativeOne: return True if e.is_nonnegative or e.is_positive: return True if b.is_integer and e.is_negative and (e.is_finite or e.is_integer): if fuzzy_not((b - 1).is_zero) and fuzzy_not((b + 1).is_zero): return False if b.is_Number and e.is_Number: check = self.func(*self.args) return check.is_Integer def _eval_is_extended_real(self): from sympy import arg, exp, log, Mul real_b = self.base.is_extended_real if real_b is None: if self.base.func == exp and self.base.args[0].is_imaginary: return self.exp.is_imaginary return real_e = self.exp.is_extended_real if real_e is None: return if real_b and real_e: if self.base.is_extended_positive: return True elif self.base.is_extended_nonnegative and self.exp.is_extended_nonnegative: return True elif self.exp.is_integer and self.base.is_extended_nonzero: return True elif self.exp.is_integer and self.exp.is_nonnegative: return True elif self.base.is_extended_negative: if self.exp.is_Rational: return False if real_e and self.exp.is_extended_negative and self.base.is_zero is False: return Pow(self.base, -self.exp).is_extended_real im_b = self.base.is_imaginary im_e = self.exp.is_imaginary if im_b: if 
self.exp.is_integer: if self.exp.is_even: return True elif self.exp.is_odd: return False elif im_e and log(self.base).is_imaginary: return True elif self.exp.is_Add: c, a = self.exp.as_coeff_Add() if c and c.is_Integer: return Mul( self.base**c, self.base**a, evaluate=False).is_extended_real elif self.base in (-S.ImaginaryUnit, S.ImaginaryUnit): if (self.exp/2).is_integer is False: return False if real_b and im_e: if self.base is S.NegativeOne: return True c = self.exp.coeff(S.ImaginaryUnit) if c: if self.base.is_rational and c.is_rational: if self.base.is_nonzero and (self.base - 1).is_nonzero and c.is_nonzero: return False ok = (c*log(self.base)/S.Pi).is_integer if ok is not None: return ok if real_b is False: # we already know it's not imag i = arg(self.base)*self.exp/S.Pi return i.is_integer def _eval_is_complex(self): if all(a.is_complex for a in self.args) and self._eval_is_finite(): return True def _eval_is_imaginary(self): from sympy import arg, log if self.base.is_imaginary: if self.exp.is_integer: odd = self.exp.is_odd if odd is not None: return odd return if self.exp.is_imaginary: imlog = log(self.base).is_imaginary if imlog is not None: return False # I**i -> real; (2*I)**i -> complex ==> not imaginary if self.base.is_extended_real and self.exp.is_extended_real: if self.base.is_positive: return False else: rat = self.exp.is_rational if not rat: return rat if self.exp.is_integer: return False else: half = (2*self.exp).is_integer if half: return self.base.is_negative return half if self.base.is_extended_real is False: # we already know it's not imag i = arg(self.base)*self.exp/S.Pi isodd = (2*i).is_odd if isodd is not None: return isodd if self.exp.is_negative: return (1/self).is_imaginary def _eval_is_odd(self): if self.exp.is_integer: if self.exp.is_positive: return self.base.is_odd elif self.exp.is_nonnegative and self.base.is_odd: return True elif self.base is S.NegativeOne: return True def _eval_is_finite(self): if self.exp.is_negative: if self.base.is_zero: return False if self.base.is_infinite or self.base.is_nonzero: return True c1 = self.base.is_finite if c1 is None: return c2 = self.exp.is_finite if c2 is None: return if c1 and c2: if self.exp.is_nonnegative or fuzzy_not(self.base.is_zero): return True def _eval_is_prime(self): ''' An integer raised to the n(>=2)-th power cannot be a prime. ''' if self.base.is_integer and self.exp.is_integer and (self.exp - 1).is_positive: return False def _eval_is_composite(self): """ A power is composite if both base and exponent are greater than 1 """ if (self.base.is_integer and self.exp.is_integer and ((self.base - 1).is_positive and (self.exp - 1).is_positive or (self.base + 1).is_negative and self.exp.is_positive and self.exp.is_even)): return True def _eval_is_polar(self): return self.base.is_polar def _eval_subs(self, old, new): from sympy import exp, log, Symbol def _check(ct1, ct2, old): """Return (bool, pow, remainder_pow) where, if bool is True, then the exponent of Pow `old` will combine with `pow` so the substitution is valid, otherwise bool will be False. For noncommutative objects, `pow` will be an integer, and a factor `Pow(old.base, remainder_pow)` needs to be included. If there is no such factor, None is returned. For commutative objects, remainder_pow is always None. 
cti are the coefficient and terms of an exponent of self or old In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y) will give y**2 since (b**x)**2 == b**(2*x); if that equality does not hold then the substitution should not occur so `bool` will be False. """ coeff1, terms1 = ct1 coeff2, terms2 = ct2 if terms1 == terms2: if old.is_commutative: # Allow fractional powers for commutative objects pow = coeff1/coeff2 try: as_int(pow, strict=False) combines = True except ValueError: combines = isinstance(Pow._eval_power( Pow(*old.as_base_exp(), evaluate=False), pow), (Pow, exp, Symbol)) return combines, pow, None else: # With noncommutative symbols, substitute only integer powers if not isinstance(terms1, tuple): terms1 = (terms1,) if not all(term.is_integer for term in terms1): return False, None, None try: # Round pow toward zero pow, remainder = divmod(as_int(coeff1), as_int(coeff2)) if pow < 0 and remainder != 0: pow += 1 remainder -= as_int(coeff2) if remainder == 0: remainder_pow = None else: remainder_pow = Mul(remainder, *terms1) return True, pow, remainder_pow except ValueError: # Can't substitute pass return False, None, None if old == self.base: return new**self.exp._subs(old, new) # issue 10829: (4**x - 3*y + 2).subs(2**x, y) -> y**2 - 3*y + 2 if isinstance(old, self.func) and self.exp == old.exp: l = log(self.base, old.base) if l.is_Number: return Pow(new, l) if isinstance(old, self.func) and self.base == old.base: if self.exp.is_Add is False: ct1 = self.exp.as_independent(Symbol, as_Add=False) ct2 = old.exp.as_independent(Symbol, as_Add=False) ok, pow, remainder_pow = _check(ct1, ct2, old) if ok: # issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2 result = self.func(new, pow) if remainder_pow is not None: result = Mul(result, Pow(old.base, remainder_pow)) return result else: # b**(6*x + a).subs(b**(3*x), y) -> y**2 * b**a # exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2)) oarg = old.exp new_l = [] o_al = [] ct2 = oarg.as_coeff_mul() for a in self.exp.args: newa = a._subs(old, new) ct1 = newa.as_coeff_mul() ok, pow, remainder_pow = _check(ct1, ct2, old) if ok: new_l.append(new**pow) if remainder_pow is not None: o_al.append(remainder_pow) continue elif not old.is_commutative and not newa.is_integer: # If any term in the exponent is non-integer, # we do not do any substitutions in the noncommutative case return o_al.append(newa) if new_l: expo = Add(*o_al) new_l.append(Pow(self.base, expo, evaluate=False) if expo != 1 else self.base) return Mul(*new_l) if isinstance(old, exp) and self.exp.is_extended_real and self.base.is_positive: ct1 = old.args[0].as_independent(Symbol, as_Add=False) ct2 = (self.exp*log(self.base)).as_independent( Symbol, as_Add=False) ok, pow, remainder_pow = _check(ct1, ct2, old) if ok: result = self.func(new, pow) # (2**x).subs(exp(x*log(2)), z) -> z if remainder_pow is not None: result = Mul(result, Pow(old.base, remainder_pow)) return result def as_base_exp(self): """Return base and exp of self. If base is 1/Integer, then return Integer, -exp. 
If this extra processing is not needed, the base and exp properties will give the raw arguments Examples ======== >>> from sympy import Pow, S >>> p = Pow(S.Half, 2, evaluate=False) >>> p.as_base_exp() (2, -2) >>> p.args (1/2, 2) """ b, e = self.args if b.is_Rational and b.p == 1 and b.q != 1: return Integer(b.q), -e return b, e def _eval_adjoint(self): from sympy.functions.elementary.complexes import adjoint i, p = self.exp.is_integer, self.base.is_positive if i: return adjoint(self.base)**self.exp if p: return self.base**adjoint(self.exp) if i is False and p is False: expanded = expand_complex(self) if expanded != self: return adjoint(expanded) def _eval_conjugate(self): from sympy.functions.elementary.complexes import conjugate as c i, p = self.exp.is_integer, self.base.is_positive if i: return c(self.base)**self.exp if p: return self.base**c(self.exp) if i is False and p is False: expanded = expand_complex(self) if expanded != self: return c(expanded) if self.is_extended_real: return self def _eval_transpose(self): from sympy.functions.elementary.complexes import transpose i, p = self.exp.is_integer, (self.base.is_complex or self.base.is_infinite) if p: return self.base**self.exp if i: return transpose(self.base)**self.exp if i is False and p is False: expanded = expand_complex(self) if expanded != self: return transpose(expanded) def _eval_expand_power_exp(self, **hints): """a**(n + m) -> a**n*a**m""" b = self.base e = self.exp if e.is_Add and e.is_commutative: expr = [] for x in e.args: expr.append(self.func(self.base, x)) return Mul(*expr) return self.func(b, e) def _eval_expand_power_base(self, **hints): """(a*b)**n -> a**n * b**n""" force = hints.get('force', False) b = self.base e = self.exp if not b.is_Mul: return self cargs, nc = b.args_cnc(split_1=False) # expand each term - this is top-level-only # expansion but we have to watch out for things # that don't have an _eval_expand method if nc: nc = [i._eval_expand_power_base(**hints) if hasattr(i, '_eval_expand_power_base') else i for i in nc] if e.is_Integer: if e.is_positive: rv = Mul(*nc*e) else: rv = Mul(*[i**-1 for i in nc[::-1]]*-e) if cargs: rv *= Mul(*cargs)**e return rv if not cargs: return self.func(Mul(*nc), e, evaluate=False) nc = [Mul(*nc)] # sift the commutative bases other, maybe_real = sift(cargs, lambda x: x.is_extended_real is False, binary=True) def pred(x): if x is S.ImaginaryUnit: return S.ImaginaryUnit polar = x.is_polar if polar: return True if polar is None: return fuzzy_bool(x.is_extended_nonnegative) sifted = sift(maybe_real, pred) nonneg = sifted[True] other += sifted[None] neg = sifted[False] imag = sifted[S.ImaginaryUnit] if imag: I = S.ImaginaryUnit i = len(imag) % 4 if i == 0: pass elif i == 1: other.append(I) elif i == 2: if neg: nonn = -neg.pop() if nonn is not S.One: nonneg.append(nonn) else: neg.append(S.NegativeOne) else: if neg: nonn = -neg.pop() if nonn is not S.One: nonneg.append(nonn) else: neg.append(S.NegativeOne) other.append(I) del imag # bring out the bases that can be separated from the base if force or e.is_integer: # treat all commutatives the same and put nc in other cargs = nonneg + neg + other other = nc else: # this is just like what is happening automatically, except # that now we are doing it for an arbitrary exponent for which # no automatic expansion is done assert not e.is_Integer # handle negatives by making them all positive and putting # the residual -1 in other if len(neg) > 1: o = S.One if not other and neg[0].is_Number: o *= neg.pop(0) if len(neg) % 2: o = -o for n 
in neg: nonneg.append(-n) if o is not S.One: other.append(o) elif neg and other: if neg[0].is_Number and neg[0] is not S.NegativeOne: other.append(S.NegativeOne) nonneg.append(-neg[0]) else: other.extend(neg) else: other.extend(neg) del neg cargs = nonneg other += nc rv = S.One if cargs: rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs]) if other: rv *= self.func(Mul(*other), e, evaluate=False) return rv def _eval_expand_multinomial(self, **hints): """(a + b + ..)**n -> a**n + n*a**(n-1)*b + .., n is nonzero integer""" base, exp = self.args result = self if exp.is_Rational and exp.p > 0 and base.is_Add: if not exp.is_Integer: n = Integer(exp.p // exp.q) if not n: return result else: radical, result = self.func(base, exp - n), [] expanded_base_n = self.func(base, n) if expanded_base_n.is_Pow: expanded_base_n = \ expanded_base_n._eval_expand_multinomial() for term in Add.make_args(expanded_base_n): result.append(term*radical) return Add(*result) n = int(exp) if base.is_commutative: order_terms, other_terms = [], [] for b in base.args: if b.is_Order: order_terms.append(b) else: other_terms.append(b) if order_terms: # (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n) f = Add(*other_terms) o = Add(*order_terms) if n == 2: return expand_multinomial(f**n, deep=False) + n*f*o else: g = expand_multinomial(f**(n - 1), deep=False) return expand_mul(f*g, deep=False) + n*g*o if base.is_number: # Efficiently expand expressions of the form (a + b*I)**n # where 'a' and 'b' are real numbers and 'n' is integer. a, b = base.as_real_imag() if a.is_Rational and b.is_Rational: if not a.is_Integer: if not b.is_Integer: k = self.func(a.q * b.q, n) a, b = a.p*b.q, a.q*b.p else: k = self.func(a.q, n) a, b = a.p, a.q*b elif not b.is_Integer: k = self.func(b.q, n) a, b = a*b.q, b.p else: k = 1 a, b, c, d = int(a), int(b), 1, 0 while n: if n & 1: c, d = a*c - b*d, b*c + a*d n -= 1 a, b = a*a - b*b, 2*a*b n //= 2 I = S.ImaginaryUnit if k == 1: return c + I*d else: return Integer(c)/k + I*d/k p = other_terms # (x + y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3 # in this particular example: # p = [x,y]; n = 3 # so now it's easy to get the correct result -- we get the # coefficients first: from sympy import multinomial_coefficients from sympy.polys.polyutils import basic_from_dict expansion_dict = multinomial_coefficients(len(p), n) # in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3} # and now construct the expression. return basic_from_dict(expansion_dict, *p) else: if n == 2: return Add(*[f*g for f in base.args for g in base.args]) else: multi = (base**(n - 1))._eval_expand_multinomial() if multi.is_Add: return Add(*[f*g for f in base.args for g in multi.args]) else: # XXX can this ever happen if base was an Add? 
return Add(*[f*multi for f in base.args]) elif (exp.is_Rational and exp.p < 0 and base.is_Add and abs(exp.p) > exp.q): return 1 / self.func(base, -exp)._eval_expand_multinomial() elif exp.is_Add and base.is_Number: # a + b a b # n --> n n , where n, a, b are Numbers coeff, tail = S.One, S.Zero for term in exp.args: if term.is_Number: coeff *= self.func(base, term) else: tail += term return coeff * self.func(base, tail) else: return result def as_real_imag(self, deep=True, **hints): from sympy import atan2, cos, im, re, sin from sympy.polys.polytools import poly if self.exp.is_Integer: exp = self.exp re_e, im_e = self.base.as_real_imag(deep=deep) if not im_e: return self, S.Zero a, b = symbols('a b', cls=Dummy) if exp >= 0: if re_e.is_Number and im_e.is_Number: # We can be more efficient in this case expr = expand_multinomial(self.base**exp) if expr != self: return expr.as_real_imag() expr = poly( (a + b)**exp) # a = re, b = im; expr = (a + b*I)**exp else: mag = re_e**2 + im_e**2 re_e, im_e = re_e/mag, -im_e/mag if re_e.is_Number and im_e.is_Number: # We can be more efficient in this case expr = expand_multinomial((re_e + im_e*S.ImaginaryUnit)**-exp) if expr != self: return expr.as_real_imag() expr = poly((a + b)**-exp) # Terms with even b powers will be real r = [i for i in expr.terms() if not i[0][1] % 2] re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) # Terms with odd b powers will be imaginary r = [i for i in expr.terms() if i[0][1] % 4 == 1] im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) r = [i for i in expr.terms() if i[0][1] % 4 == 3] im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) return (re_part.subs({a: re_e, b: S.ImaginaryUnit*im_e}), im_part1.subs({a: re_e, b: im_e}) + im_part3.subs({a: re_e, b: -im_e})) elif self.exp.is_Rational: re_e, im_e = self.base.as_real_imag(deep=deep) if im_e.is_zero and self.exp is S.Half: if re_e.is_extended_nonnegative: return self, S.Zero if re_e.is_extended_nonpositive: return S.Zero, (-self.base)**self.exp # XXX: This is not totally correct since for x**(p/q) with # x being imaginary there are actually q roots, but # only a single one is returned from here. r = self.func(self.func(re_e, 2) + self.func(im_e, 2), S.Half) t = atan2(im_e, re_e) rp, tp = self.func(r, self.exp), t*self.exp return (rp*cos(tp), rp*sin(tp)) else: if deep: hints['complex'] = False expanded = self.expand(deep, **hints) if hints.get('ignore') == expanded: return None else: return (re(expanded), im(expanded)) else: return (re(self), im(self)) def _eval_derivative(self, s): from sympy import log dbase = self.base.diff(s) dexp = self.exp.diff(s) return self * (dexp * log(self.base) + dbase * self.exp/self.base) def _eval_evalf(self, prec): base, exp = self.as_base_exp() base = base._evalf(prec) if not exp.is_Integer: exp = exp._evalf(prec) if exp.is_negative and base.is_number and base.is_extended_real is False: base = base.conjugate() / (base * base.conjugate())._evalf(prec) exp = -exp return self.func(base, exp).expand() return self.func(base, exp) def _eval_is_polynomial(self, syms): if self.exp.has(*syms): return False if self.base.has(*syms): return bool(self.base._eval_is_polynomial(syms) and self.exp.is_Integer and (self.exp >= 0)) else: return True def _eval_is_rational(self): # The evaluation of self.func below can be very expensive in the case # of integer**integer if the exponent is large. 
We should try to exit # before that if possible: if (self.exp.is_integer and self.base.is_rational and fuzzy_not(fuzzy_and([self.exp.is_negative, self.base.is_zero]))): return True p = self.func(*self.as_base_exp()) # in case it's unevaluated if not p.is_Pow: return p.is_rational b, e = p.as_base_exp() if e.is_Rational and b.is_Rational: # we didn't check that e is not an Integer # because Rational**Integer autosimplifies return False if e.is_integer: if b.is_rational: if fuzzy_not(b.is_zero) or e.is_nonnegative: return True if b == e: # always rational, even for 0**0 return True elif b.is_irrational: return e.is_zero def _eval_is_algebraic(self): def _is_one(expr): try: return (expr - 1).is_zero except ValueError: # when the operation is not allowed return False if self.base.is_zero or _is_one(self.base): return True elif self.exp.is_rational: if self.base.is_algebraic is False: return self.exp.is_zero if self.base.is_zero is False: if self.exp.is_nonzero: return self.base.is_algebraic elif self.base.is_algebraic: return True if self.exp.is_positive: return self.base.is_algebraic elif self.base.is_algebraic and self.exp.is_algebraic: if ((fuzzy_not(self.base.is_zero) and fuzzy_not(_is_one(self.base))) or self.base.is_integer is False or self.base.is_irrational): return self.exp.is_rational def _eval_is_rational_function(self, syms): if self.exp.has(*syms): return False if self.base.has(*syms): return self.base._eval_is_rational_function(syms) and \ self.exp.is_Integer else: return True def _eval_is_algebraic_expr(self, syms): if self.exp.has(*syms): return False if self.base.has(*syms): return self.base._eval_is_algebraic_expr(syms) and \ self.exp.is_Rational else: return True def _eval_rewrite_as_exp(self, base, expo, **kwargs): from sympy import exp, log, I, arg if base.is_zero or base.has(exp) or expo.has(exp): return base**expo if base.has(Symbol): # delay evaluation if expo is non symbolic # (as exp(x*log(5)) automatically reduces to x**5) return exp(log(base)*expo, evaluate=expo.has(Symbol)) else: return exp((log(abs(base)) + I*arg(base))*expo) def as_numer_denom(self): if not self.is_commutative: return self, S.One base, exp = self.as_base_exp() n, d = base.as_numer_denom() # this should be the same as ExpBase.as_numer_denom wrt # exponent handling neg_exp = exp.is_negative if not neg_exp and not (-exp).is_negative: neg_exp = _coeff_isneg(exp) int_exp = exp.is_integer # the denominator cannot be separated from the numerator if # its sign is unknown unless the exponent is an integer, e.g. # sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the # denominator is negative the numerator and denominator can # be negated and the denominator (now positive) separated. 
if not (d.is_extended_real or int_exp): n = base d = S.One dnonpos = d.is_nonpositive if dnonpos: n, d = -n, -d elif dnonpos is None and not int_exp: n = base d = S.One if neg_exp: n, d = d, n exp = -exp if exp.is_infinite: if n is S.One and d is not S.One: return n, self.func(d, exp) if n is not S.One and d is S.One: return self.func(n, exp), d return self.func(n, exp), self.func(d, exp) def matches(self, expr, repl_dict={}, old=False): expr = _sympify(expr) # special case, pattern = 1 and expr.exp can match to 0 if expr is S.One: d = repl_dict.copy() d = self.exp.matches(S.Zero, d) if d is not None: return d # make sure the expression to be matched is an Expr if not isinstance(expr, Expr): return None b, e = expr.as_base_exp() # special case number sb, se = self.as_base_exp() if sb.is_Symbol and se.is_Integer and expr: if e.is_rational: return sb.matches(b**(e/se), repl_dict) return sb.matches(expr**(1/se), repl_dict) d = repl_dict.copy() d = self.base.matches(b, d) if d is None: return None d = self.exp.xreplace(d).matches(e, d) if d is None: return Expr.matches(self, expr, repl_dict) return d def _eval_nseries(self, x, n, logx): # NOTE! This function is an important part of the gruntz algorithm # for computing limits. It has to return a generalized power # series with coefficients in C(log, log(x)). In more detail: # It has to return an expression # c_0*x**e_0 + c_1*x**e_1 + ... (finitely many terms) # where e_i are numbers (not necessarily integers) and c_i are # expressions involving only numbers, the log function, and log(x). from sympy import ceiling, collect, exp, log, O, Order, powsimp b, e = self.args if e.is_Integer: if e > 0: # positive integer powers are easy to expand, e.g.: # sin(x)**4 = (x - x**3/3 + ...)**4 = ... return expand_multinomial(self.func(b._eval_nseries(x, n=n, logx=logx), e), deep=False) elif e is S.NegativeOne: # this is also easy to expand using the formula: # 1/(1 + x) = 1 - x + x**2 - x**3 ... # so we need to rewrite base to the form "1 + x" nuse = n cf = 1 try: ord = b.as_leading_term(x) cf = Order(ord, x).getn() if cf and cf.is_Number: nuse = n + 2*ceiling(cf) else: cf = 1 except NotImplementedError: pass b_orig, prefactor = b, O(1, x) while prefactor.is_Order: nuse += 1 b = b_orig._eval_nseries(x, n=nuse, logx=logx) prefactor = b.as_leading_term(x) # express "rest" as: rest = 1 + k*x**l + ... + O(x**n) rest = expand_mul((b - prefactor)/prefactor) if rest.is_Order: return 1/prefactor + rest/prefactor + O(x**n, x) k, l = rest.leadterm(x) if l.is_Rational and l > 0: pass elif l.is_number and l > 0: l = l.evalf() elif l == 0: k = k.simplify() if k == 0: # if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to # factor the w**4 out using collect: return 1/collect(prefactor, x) else: raise NotImplementedError() else: raise NotImplementedError() if cf < 0: cf = S.One/abs(cf) try: dn = Order(1/prefactor, x).getn() if dn and dn < 0: pass else: dn = 0 except NotImplementedError: dn = 0 terms = [1/prefactor] for m in range(1, ceiling((n - dn + 1)/l*cf)): new_term = terms[-1]*(-rest) if new_term.is_Pow: new_term = new_term._eval_expand_multinomial( deep=False) else: new_term = expand_mul(new_term, deep=False) terms.append(new_term) terms.append(O(x**n, x)) return powsimp(Add(*terms), deep=True, combine='exp') else: # negative powers are rewritten to the cases above, for # example: # sin(x)**(-4) = 1/(sin(x)**4) = ... 
# and expand the denominator: nuse, denominator = n, O(1, x) while denominator.is_Order: denominator = (b**(-e))._eval_nseries(x, n=nuse, logx=logx) nuse += 1 if 1/denominator == self: return self # now we have a type 1/f(x), that we know how to expand return (1/denominator)._eval_nseries(x, n=n, logx=logx) if e.has(Symbol): return exp(e*log(b))._eval_nseries(x, n=n, logx=logx) # see if the base is as simple as possible bx = b while bx.is_Pow and bx.exp.is_Rational: bx = bx.base if bx == x: return self # work for b(x)**e where e is not an Integer and does not contain x # and hopefully has no other symbols def e2int(e): """return the integer value (if possible) of e and a flag indicating whether it is bounded or not.""" n = e.limit(x, 0) infinite = n.is_infinite if not infinite: # XXX was int or floor intended? int used to behave like floor # so int(-Rational(1, 2)) returned -1 rather than int's 0 try: n = int(n) except TypeError: # well, the n is something more complicated (like 1 + log(2)) try: n = int(n.evalf()) + 1 # XXX why is 1 being added? except TypeError: pass # hope that base allows this to be resolved n = _sympify(n) return n, infinite order = O(x**n, x) ei, infinite = e2int(e) b0 = b.limit(x, 0) if infinite and (b0 is S.One or b0.has(Symbol)): # XXX what order if b0 is S.One: resid = (b - 1) if resid.is_positive: return S.Infinity elif resid.is_negative: return S.Zero raise ValueError('cannot determine sign of %s' % resid) return b0**ei if (b0 is S.Zero or b0.is_infinite): if infinite is not False: return b0**e # XXX what order if not ei.is_number: # if not, how will we proceed? raise ValueError( 'expecting numerical exponent but got %s' % ei) nuse = n - ei if e.is_extended_real and e.is_positive: lt = b.as_leading_term(x) # Try to correct nuse (= m) guess from: # (lt + rest + O(x**m))**e = # lt**e*(1 + rest/lt + O(x**m)/lt)**e = # lt**e + ... + O(x**m)*lt**(e - 1) = ... + O(x**n) try: cf = Order(lt, x).getn() nuse = ceiling(n - cf*(e - 1)) except NotImplementedError: pass bs = b._eval_nseries(x, n=nuse, logx=logx) terms = bs.removeO() if terms.is_Add: bs = terms lt = terms.as_leading_term(x) # bs -> lt + rest -> lt*(1 + (bs/lt - 1)) return ((self.func(lt, e) * self.func((bs/lt).expand(), e).nseries( x, n=nuse, logx=logx)).expand() + order) if bs.is_Add: from sympy import O # So, bs + O() == terms c = Dummy('c') res = [] for arg in bs.args: if arg.is_Order: arg = c*arg.expr res.append(arg) bs = Add(*res) rv = (bs**e).series(x).subs(c, O(1, x)) rv += order return rv rv = bs**e if terms != bs: rv += order return rv # either b0 is bounded but neither 1 nor 0 or e is infinite # b -> b0 + (b - b0) -> b0 * (1 + (b/b0 - 1)) o2 = order*(b0**-e) z = (b/b0 - 1) o = O(z, x) if o is S.Zero or o2 is S.Zero: infinite = True else: if o.expr.is_number: e2 = log(o2.expr*x)/log(x) else: e2 = log(o2.expr)/log(o.expr) n, infinite = e2int(e2) if infinite: # requested accuracy gives infinite series, # order is probably non-polynomial e.g. O(exp(-1/x), x). 
r = 1 + z else: l = [] g = None for i in range(n + 2): g = self._taylor_term(i, z, g) g = g.nseries(x, n=n, logx=logx) l.append(g) r = Add(*l) return expand_mul(r*b0**e) + order def _eval_as_leading_term(self, x): from sympy import exp, log if not self.exp.has(x): return self.func(self.base.as_leading_term(x), self.exp) return exp(self.exp * log(self.base)).as_leading_term(x) @cacheit def _taylor_term(self, n, x, *previous_terms): # of (1 + x)**e from sympy import binomial return binomial(self.exp, n) * self.func(x, n) def _sage_(self): return self.args[0]._sage_()**self.args[1]._sage_() def as_content_primitive(self, radical=False, clear=True): """Return the tuple (R, self/R) where R is the positive Rational extracted from self. Examples ======== >>> from sympy import sqrt >>> sqrt(4 + 4*sqrt(2)).as_content_primitive() (2, sqrt(1 + sqrt(2))) >>> sqrt(3 + 3*sqrt(2)).as_content_primitive() (1, sqrt(3)*sqrt(1 + sqrt(2))) >>> from sympy import expand_power_base, powsimp, Mul >>> from sympy.abc import x, y >>> ((2*x + 2)**2).as_content_primitive() (4, (x + 1)**2) >>> (4**((1 + y)/2)).as_content_primitive() (2, 4**(y/2)) >>> (3**((1 + y)/2)).as_content_primitive() (1, 3**((y + 1)/2)) >>> (3**((5 + y)/2)).as_content_primitive() (9, 3**((y + 1)/2)) >>> eq = 3**(2 + 2*x) >>> powsimp(eq) == eq True >>> eq.as_content_primitive() (9, 3**(2*x)) >>> powsimp(Mul(*_)) 3**(2*x + 2) >>> eq = (2 + 2*x)**y >>> s = expand_power_base(eq); s.is_Mul, s (False, (2*x + 2)**y) >>> eq.as_content_primitive() (1, (2*(x + 1))**y) >>> s = expand_power_base(_[1]); s.is_Mul, s (True, 2**y*(x + 1)**y) See docstring of Expr.as_content_primitive for more examples. """ b, e = self.as_base_exp() b = _keep_coeff(*b.as_content_primitive(radical=radical, clear=clear)) ce, pe = e.as_content_primitive(radical=radical, clear=clear) if b.is_Rational: #e #= ce*pe #= ce*(h + t) #= ce*h + ce*t #=> self #= b**(ce*h)*b**(ce*t) #= b**(cehp/cehq)*b**(ce*t) #= b**(iceh + r/cehq)*b**(ce*t) #= b**(iceh)*b**(r/cehq)*b**(ce*t) #= b**(iceh)*b**(ce*t + r/cehq) h, t = pe.as_coeff_Add() if h.is_Rational: ceh = ce*h c = self.func(b, ceh) r = S.Zero if not c.is_Rational: iceh, r = divmod(ceh.p, ceh.q) c = self.func(b, iceh) return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q)) e = _keep_coeff(ce, pe) # b**e = (h*t)**e = h**e*t**e = c*m*t**e if e.is_Rational and b.is_Mul: h, t = b.as_content_primitive(radical=radical, clear=clear) # h is positive c, m = self.func(h, e).as_coeff_Mul() # so c is positive m, me = m.as_base_exp() if m is S.One or me == e: # probably always true # return the following, not return c, m*Pow(t, e) # which would change Pow into Mul; we let sympy # decide what to do by using the unevaluated Mul, e.g # should it stay as sqrt(2 + 2*sqrt(5)) or become # sqrt(2)*sqrt(1 + sqrt(5)) return c, self.func(_keep_coeff(m, t), e) return S.One, self.func(b, e) def is_constant(self, *wrt, **flags): expr = self if flags.get('simplify', True): expr = expr.simplify() b, e = expr.as_base_exp() bz = b.equals(0) if bz: # recalculate with assumptions in case it's unevaluated new = b**e if new != expr: return new.is_constant() econ = e.is_constant(*wrt) bcon = b.is_constant(*wrt) if bcon: if econ: return True bz = b.equals(0) if bz is False: return False elif bcon is None: return None return e.equals(0) def _eval_difference_delta(self, n, step): b, e = self.args if e.has(n) and not b.has(n): new_e = e.subs(n, n + step) return (b**(new_e - e) - 1) * self from .add import Add from .numbers import Integer from .mul import Mul, _keep_coeff from .symbol 
import Symbol, Dummy, symbols
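# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the sympy sources above: Pow._eval_Mod
# describes reducing a very large exponent with Euler's totient before
# falling back on the built-in three-argument ``pow``.  The helper below is
# a hypothetical standalone rendering of that idea; the name
# ``pow_mod_totient`` does not exist in sympy, and integer b, e, m with
# e > 0 and m > 1 are assumed.
from sympy.ntheory import totient


def pow_mod_totient(b, e, m):
    """Compute ``b**e % m`` without ever building the full power.

    Once e is at least phi(m), ``b**e`` is congruent to
    ``b**(phi(m) + e % phi(m))`` modulo m, so the exponent stays small.
    """
    if b % m == 0:
        return 0
    phi = int(totient(m))
    if e >= phi:
        # Keep the reduced exponent >= phi so the congruence also holds
        # when gcd(b, m) != 1, mirroring the guard used in Pow._eval_Mod.
        e = phi + e % phi
    return pow(b, e, m)


# The sketch agrees with the built-in modular pow on a large exponent.
assert pow_mod_totient(3, 10**6, 101) == pow(3, 10**6, 101) == 1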
83ea992bbde037a035fceb03aa7cd0fe5e775aaa2e27f88e4e339365fab1fb45
"""Core module. Provides the basic operations needed in sympy. """ from .sympify import sympify, SympifyError from .cache import cacheit from .basic import Basic, Atom, preorder_traversal from .singleton import S from .expr import Expr, AtomicExpr, UnevaluatedExpr from .symbol import Symbol, Wild, Dummy, symbols, var from .numbers import Number, Float, Rational, Integer, NumberSymbol, \ RealNumber, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo, \ AlgebraicNumber, comp, mod_inverse from .power import Pow, integer_nthroot, integer_log from .mul import Mul, prod from .add import Add from .mod import Mod from .relational import ( Rel, Eq, Ne, Lt, Le, Gt, Ge, Equality, GreaterThan, LessThan, Unequality, StrictGreaterThan, StrictLessThan ) from .multidimensional import vectorize from .function import Lambda, WildFunction, Derivative, diff, FunctionClass, \ Function, Subs, expand, PoleError, count_ops, \ expand_mul, expand_log, expand_func, \ expand_trig, expand_complex, expand_multinomial, nfloat, \ expand_power_base, expand_power_exp, arity from .evalf import PrecisionExhausted, N from .containers import Tuple, Dict from .exprtools import gcd_terms, factor_terms, factor_nc from .parameters import evaluate # expose singletons Catalan = S.Catalan EulerGamma = S.EulerGamma GoldenRatio = S.GoldenRatio TribonacciConstant = S.TribonacciConstant
c017d0e64eaa8918a0be8499030342bc1d8e8cf60f6efb91a99a790a94151ecf
from __future__ import print_function, division from sympy.utilities.exceptions import SymPyDeprecationWarning from .add import _unevaluated_Add, Add from .basic import S from .compatibility import ordered from .basic import Basic from .expr import Expr from .evalf import EvalfMixin from .sympify import _sympify from .parameters import global_parameters from sympy.logic.boolalg import Boolean, BooleanAtom __all__ = ( 'Rel', 'Eq', 'Ne', 'Lt', 'Le', 'Gt', 'Ge', 'Relational', 'Equality', 'Unequality', 'StrictLessThan', 'LessThan', 'StrictGreaterThan', 'GreaterThan', ) # Note, see issue 4986. Ideally, we wouldn't want to subclass both Boolean # and Expr. def _canonical(cond): # return a condition in which all relationals are canonical reps = {r: r.canonical for r in cond.atoms(Relational)} return cond.xreplace(reps) # XXX: AttributeError was being caught here but it wasn't triggered by any of # the tests so I've removed it... class Relational(Boolean, EvalfMixin): """Base class for all relation types. Subclasses of Relational should generally be instantiated directly, but Relational can be instantiated with a valid ``rop`` value to dispatch to the appropriate subclass. Parameters ========== rop : str or None Indicates what subclass to instantiate. Valid values can be found in the keys of Relational.ValidRelationalOperator. Examples ======== >>> from sympy import Rel >>> from sympy.abc import x, y >>> Rel(y, x + x**2, '==') Eq(y, x**2 + x) """ __slots__ = [] is_Relational = True # ValidRelationOperator - Defined below, because the necessary classes # have not yet been defined def __new__(cls, lhs, rhs, rop=None, **assumptions): # If called by a subclass, do nothing special and pass on to Basic. if cls is not Relational: return Basic.__new__(cls, lhs, rhs, **assumptions) # XXX: Why do this? There should be a separate function to make a # particular subclass of Relational from a string. # # If called directly with an operator, look up the subclass # corresponding to that operator and delegate to it cls = cls.ValidRelationOperator.get(rop, None) if cls is None: raise ValueError("Invalid relational operator symbol: %r" % rop) # XXX: Why should the below be removed when Py2 is not supported? # # /// drop when Py2 is no longer supported if not issubclass(cls, (Eq, Ne)): # validate that Booleans are not being used in a relational # other than Eq/Ne; # Note: Symbol is a subclass of Boolean but is considered # acceptable here. from sympy.core.symbol import Symbol from sympy.logic.boolalg import Boolean def unacceptable(side): return isinstance(side, Boolean) and not isinstance(side, Symbol) if unacceptable(lhs) or unacceptable(rhs): from sympy.utilities.misc import filldedent raise TypeError(filldedent(''' A Boolean argument can only be used in Eq and Ne; all other relationals expect real expressions. ''')) # \\\ return cls(lhs, rhs, **assumptions) @property def lhs(self): """The left-hand side of the relation.""" return self._args[0] @property def rhs(self): """The right-hand side of the relation.""" return self._args[1] @property def reversed(self): """Return the relationship with sides reversed. Examples ======== >>> from sympy import Eq >>> from sympy.abc import x >>> Eq(x, 1) Eq(x, 1) >>> _.reversed Eq(1, x) >>> x < 1 x < 1 >>> _.reversed 1 > x """ ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne} a, b = self.args return Relational.__new__(ops.get(self.func, self.func), b, a) @property def reversedsign(self): """Return the relationship with signs reversed. 
Examples ======== >>> from sympy import Eq >>> from sympy.abc import x >>> Eq(x, 1) Eq(x, 1) >>> _.reversedsign Eq(-x, -1) >>> x < 1 x < 1 >>> _.reversedsign -x > -1 """ a, b = self.args if not (isinstance(a, BooleanAtom) or isinstance(b, BooleanAtom)): ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne} return Relational.__new__(ops.get(self.func, self.func), -a, -b) else: return self @property def negated(self): """Return the negated relationship. Examples ======== >>> from sympy import Eq >>> from sympy.abc import x >>> Eq(x, 1) Eq(x, 1) >>> _.negated Ne(x, 1) >>> x < 1 x < 1 >>> _.negated x >= 1 Notes ===== This works more or less identical to ``~``/``Not``. The difference is that ``negated`` returns the relationship even if ``evaluate=False``. Hence, this is useful in code when checking for e.g. negated relations to existing ones as it will not be affected by the `evaluate` flag. """ ops = {Eq: Ne, Ge: Lt, Gt: Le, Le: Gt, Lt: Ge, Ne: Eq} # If there ever will be new Relational subclasses, the following line # will work until it is properly sorted out # return ops.get(self.func, lambda a, b, evaluate=False: ~(self.func(a, # b, evaluate=evaluate)))(*self.args, evaluate=False) return Relational.__new__(ops.get(self.func), *self.args) def _eval_evalf(self, prec): return self.func(*[s._evalf(prec) for s in self.args]) @property def canonical(self): """Return a canonical form of the relational by putting a Number on the rhs else ordering the args. The relation is also changed so that the left-hand side expression does not start with a ``-``. No other simplification is attempted. Examples ======== >>> from sympy.abc import x, y >>> x < 2 x < 2 >>> _.reversed.canonical x < 2 >>> (-y < x).canonical x > -y >>> (-y > x).canonical x < -y """ args = self.args r = self if r.rhs.is_number: if r.rhs.is_Number and r.lhs.is_Number and r.lhs > r.rhs: r = r.reversed elif r.lhs.is_number: r = r.reversed elif tuple(ordered(args)) != args: r = r.reversed LHS_CEMS = getattr(r.lhs, 'could_extract_minus_sign', None) RHS_CEMS = getattr(r.rhs, 'could_extract_minus_sign', None) if isinstance(r.lhs, BooleanAtom) or isinstance(r.rhs, BooleanAtom): return r # Check if first value has negative sign if LHS_CEMS and LHS_CEMS(): return r.reversedsign elif not r.rhs.is_number and RHS_CEMS and RHS_CEMS(): # Right hand side has a minus, but not lhs. # How does the expression with reversed signs behave? # This is so that expressions of the type # Eq(x, -y) and Eq(-x, y) # have the same canonical representation expr1, _ = ordered([r.lhs, -r.rhs]) if expr1 != r.lhs: return r.reversed.reversedsign return r def equals(self, other, failing_expression=False): """Return True if the sides of the relationship are mathematically identical and the type of relationship is the same. 
If failing_expression is True, return the expression whose truth value was unknown.""" if isinstance(other, Relational): if self == other or self.reversed == other: return True a, b = self, other if a.func in (Eq, Ne) or b.func in (Eq, Ne): if a.func != b.func: return False left, right = [i.equals(j, failing_expression=failing_expression) for i, j in zip(a.args, b.args)] if left is True: return right if right is True: return left lr, rl = [i.equals(j, failing_expression=failing_expression) for i, j in zip(a.args, b.reversed.args)] if lr is True: return rl if rl is True: return lr e = (left, right, lr, rl) if all(i is False for i in e): return False for i in e: if i not in (True, False): return i else: if b.func != a.func: b = b.reversed if a.func != b.func: return False left = a.lhs.equals(b.lhs, failing_expression=failing_expression) if left is False: return False right = a.rhs.equals(b.rhs, failing_expression=failing_expression) if right is False: return False if left is True: return right return left def _eval_simplify(self, **kwargs): r = self r = r.func(*[i.simplify(**kwargs) for i in r.args]) if r.is_Relational: dif = r.lhs - r.rhs # replace dif with a valid Number that will # allow a definitive comparison with 0 v = None if dif.is_comparable: v = dif.n(2) elif dif.equals(0): # XXX this is expensive v = S.Zero if v is not None: r = r.func._eval_relation(v, S.Zero) r = r.canonical # If there is only one symbol in the expression, # try to write it on a simplified form free = list(filter(lambda x: x.is_real is not False, r.free_symbols)) if len(free) == 1: try: from sympy.solvers.solveset import linear_coeffs x = free.pop() dif = r.lhs - r.rhs m, b = linear_coeffs(dif, x) if m.is_zero is False: if m.is_negative: # Dividing with a negative number, so change order of arguments # canonical will put the symbol back on the lhs later r = r.func(-b/m, x) else: r = r.func(x, -b/m) else: r = r.func(b, S.zero) except ValueError: # maybe not a linear function, try polynomial from sympy.polys import Poly, poly, PolynomialError, gcd try: p = poly(dif, x) c = p.all_coeffs() constant = c[-1] c[-1] = 0 scale = gcd(c) c = [ctmp/scale for ctmp in c] r = r.func(Poly.from_list(c, x).as_expr(), -constant/scale) except PolynomialError: pass elif len(free) >= 2: try: from sympy.solvers.solveset import linear_coeffs from sympy.polys import gcd free = list(ordered(free)) dif = r.lhs - r.rhs m = linear_coeffs(dif, *free) constant = m[-1] del m[-1] scale = gcd(m) m = [mtmp/scale for mtmp in m] nzm = list(filter(lambda f: f[0] != 0, list(zip(m, free)))) if scale.is_zero is False: if constant != 0: # lhs: expression, rhs: constant newexpr = Add(*[i*j for i, j in nzm]) r = r.func(newexpr, -constant/scale) else: # keep first term on lhs lhsterm = nzm[0][0]*nzm[0][1] del nzm[0] newexpr = Add(*[i*j for i, j in nzm]) r = r.func(lhsterm, -newexpr) else: r = r.func(constant, S.zero) except ValueError: pass # Did we get a simplified result? 
r = r.canonical measure = kwargs['measure'] if measure(r) < kwargs['ratio']*measure(self): return r else: return self def _eval_trigsimp(self, **opts): from sympy.simplify import trigsimp return self.func(trigsimp(self.lhs, **opts), trigsimp(self.rhs, **opts)) def expand(self, **kwargs): args = (arg.expand(**kwargs) for arg in self.args) return self.func(*args) def __nonzero__(self): raise TypeError("cannot determine truth value of Relational") __bool__ = __nonzero__ def _eval_as_set(self): # self is univariate and periodicity(self, x) in (0, None) from sympy.solvers.inequalities import solve_univariate_inequality syms = self.free_symbols assert len(syms) == 1 x = syms.pop() return solve_univariate_inequality(self, x, relational=False) @property def binary_symbols(self): # override where necessary return set() Rel = Relational class Equality(Relational): """An equal relation between two objects. Represents that two objects are equal. If they can be easily shown to be definitively equal (or unequal), this will reduce to True (or False). Otherwise, the relation is maintained as an unevaluated Equality object. Use the ``simplify`` function on this object for more nontrivial evaluation of the equality relation. As usual, the keyword argument ``evaluate=False`` can be used to prevent any evaluation. Examples ======== >>> from sympy import Eq, simplify, exp, cos >>> from sympy.abc import x, y >>> Eq(y, x + x**2) Eq(y, x**2 + x) >>> Eq(2, 5) False >>> Eq(2, 5, evaluate=False) Eq(2, 5) >>> _.doit() False >>> Eq(exp(x), exp(x).rewrite(cos)) Eq(exp(x), sinh(x) + cosh(x)) >>> simplify(_) True See Also ======== sympy.logic.boolalg.Equivalent : for representing equality between two boolean expressions Notes ===== This class is not the same as the == operator. The == operator tests for exact structural equality between two expressions; this class compares expressions mathematically. If either object defines an `_eval_Eq` method, it can be used in place of the default algorithm. If `lhs._eval_Eq(rhs)` or `rhs._eval_Eq(lhs)` returns anything other than None, that return value will be substituted for the Equality. If None is returned by `_eval_Eq`, an Equality object will be created as usual. Since this object is already an expression, it does not respond to the method `as_expr` if one tries to create `x - y` from Eq(x, y). This can be done with the `rewrite(Add)` method. """ rel_op = '==' __slots__ = [] is_Equality = True def __new__(cls, lhs, rhs=None, **options): from sympy.core.add import Add from sympy.core.containers import Tuple from sympy.core.logic import fuzzy_bool, fuzzy_xor, fuzzy_and, fuzzy_not from sympy.core.expr import _n2 from sympy.functions.elementary.complexes import arg from sympy.simplify.simplify import clear_coefficients from sympy.utilities.iterables import sift if rhs is None: SymPyDeprecationWarning( feature="Eq(expr) with rhs default to 0", useinstead="Eq(expr, 0)", issue=16587, deprecated_since_version="1.5" ).warn() rhs = 0 lhs = _sympify(lhs) rhs = _sympify(rhs) evaluate = options.pop('evaluate', global_parameters.evaluate) if evaluate: # If one expression has an _eval_Eq, return its results. if hasattr(lhs, '_eval_Eq'): r = lhs._eval_Eq(rhs) if r is not None: return r if hasattr(rhs, '_eval_Eq'): r = rhs._eval_Eq(lhs) if r is not None: return r # If expressions have the same structure, they must be equal. if lhs == rhs: return S.true # e.g. 
True == True elif all(isinstance(i, BooleanAtom) for i in (rhs, lhs)): return S.false # True != False elif not (lhs.is_Symbol or rhs.is_Symbol) and ( isinstance(lhs, Boolean) != isinstance(rhs, Boolean)): return S.false # only Booleans can equal Booleans if lhs.is_infinite or rhs.is_infinite: if fuzzy_xor([lhs.is_infinite, rhs.is_infinite]): return S.false if fuzzy_xor([lhs.is_extended_real, rhs.is_extended_real]): return S.false if fuzzy_and([lhs.is_extended_real, rhs.is_extended_real]): r = fuzzy_xor([lhs.is_extended_positive, fuzzy_not(rhs.is_extended_positive)]) return S(r) # Try to split real/imaginary parts and equate them I = S.ImaginaryUnit def split_real_imag(expr): real_imag = lambda t: ( 'real' if t.is_extended_real else 'imag' if (I*t).is_extended_real else None) return sift(Add.make_args(expr), real_imag) lhs_ri = split_real_imag(lhs) if not lhs_ri[None]: rhs_ri = split_real_imag(rhs) if not rhs_ri[None]: eq_real = Eq(Add(*lhs_ri['real']), Add(*rhs_ri['real'])) eq_imag = Eq(I*Add(*lhs_ri['imag']), I*Add(*rhs_ri['imag'])) res = fuzzy_and(map(fuzzy_bool, [eq_real, eq_imag])) if res is not None: return S(res) # Compare e.g. zoo with 1+I*oo by comparing args arglhs = arg(lhs) argrhs = arg(rhs) # Guard against Eq(nan, nan) -> False if not (arglhs == S.NaN and argrhs == S.NaN): res = fuzzy_bool(Eq(arglhs, argrhs)) if res is not None: return S(res) return Relational.__new__(cls, lhs, rhs, **options) if all(isinstance(i, Expr) for i in (lhs, rhs)): # see if the difference evaluates dif = lhs - rhs z = dif.is_zero if z is not None: if z is False and dif.is_commutative: # issue 10728 return S.false if z: return S.true # evaluate numerically if possible n2 = _n2(lhs, rhs) if n2 is not None: return _sympify(n2 == 0) # see if the ratio evaluates n, d = dif.as_numer_denom() rv = None if n.is_zero: rv = d.is_nonzero elif n.is_finite: if d.is_infinite: rv = S.true elif n.is_zero is False: rv = d.is_infinite if rv is None: # if the condition that makes the denominator # infinite does not make the original expression # True then False can be returned l, r = clear_coefficients(d, S.Infinity) args = [_.subs(l, r) for _ in (lhs, rhs)] if args != [lhs, rhs]: rv = fuzzy_bool(Eq(*args)) if rv is True: rv = None elif any(a.is_infinite for a in Add.make_args(n)): # (inf or nan)/x != 0 rv = S.false if rv is not None: return _sympify(rv) return Relational.__new__(cls, lhs, rhs, **options) @classmethod def _eval_relation(cls, lhs, rhs): return _sympify(lhs == rhs) def _eval_rewrite_as_Add(self, *args, **kwargs): """return Eq(L, R) as L - R. To control the evaluation of the result set pass `evaluate=True` to give L - R; if `evaluate=None` then terms in L and R will not cancel but they will be listed in canonical order; otherwise non-canonical args will be returned. 
Examples ======== >>> from sympy import Eq, Add >>> from sympy.abc import b, x >>> eq = Eq(x + b, x - b) >>> eq.rewrite(Add) 2*b >>> eq.rewrite(Add, evaluate=None).args (b, b, x, -x) >>> eq.rewrite(Add, evaluate=False).args (b, x, b, -x) """ L, R = args evaluate = kwargs.get('evaluate', True) if evaluate: # allow cancellation of args return L - R args = Add.make_args(L) + Add.make_args(-R) if evaluate is None: # no cancellation, but canonical return _unevaluated_Add(*args) # no cancellation, not canonical return Add._from_args(args) @property def binary_symbols(self): if S.true in self.args or S.false in self.args: if self.lhs.is_Symbol: return set([self.lhs]) elif self.rhs.is_Symbol: return set([self.rhs]) return set() def _eval_simplify(self, **kwargs): from sympy.solvers.solveset import linear_coeffs # standard simplify e = super(Equality, self)._eval_simplify(**kwargs) if not isinstance(e, Equality): return e free = self.free_symbols if len(free) == 1: try: x = free.pop() m, b = linear_coeffs( e.rewrite(Add, evaluate=False), x) if m.is_zero is False: enew = e.func(x, -b/m) else: enew = e.func(m*x, -b) measure = kwargs['measure'] if measure(enew) <= kwargs['ratio']*measure(e): e = enew except ValueError: pass return e.canonical def integrate(self, *args, **kwargs): """See the integrate function in sympy.integrals""" from sympy.integrals import integrate return integrate(self, *args, **kwargs) def as_poly(self, *gens, **kwargs): '''Returns lhs-rhs as a Poly Examples ======== >>> from sympy import Eq >>> from sympy.abc import x, y >>> Eq(x**2, 1).as_poly(x) Poly(x**2 - 1, x, domain='ZZ') ''' return (self.lhs - self.rhs).as_poly(*gens, **kwargs) Eq = Equality class Unequality(Relational): """An unequal relation between two objects. Represents that two objects are not equal. If they can be shown to be definitively equal, this will reduce to False; if definitively unequal, this will reduce to True. Otherwise, the relation is maintained as an Unequality object. Examples ======== >>> from sympy import Ne >>> from sympy.abc import x, y >>> Ne(y, x+x**2) Ne(y, x**2 + x) See Also ======== Equality Notes ===== This class is not the same as the != operator. The != operator tests for exact structural equality between two expressions; this class compares expressions mathematically. This class is effectively the inverse of Equality. As such, it uses the same algorithms, including any available `_eval_Eq` methods. """ rel_op = '!=' __slots__ = [] def __new__(cls, lhs, rhs, **options): lhs = _sympify(lhs) rhs = _sympify(rhs) evaluate = options.pop('evaluate', global_parameters.evaluate) if evaluate: is_equal = Equality(lhs, rhs) if isinstance(is_equal, BooleanAtom): return is_equal.negated return Relational.__new__(cls, lhs, rhs, **options) @classmethod def _eval_relation(cls, lhs, rhs): return _sympify(lhs != rhs) @property def binary_symbols(self): if S.true in self.args or S.false in self.args: if self.lhs.is_Symbol: return set([self.lhs]) elif self.rhs.is_Symbol: return set([self.rhs]) return set() def _eval_simplify(self, **kwargs): # simplify as an equality eq = Equality(*self.args)._eval_simplify(**kwargs) if isinstance(eq, Equality): # send back Ne with the new args return self.func(*eq.args) return eq.negated # result of Ne is the negated Eq Ne = Unequality class _Inequality(Relational): """Internal base class for all *Than types. Each subclass must implement _eval_relation to provide the method for comparing two real numbers. 
""" __slots__ = [] def __new__(cls, lhs, rhs, **options): lhs = _sympify(lhs) rhs = _sympify(rhs) evaluate = options.pop('evaluate', global_parameters.evaluate) if evaluate: # First we invoke the appropriate inequality method of `lhs` # (e.g., `lhs.__lt__`). That method will try to reduce to # boolean or raise an exception. It may keep calling # superclasses until it reaches `Expr` (e.g., `Expr.__lt__`). # In some cases, `Expr` will just invoke us again (if neither it # nor a subclass was able to reduce to boolean or raise an # exception). In that case, it must call us with # `evaluate=False` to prevent infinite recursion. r = cls._eval_relation(lhs, rhs) if r is not None: return r # Note: not sure r could be None, perhaps we never take this # path? In principle, could use this to shortcut out if a # class realizes the inequality cannot be evaluated further. # make a "non-evaluated" Expr for the inequality return Relational.__new__(cls, lhs, rhs, **options) class _Greater(_Inequality): """Not intended for general use _Greater is only used so that GreaterThan and StrictGreaterThan may subclass it for the .gts and .lts properties. """ __slots__ = () @property def gts(self): return self._args[0] @property def lts(self): return self._args[1] class _Less(_Inequality): """Not intended for general use. _Less is only used so that LessThan and StrictLessThan may subclass it for the .gts and .lts properties. """ __slots__ = () @property def gts(self): return self._args[1] @property def lts(self): return self._args[0] class GreaterThan(_Greater): """Class representations of inequalities. Extended Summary ================ The ``*Than`` classes represent inequal relationships, where the left-hand side is generally bigger or smaller than the right-hand side. For example, the GreaterThan class represents an inequal relationship where the left-hand side is at least as big as the right side, if not bigger. In mathematical notation: lhs >= rhs In total, there are four ``*Than`` classes, to represent the four inequalities: +-----------------+--------+ |Class Name | Symbol | +=================+========+ |GreaterThan | (>=) | +-----------------+--------+ |LessThan | (<=) | +-----------------+--------+ |StrictGreaterThan| (>) | +-----------------+--------+ |StrictLessThan | (<) | +-----------------+--------+ All classes take two arguments, lhs and rhs. +----------------------------+-----------------+ |Signature Example | Math equivalent | +============================+=================+ |GreaterThan(lhs, rhs) | lhs >= rhs | +----------------------------+-----------------+ |LessThan(lhs, rhs) | lhs <= rhs | +----------------------------+-----------------+ |StrictGreaterThan(lhs, rhs) | lhs > rhs | +----------------------------+-----------------+ |StrictLessThan(lhs, rhs) | lhs < rhs | +----------------------------+-----------------+ In addition to the normal .lhs and .rhs of Relations, ``*Than`` inequality objects also have the .lts and .gts properties, which represent the "less than side" and "greater than side" of the operator. 
Use of .lts and .gts in an algorithm rather than .lhs and .rhs as an assumption of inequality direction will make more explicit the intent of a certain section of code, and will make it similarly more robust to client code changes: >>> from sympy import GreaterThan, StrictGreaterThan >>> from sympy import LessThan, StrictLessThan >>> from sympy import And, Ge, Gt, Le, Lt, Rel, S >>> from sympy.abc import x, y, z >>> from sympy.core.relational import Relational >>> e = GreaterThan(x, 1) >>> e x >= 1 >>> '%s >= %s is the same as %s <= %s' % (e.gts, e.lts, e.lts, e.gts) 'x >= 1 is the same as 1 <= x' Examples ======== One generally does not instantiate these classes directly, but uses various convenience methods: >>> for f in [Ge, Gt, Le, Lt]: # convenience wrappers ... print(f(x, 2)) x >= 2 x > 2 x <= 2 x < 2 Another option is to use the Python inequality operators (>=, >, <=, <) directly. Their main advantage over the Ge, Gt, Le, and Lt counterparts, is that one can write a more "mathematical looking" statement rather than littering the math with oddball function calls. However there are certain (minor) caveats of which to be aware (search for 'gotcha', below). >>> x >= 2 x >= 2 >>> _ == Ge(x, 2) True However, it is also perfectly valid to instantiate a ``*Than`` class less succinctly and less conveniently: >>> Rel(x, 1, ">") x > 1 >>> Relational(x, 1, ">") x > 1 >>> StrictGreaterThan(x, 1) x > 1 >>> GreaterThan(x, 1) x >= 1 >>> LessThan(x, 1) x <= 1 >>> StrictLessThan(x, 1) x < 1 Notes ===== There are a couple of "gotchas" to be aware of when using Python's operators. The first is that what your write is not always what you get: >>> 1 < x x > 1 Due to the order that Python parses a statement, it may not immediately find two objects comparable. When "1 < x" is evaluated, Python recognizes that the number 1 is a native number and that x is *not*. Because a native Python number does not know how to compare itself with a SymPy object Python will try the reflective operation, "x > 1" and that is the form that gets evaluated, hence returned. If the order of the statement is important (for visual output to the console, perhaps), one can work around this annoyance in a couple ways: (1) "sympify" the literal before comparison >>> S(1) < x 1 < x (2) use one of the wrappers or less succinct methods described above >>> Lt(1, x) 1 < x >>> Relational(1, x, "<") 1 < x The second gotcha involves writing equality tests between relationals when one or both sides of the test involve a literal relational: >>> e = x < 1; e x < 1 >>> e == e # neither side is a literal True >>> e == x < 1 # expecting True, too False >>> e != x < 1 # expecting False x < 1 >>> x < 1 != x < 1 # expecting False or the same thing as before Traceback (most recent call last): ... TypeError: cannot determine truth value of Relational The solution for this case is to wrap literal relationals in parentheses: >>> e == (x < 1) True >>> e != (x < 1) False >>> (x < 1) != (x < 1) False The third gotcha involves chained inequalities not involving '==' or '!='. Occasionally, one may be tempted to write: >>> e = x < y < z Traceback (most recent call last): ... TypeError: symbolic boolean expression has no truth value. 
Due to an implementation detail or decision of Python [1]_, there is no way for SymPy to create a chained inequality with that syntax so one must use And: >>> e = And(x < y, y < z) >>> type( e ) And >>> e (x < y) & (y < z) Although this can also be done with the '&' operator, it cannot be done with the 'and' operator: >>> (x < y) & (y < z) (x < y) & (y < z) >>> (x < y) and (y < z) Traceback (most recent call last): ... TypeError: cannot determine truth value of Relational .. [1] This implementation detail is that Python provides no reliable method to determine that a chained inequality is being built. Chained comparison operators are evaluated pairwise, using "and" logic (see http://docs.python.org/2/reference/expressions.html#notin). This is done in an efficient way, so that each object being compared is only evaluated once and the comparison can short-circuit. For example, ``1 > 2 > 3`` is evaluated by Python as ``(1 > 2) and (2 > 3)``. The ``and`` operator coerces each side into a bool, returning the object itself when it short-circuits. The bool of the ``*Than`` operators will raise TypeError on purpose, because SymPy cannot determine the mathematical ordering of symbolic expressions. Thus, if we were to compute ``x > y > z``, with ``x``, ``y``, and ``z`` being Symbols, Python converts the statement (roughly) into these steps: (1) x > y > z (2) (x > y) and (y > z) (3) (GreaterThanObject) and (y > z) (4) (GreaterThanObject.__nonzero__()) and (y > z) (5) TypeError Because of the "and" added at step 2, the statement gets turned into a weak ternary statement, and the first object's __nonzero__ method will raise TypeError. Thus, creating a chained inequality is not possible. In Python, there is no way to override the ``and`` operator, or to control how it short circuits, so it is impossible to make something like ``x > y > z`` work. There was a PEP to change this, :pep:`335`, but it was officially closed in March, 2012. """ __slots__ = () rel_op = '>=' @classmethod def _eval_relation(cls, lhs, rhs): # We don't use the op symbol here: workaround issue #7951 return _sympify(lhs.__ge__(rhs)) Ge = GreaterThan class LessThan(_Less): __doc__ = GreaterThan.__doc__ __slots__ = () rel_op = '<=' @classmethod def _eval_relation(cls, lhs, rhs): # We don't use the op symbol here: workaround issue #7951 return _sympify(lhs.__le__(rhs)) Le = LessThan class StrictGreaterThan(_Greater): __doc__ = GreaterThan.__doc__ __slots__ = () rel_op = '>' @classmethod def _eval_relation(cls, lhs, rhs): # We don't use the op symbol here: workaround issue #7951 return _sympify(lhs.__gt__(rhs)) Gt = StrictGreaterThan class StrictLessThan(_Less): __doc__ = GreaterThan.__doc__ __slots__ = () rel_op = '<' @classmethod def _eval_relation(cls, lhs, rhs): # We don't use the op symbol here: workaround issue #7951 return _sympify(lhs.__lt__(rhs)) Lt = StrictLessThan # A class-specific (not object-specific) data item used for a minor speedup. # It is defined here, rather than directly in the class, because the classes # that it references have not been defined until now (e.g. StrictLessThan). Relational.ValidRelationOperator = { None: Equality, '==': Equality, 'eq': Equality, '!=': Unequality, '<>': Unequality, 'ne': Unequality, '>=': GreaterThan, 'ge': GreaterThan, '<=': LessThan, 'le': LessThan, '>': StrictGreaterThan, 'gt': StrictGreaterThan, '<': StrictLessThan, 'lt': StrictLessThan, }
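# A minimal usage sketch of the table above: ``Relational`` dispatches on the
# string ``rop`` argument through ``ValidRelationOperator``, so the symbolic
# keys and the spelled-out keys select the same classes defined in this module:
#
#     >>> from sympy import Relational
#     >>> from sympy.abc import x
#     >>> Relational(x, 1, '>=')
#     x >= 1
#     >>> Relational(x, 1, 'lt')
#     x < 1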
b58ceb78708e9ceaf0a36f822e25dbb710514649116aa6753015f26ff6a4cbd5
from __future__ import absolute_import, print_function, division import numbers import decimal import fractions import math import re as regex from .containers import Tuple from .sympify import converter, sympify, _sympify, SympifyError, _convert_numpy_types from .singleton import S, Singleton from .expr import Expr, AtomicExpr from .evalf import pure_complex from .decorators import _sympifyit from .cache import cacheit, clear_cache from .logic import fuzzy_not from sympy.core.compatibility import ( as_int, integer_types, long, string_types, with_metaclass, HAS_GMPY, SYMPY_INTS, int_info) from sympy.core.cache import lru_cache import mpmath import mpmath.libmp as mlib from mpmath.libmp import bitcount from mpmath.libmp.backend import MPZ from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed from mpmath.ctx_mp import mpnumeric from mpmath.libmp.libmpf import ( finf as _mpf_inf, fninf as _mpf_ninf, fnan as _mpf_nan, fzero, _normalize as mpf_normalize, prec_to_dps, fone, fnone) from sympy.utilities.misc import debug, filldedent from .parameters import global_parameters from sympy.utilities.exceptions import SymPyDeprecationWarning rnd = mlib.round_nearest _LOG2 = math.log(2) def comp(z1, z2, tol=None): """Return a bool indicating whether the error between z1 and z2 is <= tol. Examples ======== If ``tol`` is None then True will be returned if ``abs(z1 - z2)*10**p <= 5`` where ``p`` is minimum value of the decimal precision of each value. >>> from sympy.core.numbers import comp, pi >>> pi4 = pi.n(4); pi4 3.142 >>> comp(_, 3.142) True >>> comp(pi4, 3.141) False >>> comp(pi4, 3.143) False A comparison of strings will be made if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''. >>> comp(pi4, 3.1415) True >>> comp(pi4, 3.1415, '') False When ``tol`` is provided and ``z2`` is non-zero and ``|z1| > 1`` the error is normalized by ``|z1|``: >>> abs(pi4 - 3.14)/pi4 0.000509791731426756 >>> comp(pi4, 3.14, .001) # difference less than 0.1% True >>> comp(pi4, 3.14, .0005) # difference less than 0.1% False When ``|z1| <= 1`` the absolute error is used: >>> 1/pi4 0.3183 >>> abs(1/pi4 - 0.3183)/(1/pi4) 3.07371499106316e-5 >>> abs(1/pi4 - 0.3183) 9.78393554684764e-6 >>> comp(1/pi4, 0.3183, 1e-5) True To see if the absolute error between ``z1`` and ``z2`` is less than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)`` or ``comp(z1 - z2, tol=tol)``: >>> abs(pi4 - 3.14) 0.00160156249999988 >>> comp(pi4 - 3.14, 0, .002) True >>> comp(pi4 - 3.14, 0, .001) False """ if type(z2) is str: if not pure_complex(z1, or_real=True): raise ValueError('when z2 is a str z1 must be a Number') return str(z1) == z2 if not z1: z1, z2 = z2, z1 if not z1: return True if not tol: a, b = z1, z2 if tol == '': return str(a) == str(b) if tol is None: a, b = sympify(a), sympify(b) if not all(i.is_number for i in (a, b)): raise ValueError('expecting 2 numbers') fa = a.atoms(Float) fb = b.atoms(Float) if not fa and not fb: # no floats -- compare exactly return a == b # get a to be pure_complex for do in range(2): ca = pure_complex(a, or_real=True) if not ca: if fa: a = a.n(prec_to_dps(min([i._prec for i in fa]))) ca = pure_complex(a, or_real=True) break else: fa, fb = fb, fa a, b = b, a cb = pure_complex(b) if not cb and fb: b = b.n(prec_to_dps(min([i._prec for i in fb]))) cb = pure_complex(b, or_real=True) if ca and cb and (ca[1] or cb[1]): return all(comp(i, j) for i, j in zip(ca, cb)) tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec))) return int(abs(a - b)*tol) <= 5 diff = abs(z1 - z2) az1 = abs(z1) 
if z2 and az1 > 1: return diff/az1 <= tol else: return diff <= tol def mpf_norm(mpf, prec): """Return the mpf tuple normalized appropriately for the indicated precision after doing a check to see if zero should be returned or not when the mantissa is 0. ``mpf_normalize`` always assumes that this is zero, but it may not be, since the mpf values "+inf", "-inf" and "nan" also have a mantissa of zero. Note: this is not intended to validate a given mpf tuple, so sending mpf tuples that were not created by mpmath may produce bad results. This is only a wrapper to ``mpf_normalize`` which provides the check for non-zero mpfs that have a 0 for the mantissa. """ sign, man, expt, bc = mpf if not man: # hack for mpf_normalize which does not do this; # it assumes that if man is zero the result is 0 # (see issue 6639) if not bc: return fzero else: # don't change anything; this should already # be a well formed mpf tuple return mpf # Necessary if mpmath is using the gmpy backend from mpmath.libmp.backend import MPZ rv = mpf_normalize(sign, MPZ(man), expt, bc, prec, rnd) return rv # TODO: we should use the warnings module _errdict = {"divide": False} def seterr(divide=False): """ Should sympy raise an exception on 0/0 or return a nan? divide == True .... raise an exception divide == False ... return nan """ if _errdict["divide"] != divide: clear_cache() _errdict["divide"] = divide def _as_integer_ratio(p): neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_) p = [1, -1][neg_pow % 2]*man if expt < 0: q = 2**-expt else: q = 1 p *= 2**expt return int(p), int(q) def _decimal_to_Rational_prec(dec): """Convert an ordinary decimal instance to a Rational.""" if not dec.is_finite(): raise TypeError("dec must be finite, got %s." % dec) s, d, e = dec.as_tuple() prec = len(d) if e >= 0: # it's an integer rv = Integer(int(dec)) else: s = (-1)**s d = sum([di*10**i for i, di in enumerate(reversed(d))]) rv = Rational(s*d, 10**-e) return rv, prec _floatpat = regex.compile(r"[-+]?((\d*\.\d+)|(\d+\.?))") def _literal_float(f): """Return True if f starts like a floating point number.""" return bool(_floatpat.match(f)) # (a,b) -> gcd(a,b) # TODO caching with decorator, but not to degrade performance @lru_cache(1024) def igcd(*args): """Computes nonnegative integer greatest common divisor. The algorithm is based on the well-known Euclid's algorithm. To improve speed, igcd() has its own caching mechanism implemented. Examples ======== >>> from sympy.core.numbers import igcd >>> igcd(2, 4) 2 >>> igcd(5, 10, 15) 5 """ if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() for b in args_temp: a = igcd2(a, b) if b else a return a try: from math import gcd as igcd2 except ImportError: def igcd2(a, b): """Compute gcd of two Python integers a and b.""" if (a.bit_length() > BIGBITS and b.bit_length() > BIGBITS): return igcd_lehmer(a, b) a, b = abs(a), abs(b) while b: a, b = b, a % b return a # Use Lehmer's algorithm only for very large numbers. # The limit could be different on Python 2.7 and 3.x. # If so, then this could be defined in compatibility.py. BIGBITS = 5000 def igcd_lehmer(a, b): """Computes greatest common divisor of two integers. Euclid's algorithm for the computation of the greatest common divisor gcd(a, b) of two (positive) integers a and b is based on the division identity a = q*b + r, where the quotient q and the remainder r are integers and 0 <= r < b.
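(For example, with a = 12 and b = 8 this reads 12 = 1*8 + 4, so q = 1 and r = 4.)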
Then each common divisor of a and b divides r, and it follows that gcd(a, b) == gcd(b, r). The algorithm works by constructing the sequence r0, r1, r2, ..., where r0 = a, r1 = b, and each rn is the remainder from the division of the two preceding elements. In Python, q = a // b and r = a % b are obtained by the floor division and the remainder operations, respectively. These are the most expensive arithmetic operations, especially for large a and b. Lehmer's algorithm is based on the observation that the quotients qn = r(n-1) // rn are in general small integers even when a and b are very large. Hence the quotients can be usually determined from a relatively small number of most significant bits. The efficiency of the algorithm is further enhanced by not computing each long remainder in Euclid's sequence. The remainders are linear combinations of a and b with integer coefficients derived from the quotients. The coefficients can be computed as far as the quotients can be determined from the chosen most significant parts of a and b. Only then a new pair of consecutive remainders is computed and the algorithm starts anew with this pair. References ========== .. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm """ a, b = abs(as_int(a)), abs(as_int(b)) if a < b: a, b = b, a # The algorithm works by using one or two digit division # whenever possible. The outer loop will replace the # pair (a, b) with a pair of shorter consecutive elements # of the Euclidean gcd sequence until a and b # fit into two Python (long) int digits. nbits = 2*int_info.bits_per_digit while a.bit_length() > nbits and b != 0: # Quotients are mostly small integers that can # be determined from most significant bits. n = a.bit_length() - nbits x, y = int(a >> n), int(b >> n) # most significant bits # Elements of the Euclidean gcd sequence are linear # combinations of a and b with integer coefficients. # Compute the coefficients of consecutive pairs # a' = A*a + B*b, b' = C*a + D*b # using small integer arithmetic as far as possible. A, B, C, D = 1, 0, 0, 1 # initial values while True: # The coefficients alternate in sign while looping. # The inner loop combines two steps to keep track # of the signs. # At this point we have # A > 0, B <= 0, C <= 0, D > 0, # x' = x + B <= x < x" = x + A, # y' = y + C <= y < y" = y + D, # and # x'*N <= a' < x"*N, y'*N <= b' < y"*N, # where N = 2**n. # Now, if y' > 0, and x"//y' and x'//y" agree, # then their common value is equal to q = a'//b'. # In addition, # x'%y" = x' - q*y" < x" - q*y' = x"%y', # and # (x'%y")*N < a'%b' < (x"%y')*N. # On the other hand, we also have x//y == q, # and therefore # x'%y" = x + B - q*(y + D) = x%y + B', # x"%y' = x + A - q*(y + C) = x%y + A', # where # B' = B - q*D < 0, A' = A - q*C > 0. if y + C <= 0: break q = (x + A) // (y + C) # Now x'//y" <= q, and equality holds if # x' - q*y" = (x - q*y) + (B - q*D) >= 0. # This is a minor optimization to avoid division. x_qy, B_qD = x - q*y, B - q*D if x_qy + B_qD < 0: break # Next step in the Euclidean sequence. x, y = y, x_qy A, B, C, D = C, D, A - q*C, B_qD # At this point the signs of the coefficients # change and their roles are interchanged. # A <= 0, B > 0, C > 0, D < 0, # x' = x + A <= x < x" = x + B, # y' = y + D < y < y" = y + C. if y + D <= 0: break q = (x + B) // (y + D) x_qy, A_qC = x - q*y, A - q*C if x_qy + A_qC < 0: break x, y = y, x_qy A, B, C, D = C, D, A_qC, B - q*D # Now the conditions on top of the loop # are again satisfied. # A > 0, B < 0, C < 0, D > 0. 
if B == 0: # This can only happen when y == 0 in the beginning # and the inner loop does nothing. # Long division is forced. a, b = b, a % b continue # Compute new long arguments using the coefficients. a, b = A*a + B*b, C*a + D*b # Small divisors. Finish with the standard algorithm. while b: a, b = b, a % b return a def ilcm(*args): """Computes integer least common multiple. Examples ======== >>> from sympy.core.numbers import ilcm >>> ilcm(5, 10) 10 >>> ilcm(7, 3) 21 >>> ilcm(5, 10, 15) 30 """ if len(args) < 2: raise TypeError( 'ilcm() takes at least 2 arguments (%s given)' % len(args)) if 0 in args: return 0 a = args[0] for b in args[1:]: a = a // igcd(a, b) * b # since gcd(a,b) | a return a def igcdex(a, b): """Returns x, y, g such that g = x*a + y*b = gcd(a, b). >>> from sympy.core.numbers import igcdex >>> igcdex(2, 3) (-1, 1, 1) >>> igcdex(10, 12) (-1, 1, 2) >>> x, y, g = igcdex(100, 2004) >>> x, y, g (-20, 1, 4) >>> x*100 + y*2004 4 """ if (not a) and (not b): return (0, 1, 0) if not a: return (0, b//abs(b), abs(b)) if not b: return (a//abs(a), 0, abs(a)) if a < 0: a, x_sign = -a, -1 else: x_sign = 1 if b < 0: b, y_sign = -b, -1 else: y_sign = 1 x, y, r, s = 1, 0, 0, 1 while b: (c, q) = (a % b, a // b) (a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s) return (x*x_sign, y*y_sign, a) def mod_inverse(a, m): """ Return the number c such that, (a * c) = 1 (mod m) where c has the same sign as m. If no such value exists, a ValueError is raised. Examples ======== >>> from sympy import S >>> from sympy.core.numbers import mod_inverse Suppose we wish to find multiplicative inverse x of 3 modulo 11. This is the same as finding x such that 3 * x = 1 (mod 11). One value of x that satisfies this congruence is 4. Because 3 * 4 = 12 and 12 = 1 (mod 11). This is the value returned by mod_inverse: >>> mod_inverse(3, 11) 4 >>> mod_inverse(-3, 11) 7 When there is a common factor between the numerators of ``a`` and ``m`` the inverse does not exist: >>> mod_inverse(2, 4) Traceback (most recent call last): ... ValueError: inverse of 2 mod 4 does not exist >>> mod_inverse(S(2)/7, S(5)/2) 7/2 References ========== - https://en.wikipedia.org/wiki/Modular_multiplicative_inverse - https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm """ c = None try: a, m = as_int(a), as_int(m) if m != 1 and m != -1: x, y, g = igcdex(a, m) if g == 1: c = x % m except ValueError: a, m = sympify(a), sympify(m) if not (a.is_number and m.is_number): raise TypeError(filldedent(''' Expected numbers for arguments; symbolic `mod_inverse` is not implemented but symbolic expressions can be handled with the similar function, sympy.polys.polytools.invert''')) big = (m > 1) if not (big is S.true or big is S.false): raise ValueError('m > 1 did not evaluate; try to simplify %s' % m) elif big: c = 1/a if c is None: raise ValueError('inverse of %s (mod %s) does not exist' % (a, m)) return c class Number(AtomicExpr): """Represents atomic numbers in SymPy. Floating point numbers are represented by the Float class. Rational numbers (of any size) are represented by the Rational class. Integer numbers (of any size) are represented by the Integer class. Float and Rational are subclasses of Number; Integer is a subclass of Rational. For example, ``2/3`` is represented as ``Rational(2, 3)`` which is a different object from the floating point number obtained with Python division ``2/3``. 
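For instance (an illustrative doctest):

>>> from sympy import Rational
>>> Rational(2, 3)
2/3
>>> Rational(2, 3) == 2/3   # the Python-level 2/3 is only an approximation
False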
Even for numbers that are exactly represented in binary, there is a difference between how two forms, such as ``Rational(1, 2)`` and ``Float(0.5)``, are used in SymPy. The rational form is to be preferred in symbolic computations. Other kinds of numbers, such as algebraic numbers ``sqrt(2)`` or complex numbers ``3 + 4*I``, are not instances of Number class as they are not atomic. See Also ======== Float, Integer, Rational """ is_commutative = True is_number = True is_Number = True __slots__ = [] # Used to make max(x._prec, y._prec) return x._prec when only x is a float _prec = -1 def __new__(cls, *obj): if len(obj) == 1: obj = obj[0] if isinstance(obj, Number): return obj if isinstance(obj, SYMPY_INTS): return Integer(obj) if isinstance(obj, tuple) and len(obj) == 2: return Rational(*obj) if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)): return Float(obj) if isinstance(obj, string_types): _obj = obj.lower() # float('INF') == float('inf') if _obj == 'nan': return S.NaN elif _obj == 'inf': return S.Infinity elif _obj == '+inf': return S.Infinity elif _obj == '-inf': return S.NegativeInfinity val = sympify(obj) if isinstance(val, Number): return val else: raise ValueError('String "%s" does not denote a Number' % obj) msg = "expected str|int|long|float|Decimal|Number object but got %r" raise TypeError(msg % type(obj).__name__) def invert(self, other, *gens, **args): from sympy.polys.polytools import invert if getattr(other, 'is_number', True): return mod_inverse(self, other) return invert(self, other, *gens, **args) def __divmod__(self, other): from .containers import Tuple from sympy.functions.elementary.complexes import sign try: other = Number(other) if self.is_infinite or S.NaN in (self, other): return (S.NaN, S.NaN) except TypeError: return NotImplemented if not other: raise ZeroDivisionError('modulo by zero') if self.is_Integer and other.is_Integer: return Tuple(*divmod(self.p, other.p)) elif isinstance(other, Float): rat = self/Rational(other) else: rat = self/other if other.is_finite: w = int(rat) if rat > 0 else int(rat) - 1 r = self - other*w else: w = 0 if not self or (sign(self) == sign(other)) else -1 r = other if w else self return Tuple(w, r) def __rdivmod__(self, other): try: other = Number(other) except TypeError: return NotImplemented return divmod(other, self) def _as_mpf_val(self, prec): """Evaluation of mpf tuple accurate to at least prec bits.""" raise NotImplementedError('%s needs ._as_mpf_val() method' % (self.__class__.__name__)) def _eval_evalf(self, prec): return Float._new(self._as_mpf_val(prec), prec) def _as_mpf_op(self, prec): prec = max(prec, self._prec) return self._as_mpf_val(prec), prec def __float__(self): return mlib.to_float(self._as_mpf_val(53)) def floor(self): raise NotImplementedError('%s needs .floor() method' % (self.__class__.__name__)) def ceiling(self): raise NotImplementedError('%s needs .ceiling() method' % (self.__class__.__name__)) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() def _eval_conjugate(self): return self def _eval_order(self, *symbols): from sympy import Order # Order(5, x, y) -> Order(1,x,y) return Order(S.One, *symbols) def _eval_subs(self, old, new): if old == -self: return -new return self # there is no other possibility def _eval_is_finite(self): return True @classmethod def class_key(cls): return 1, 0, 'Number' @cacheit def sort_key(self, order=None): return self.class_key(), (0, ()), (), self @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, 
Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity: return S.Infinity elif other is S.NegativeInfinity: return S.NegativeInfinity return AtomicExpr.__add__(self, other) @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity: return S.NegativeInfinity elif other is S.NegativeInfinity: return S.Infinity return AtomicExpr.__sub__(self, other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity: if self.is_zero: return S.NaN elif self.is_positive: return S.Infinity else: return S.NegativeInfinity elif other is S.NegativeInfinity: if self.is_zero: return S.NaN elif self.is_positive: return S.NegativeInfinity else: return S.Infinity elif isinstance(other, Tuple): return NotImplemented return AtomicExpr.__mul__(self, other) @_sympifyit('other', NotImplemented) def __div__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity or other is S.NegativeInfinity: return S.Zero return AtomicExpr.__div__(self, other) __truediv__ = __div__ def __eq__(self, other): raise NotImplementedError('%s needs .__eq__() method' % (self.__class__.__name__)) def __ne__(self, other): raise NotImplementedError('%s needs .__ne__() method' % (self.__class__.__name__)) def __lt__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s < %s" % (self, other)) raise NotImplementedError('%s needs .__lt__() method' % (self.__class__.__name__)) def __le__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s <= %s" % (self, other)) raise NotImplementedError('%s needs .__le__() method' % (self.__class__.__name__)) def __gt__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s > %s" % (self, other)) return _sympify(other).__lt__(self) def __ge__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s >= %s" % (self, other)) return _sympify(other).__le__(self) def __hash__(self): return super(Number, self).__hash__() def is_constant(self, *wrt, **flags): return True def as_coeff_mul(self, *deps, **kwargs): # a -> c*t if self.is_Rational or not kwargs.pop('rational', True): return self, tuple() elif self.is_negative: return S.NegativeOne, (-self,) return S.One, (self,) def as_coeff_add(self, *deps): # a -> c + t if self.is_Rational: return self, tuple() return S.Zero, (self,) def as_coeff_Mul(self, rational=False): """Efficiently extract the coefficient of a product. """ if rational and not self.is_Rational: return S.One, self return (self, S.One) if self else (S.One, self) def as_coeff_Add(self, rational=False): """Efficiently extract the coefficient of a summation. """ if not rational: return self, S.Zero return S.Zero, self def gcd(self, other): """Compute GCD of `self` and `other`. """ from sympy.polys import gcd return gcd(self, other) def lcm(self, other): """Compute LCM of `self` and `other`. """ from sympy.polys import lcm return lcm(self, other) def cofactors(self, other): """Compute GCD and cofactors of `self` and `other`. 
""" from sympy.polys import cofactors return cofactors(self, other) class Float(Number): """Represent a floating-point number of arbitrary precision. Examples ======== >>> from sympy import Float >>> Float(3.5) 3.50000000000000 >>> Float(3) 3.00000000000000 Creating Floats from strings (and Python ``int`` and ``long`` types) will give a minimum precision of 15 digits, but the precision will automatically increase to capture all digits entered. >>> Float(1) 1.00000000000000 >>> Float(10**20) 100000000000000000000. >>> Float('1e20') 100000000000000000000. However, *floating-point* numbers (Python ``float`` types) retain only 15 digits of precision: >>> Float(1e20) 1.00000000000000e+20 >>> Float(1.23456789123456789) 1.23456789123457 It may be preferable to enter high-precision decimal numbers as strings: Float('1.23456789123456789') 1.23456789123456789 The desired number of digits can also be specified: >>> Float('1e-3', 3) 0.00100 >>> Float(100, 4) 100.0 Float can automatically count significant figures if a null string is sent for the precision; spaces or underscores are also allowed. (Auto- counting is only allowed for strings, ints and longs). >>> Float('123 456 789.123_456', '') 123456789.123456 >>> Float('12e-3', '') 0.012 >>> Float(3, '') 3. If a number is written in scientific notation, only the digits before the exponent are considered significant if a decimal appears, otherwise the "e" signifies only how to move the decimal: >>> Float('60.e2', '') # 2 digits significant 6.0e+3 >>> Float('60e2', '') # 4 digits significant 6000. >>> Float('600e-2', '') # 3 digits significant 6.00 Notes ===== Floats are inexact by their nature unless their value is a binary-exact value. >>> approx, exact = Float(.1, 1), Float(.125, 1) For calculation purposes, evalf needs to be able to change the precision but this will not increase the accuracy of the inexact value. The following is the most accurate 5-digit approximation of a value of 0.1 that had only 1 digit of precision: >>> approx.evalf(5) 0.099609 By contrast, 0.125 is exact in binary (as it is in base 10) and so it can be passed to Float or evalf to obtain an arbitrary precision with matching accuracy: >>> Float(exact, 5) 0.12500 >>> exact.evalf(20) 0.12500000000000000000 Trying to make a high-precision Float from a float is not disallowed, but one must keep in mind that the *underlying float* (not the apparent decimal value) is being obtained with high precision. For example, 0.3 does not have a finite binary representation. The closest rational is the fraction 5404319552844595/2**54. So if you try to obtain a Float of 0.3 to 20 digits of precision you will not see the same thing as 0.3 followed by 19 zeros: >>> Float(0.3, 20) 0.29999999999999998890 If you want a 20-digit value of the decimal 0.3 (not the floating point approximation of 0.3) you should send the 0.3 as a string. The underlying representation is still binary but a higher precision than Python's float is used: >>> Float('0.3', 20) 0.30000000000000000000 Although you can increase the precision of an existing Float using Float it will not increase the accuracy -- the underlying value is not changed: >>> def show(f): # binary rep of Float ... from sympy import Mul, Pow ... s, m, e, b = f._mpf_ ... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False) ... print('%s at prec=%s' % (v, f._prec)) ... 
>>> t = Float('0.3', 3) >>> show(t) 4915/2**14 at prec=13 >>> show(Float(t, 20)) # higher prec, not higher accuracy 4915/2**14 at prec=70 >>> show(Float(t, 2)) # lower prec 307/2**10 at prec=10 The same thing happens when evalf is used on a Float: >>> show(t.evalf(20)) 4915/2**14 at prec=70 >>> show(t.evalf(2)) 307/2**10 at prec=10 Finally, Floats can be instantiated with an mpf tuple (n, c, p) to produce the number (-1)**n*c*2**p: >>> n, c, p = 1, 5, 0 >>> (-1)**n*c*2**p -5 >>> Float((1, 5, 0)) -5.00000000000000 An actual mpf tuple also contains the number of bits in c as the last element of the tuple: >>> _._mpf_ (1, 5, 0, 3) This is not needed for instantiation and is not the same thing as the precision. The mpf tuple and the precision are two separate quantities that Float tracks. In SymPy, a Float is a number that can be computed with arbitrary precision. Although floating point 'inf' and 'nan' are not such numbers, Float can create these numbers: >>> Float('-inf') -oo >>> _.is_Float False """ __slots__ = ['_mpf_', '_prec'] # A Float represents many real numbers, # both rational and irrational. is_rational = None is_irrational = None is_number = True is_real = True is_extended_real = True is_Float = True def __new__(cls, num, dps=None, prec=None, precision=None): if prec is not None: SymPyDeprecationWarning( feature="Using 'prec=XX' to denote decimal precision", useinstead="'dps=XX' for decimal precision and 'precision=XX' "\ "for binary precision", issue=12820, deprecated_since_version="1.1").warn() dps = prec del prec # avoid using this deprecated kwarg if dps is not None and precision is not None: raise ValueError('Both decimal and binary precision supplied. ' 'Supply only one. ') if isinstance(num, string_types): # Float accepts spaces as digit separators num = num.replace(' ', '').lower() # in Py 3.6 # underscores are allowed. In anticipation of that, we ignore # legally placed underscores if '_' in num: parts = num.split('_') if not (all(parts) and all(parts[i][-1].isdigit() for i in range(0, len(parts), 2)) and all(parts[i][0].isdigit() for i in range(1, len(parts), 2))): # copy Py 3.6 error raise ValueError("could not convert string to float: '%s'" % num) num = ''.join(parts) if num.startswith('.') and len(num) > 1: num = '0' + num elif num.startswith('-.') and len(num) > 2: num = '-0.' + num[2:] elif num in ('inf', '+inf'): return S.Infinity elif num == '-inf': return S.NegativeInfinity elif isinstance(num, float) and num == 0: num = '0' elif isinstance(num, float) and num == float('inf'): return S.Infinity elif isinstance(num, float) and num == float('-inf'): return S.NegativeInfinity elif isinstance(num, float) and num == float('nan'): return S.NaN elif isinstance(num, (SYMPY_INTS, Integer)): num = str(num) elif num is S.Infinity: return num elif num is S.NegativeInfinity: return num elif num is S.NaN: return num elif type(num).__module__ == 'numpy': # support for numpy datatypes num = _convert_numpy_types(num) elif isinstance(num, mpmath.mpf): if precision is None: if dps is None: precision = num.context.prec num = num._mpf_ if dps is None and precision is None: dps = 15 if isinstance(num, Float): return num if isinstance(num, string_types) and _literal_float(num): try: Num = decimal.Decimal(num) except decimal.InvalidOperation: pass else: isint = '.' 
not in num num, dps = _decimal_to_Rational_prec(Num) if num.is_Integer and isint: dps = max(dps, len(str(num).lstrip('-'))) dps = max(15, dps) precision = mlib.libmpf.dps_to_prec(dps) elif precision == '' and dps is None or precision is None and dps == '': if not isinstance(num, string_types): raise ValueError('The null string can only be used when ' 'the number to Float is passed as a string or an integer.') ok = None if _literal_float(num): try: Num = decimal.Decimal(num) except decimal.InvalidOperation: pass else: isint = '.' not in num num, dps = _decimal_to_Rational_prec(Num) if num.is_Integer and isint: dps = max(dps, len(str(num).lstrip('-'))) precision = mlib.libmpf.dps_to_prec(dps) ok = True if ok is None: raise ValueError('string-float not recognized: %s' % num) # decimal precision(dps) is set and maybe binary precision(precision) # as well.From here on binary precision is used to compute the Float. # Hence, if supplied use binary precision else translate from decimal # precision. if precision is None or precision == '': precision = mlib.libmpf.dps_to_prec(dps) precision = int(precision) if isinstance(num, float): _mpf_ = mlib.from_float(num, precision, rnd) elif isinstance(num, string_types): _mpf_ = mlib.from_str(num, precision, rnd) elif isinstance(num, decimal.Decimal): if num.is_finite(): _mpf_ = mlib.from_str(str(num), precision, rnd) elif num.is_nan(): return S.NaN elif num.is_infinite(): if num > 0: return S.Infinity return S.NegativeInfinity else: raise ValueError("unexpected decimal value %s" % str(num)) elif isinstance(num, tuple) and len(num) in (3, 4): if type(num[1]) is str: # it's a hexadecimal (coming from a pickled object) # assume that it is in standard form num = list(num) # If we're loading an object pickled in Python 2 into # Python 3, we may need to strip a tailing 'L' because # of a shim for int on Python 3, see issue #13470. 
if num[1].endswith('L'): num[1] = num[1][:-1] num[1] = MPZ(num[1], 16) _mpf_ = tuple(num) else: if len(num) == 4: # handle normalization hack return Float._new(num, precision) else: if not all(( num[0] in (0, 1), num[1] >= 0, all(type(i) in (long, int) for i in num) )): raise ValueError('malformed mpf: %s' % (num,)) # don't compute number or else it may # over/underflow return Float._new( (num[0], num[1], num[2], bitcount(num[1])), precision) else: try: _mpf_ = num._as_mpf_val(precision) except (NotImplementedError, AttributeError): _mpf_ = mpmath.mpf(num, prec=precision)._mpf_ return cls._new(_mpf_, precision, zero=False) @classmethod def _new(cls, _mpf_, _prec, zero=True): # special cases if zero and _mpf_ == fzero: return S.Zero # Float(0) -> 0.0; Float._new((0,0,0,0)) -> 0 elif _mpf_ == _mpf_nan: return S.NaN elif _mpf_ == _mpf_inf: return S.Infinity elif _mpf_ == _mpf_ninf: return S.NegativeInfinity obj = Expr.__new__(cls) obj._mpf_ = mpf_norm(_mpf_, _prec) obj._prec = _prec return obj # mpz can't be pickled def __getnewargs__(self): return (mlib.to_pickable(self._mpf_),) def __getstate__(self): return {'_prec': self._prec} def _hashable_content(self): return (self._mpf_, self._prec) def floor(self): return Integer(int(mlib.to_int( mlib.mpf_floor(self._mpf_, self._prec)))) def ceiling(self): return Integer(int(mlib.to_int( mlib.mpf_ceil(self._mpf_, self._prec)))) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() @property def num(self): return mpmath.mpf(self._mpf_) def _as_mpf_val(self, prec): rv = mpf_norm(self._mpf_, prec) if rv != self._mpf_ and self._prec == prec: debug(self._mpf_, rv) return rv def _as_mpf_op(self, prec): return self._mpf_, max(prec, self._prec) def _eval_is_finite(self): if self._mpf_ in (_mpf_inf, _mpf_ninf): return False return True def _eval_is_infinite(self): if self._mpf_ in (_mpf_inf, _mpf_ninf): return True return False def _eval_is_integer(self): return self._mpf_ == fzero def _eval_is_negative(self): if self._mpf_ == _mpf_ninf or self._mpf_ == _mpf_inf: return False return self.num < 0 def _eval_is_positive(self): if self._mpf_ == _mpf_ninf or self._mpf_ == _mpf_inf: return False return self.num > 0 def _eval_is_extended_negative(self): if self._mpf_ == _mpf_ninf: return True if self._mpf_ == _mpf_inf: return False return self.num < 0 def _eval_is_extended_positive(self): if self._mpf_ == _mpf_inf: return True if self._mpf_ == _mpf_ninf: return False return self.num > 0 def _eval_is_zero(self): return self._mpf_ == fzero def __nonzero__(self): return self._mpf_ != fzero __bool__ = __nonzero__ def __neg__(self): return Float._new(mlib.mpf_neg(self._mpf_), self._prec) @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec) return Number.__add__(self, other) @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec) return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec) return Number.__mul__(self, other) @_sympifyit('other', NotImplemented) def __div__(self, other): if 
isinstance(other, Number) and other != 0 and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec) return Number.__div__(self, other) __truediv__ = __div__ @_sympifyit('other', NotImplemented) def __mod__(self, other): if isinstance(other, Rational) and other.q != 1 and global_parameters.evaluate: # calculate mod with Rationals, *then* round the result return Float(Rational.__mod__(Rational(self), other), precision=self._prec) if isinstance(other, Float) and global_parameters.evaluate: r = self/other if r == int(r): return Float(0, precision=max(self._prec, other._prec)) if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec) return Number.__mod__(self, other) @_sympifyit('other', NotImplemented) def __rmod__(self, other): if isinstance(other, Float) and global_parameters.evaluate: return other.__mod__(self) if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec) return Number.__rmod__(self, other) def _eval_power(self, expt): """ expt is symbolic object but not equal to 0, 1 (-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) -> -> p**r*(sin(Pi*r) + cos(Pi*r)*I) """ if self == 0: if expt.is_positive: return S.Zero if expt.is_negative: return S.Infinity if isinstance(expt, Number): if isinstance(expt, Integer): prec = self._prec return Float._new( mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec) elif isinstance(expt, Rational) and \ expt.p == 1 and expt.q % 2 and self.is_negative: return Pow(S.NegativeOne, expt, evaluate=False)*( -self)._eval_power(expt) expt, prec = expt._as_mpf_op(self._prec) mpfself = self._mpf_ try: y = mpf_pow(mpfself, expt, prec, rnd) return Float._new(y, prec) except mlib.ComplexResult: re, im = mlib.mpc_pow( (mpfself, fzero), (expt, fzero), prec, rnd) return Float._new(re, prec) + \ Float._new(im, prec)*S.ImaginaryUnit def __abs__(self): return Float._new(mlib.mpf_abs(self._mpf_), self._prec) def __int__(self): if self._mpf_ == fzero: return 0 return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down __long__ = __int__ def __eq__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if not self: return not other if other.is_NumberSymbol: if other.is_irrational: return False return other.__eq__(self) if other.is_Float: # comparison is exact # so Float(.1, 3) != Float(.1, 33) return self._mpf_ == other._mpf_ if other.is_Rational: return other.__eq__(self) if other.is_Number: # numbers should compare at the same precision; # all _as_mpf_val routines should be sure to abide # by the request to change the prec if necessary; if # they don't, the equality test will fail since it compares # the mpf tuples ompf = other._as_mpf_val(self._prec) return bool(mlib.mpf_eq(self._mpf_, ompf)) return False # Float != non-Number def __ne__(self, other): return not self == other def _Frel(self, other, op): from sympy.core.evalf import evalf from sympy.core.numbers import prec_to_dps try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Rational: # test self*other.q <?> other.p without losing precision ''' >>> f = Float(.1,2) >>> i = 1234567890 >>> (f*i)._mpf_ (0, 471, 18, 9) >>> mlib.mpf_mul(f._mpf_, mlib.from_int(i)) (0, 505555550955, -12, 39) ''' smpf = mlib.mpf_mul(self._mpf_, mlib.from_int(other.q)) ompf = 
mlib.from_int(other.p) return _sympify(bool(op(smpf, ompf))) elif other.is_Float: return _sympify(bool( op(self._mpf_, other._mpf_))) elif other.is_comparable and other not in ( S.Infinity, S.NegativeInfinity): other = other.evalf(prec_to_dps(self._prec)) if other._prec > 1: if other.is_Number: return _sympify(bool( op(self._mpf_, other._as_mpf_val(self._prec)))) def __gt__(self, other): if isinstance(other, NumberSymbol): return other.__lt__(self) rv = self._Frel(other, mlib.mpf_gt) if rv is None: return Expr.__gt__(self, other) return rv def __ge__(self, other): if isinstance(other, NumberSymbol): return other.__le__(self) rv = self._Frel(other, mlib.mpf_ge) if rv is None: return Expr.__ge__(self, other) return rv def __lt__(self, other): if isinstance(other, NumberSymbol): return other.__gt__(self) rv = self._Frel(other, mlib.mpf_lt) if rv is None: return Expr.__lt__(self, other) return rv def __le__(self, other): if isinstance(other, NumberSymbol): return other.__ge__(self) rv = self._Frel(other, mlib.mpf_le) if rv is None: return Expr.__le__(self, other) return rv def __hash__(self): return super(Float, self).__hash__() def epsilon_eq(self, other, epsilon="1e-15"): return abs(self - other) < Float(epsilon) def _sage_(self): import sage.all as sage return sage.RealNumber(str(self)) def __format__(self, format_spec): return format(decimal.Decimal(str(self)), format_spec) # Add sympify converters converter[float] = converter[decimal.Decimal] = Float # this is here to work nicely in Sage RealNumber = Float class Rational(Number): """Represents rational numbers (p/q) of any size. Examples ======== >>> from sympy import Rational, nsimplify, S, pi >>> Rational(1, 2) 1/2 Rational is unprejudiced in accepting input. If a float is passed, the underlying value of the binary representation will be returned: >>> Rational(.5) 1/2 >>> Rational(.2) 3602879701896397/18014398509481984 If the simpler representation of the float is desired then consider limiting the denominator to the desired value or convert the float to a string (which is roughly equivalent to limiting the denominator to 10**12): >>> Rational(str(.2)) 1/5 >>> Rational(.2).limit_denominator(10**12) 1/5 An arbitrarily precise Rational is obtained when a string literal is passed: >>> Rational("1.23") 123/100 >>> Rational('1e-2') 1/100 >>> Rational(".1") 1/10 >>> Rational('1e-2/3.2') 1/320 The conversion of other types of strings can be handled by the sympify() function, and conversion of floats to expressions or simple fractions can be handled with nsimplify: >>> S('.[3]') # repeating digits in brackets 1/3 >>> S('3**2/10') # general expressions 9/10 >>> nsimplify(.3) # numbers that have a simple form 3/10 But if the input does not reduce to a literal Rational, an error will be raised: >>> Rational(pi) Traceback (most recent call last): ... 
TypeError: invalid input: pi Low-level --------- Access numerator and denominator as .p and .q: >>> r = Rational(3, 4) >>> r 3/4 >>> r.p 3 >>> r.q 4 Note that p and q return integers (not SymPy Integers) so some care is needed when using them in expressions: >>> r.p/r.q 0.75 See Also ======== sympy.core.sympify.sympify, sympy.simplify.simplify.nsimplify """ is_real = True is_integer = False is_rational = True is_number = True __slots__ = ['p', 'q'] is_Rational = True @cacheit def __new__(cls, p, q=None, gcd=None): if q is None: if isinstance(p, Rational): return p if isinstance(p, SYMPY_INTS): pass else: if isinstance(p, (float, Float)): return Rational(*_as_integer_ratio(p)) if not isinstance(p, string_types): try: p = sympify(p) except (SympifyError, SyntaxError): pass # error will raise below else: if p.count('/') > 1: raise TypeError('invalid input: %s' % p) p = p.replace(' ', '') pq = p.rsplit('/', 1) if len(pq) == 2: p, q = pq fp = fractions.Fraction(p) fq = fractions.Fraction(q) p = fp/fq try: p = fractions.Fraction(p) except ValueError: pass # error will raise below else: return Rational(p.numerator, p.denominator, 1) if not isinstance(p, Rational): raise TypeError('invalid input: %s' % p) q = 1 gcd = 1 else: p = Rational(p) q = Rational(q) if isinstance(q, Rational): p *= q.q q = q.p if isinstance(p, Rational): q *= p.q p = p.p # p and q are now integers if q == 0: if p == 0: if _errdict["divide"]: raise ValueError("Indeterminate 0/0") else: return S.NaN return S.ComplexInfinity if q < 0: q = -q p = -p if not gcd: gcd = igcd(abs(p), q) if gcd > 1: p //= gcd q //= gcd if q == 1: return Integer(p) if p == 1 and q == 2: return S.Half obj = Expr.__new__(cls) obj.p = p obj.q = q return obj def limit_denominator(self, max_denominator=1000000): """Closest Rational to self with denominator at most max_denominator. 
>>> from sympy import Rational >>> Rational('3.141592653589793').limit_denominator(10) 22/7 >>> Rational('3.141592653589793').limit_denominator(100) 311/99 """ f = fractions.Fraction(self.p, self.q) return Rational(f.limit_denominator(fractions.Fraction(int(max_denominator)))) def __getnewargs__(self): return (self.p, self.q) def _hashable_content(self): return (self.p, self.q) def _eval_is_positive(self): return self.p > 0 def _eval_is_zero(self): return self.p == 0 def __neg__(self): return Rational(-self.p, self.q) @_sympifyit('other', NotImplemented) def __add__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.p + self.q*other.p, self.q, 1) elif isinstance(other, Rational): #TODO: this can probably be optimized more return Rational(self.p*other.q + self.q*other.p, self.q*other.q) elif isinstance(other, Float): return other + self else: return Number.__add__(self, other) return Number.__add__(self, other) __radd__ = __add__ @_sympifyit('other', NotImplemented) def __sub__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.p - self.q*other.p, self.q, 1) elif isinstance(other, Rational): return Rational(self.p*other.q - self.q*other.p, self.q*other.q) elif isinstance(other, Float): return -other + self else: return Number.__sub__(self, other) return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __rsub__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.q*other.p - self.p, self.q, 1) elif isinstance(other, Rational): return Rational(self.q*other.p - self.p*other.q, self.q*other.q) elif isinstance(other, Float): return -self + other else: return Number.__rsub__(self, other) return Number.__rsub__(self, other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.p*other.p, self.q, igcd(other.p, self.q)) elif isinstance(other, Rational): return Rational(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p)) elif isinstance(other, Float): return other*self else: return Number.__mul__(self, other) return Number.__mul__(self, other) __rmul__ = __mul__ @_sympifyit('other', NotImplemented) def __div__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): if self.p and other.p == S.Zero: return S.ComplexInfinity else: return Rational(self.p, self.q*other.p, igcd(self.p, other.p)) elif isinstance(other, Rational): return Rational(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q)) elif isinstance(other, Float): return self*(1/other) else: return Number.__div__(self, other) return Number.__div__(self, other) @_sympifyit('other', NotImplemented) def __rdiv__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(other.p*self.q, self.p, igcd(self.p, other.p)) elif isinstance(other, Rational): return Rational(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q)) elif isinstance(other, Float): return other*(1/self) else: return Number.__rdiv__(self, other) return Number.__rdiv__(self, other) __truediv__ = __div__ @_sympifyit('other', NotImplemented) def __mod__(self, other): if global_parameters.evaluate: if isinstance(other, Rational): n = (self.p*other.q) // (other.p*self.q) return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q) if isinstance(other, Float): # calculate mod with Rationals, *then* round the answer 
return Float(self.__mod__(Rational(other)), precision=other._prec) return Number.__mod__(self, other) return Number.__mod__(self, other) @_sympifyit('other', NotImplemented) def __rmod__(self, other): if isinstance(other, Rational): return Rational.__mod__(other, self) return Number.__rmod__(self, other) def _eval_power(self, expt): if isinstance(expt, Number): if isinstance(expt, Float): return self._eval_evalf(expt._prec)**expt if expt.is_extended_negative: # (3/4)**-2 -> (4/3)**2 ne = -expt if (ne is S.One): return Rational(self.q, self.p) if self.is_negative: return S.NegativeOne**expt*Rational(self.q, -self.p)**ne else: return Rational(self.q, self.p)**ne if expt is S.Infinity: # -oo already caught by test for negative if self.p > self.q: # (3/2)**oo -> oo return S.Infinity if self.p < -self.q: # (-3/2)**oo -> oo + I*oo return S.Infinity + S.Infinity*S.ImaginaryUnit return S.Zero if isinstance(expt, Integer): # (4/3)**2 -> 4**2 / 3**2 return Rational(self.p**expt.p, self.q**expt.p, 1) if isinstance(expt, Rational): if self.p != 1: # (4/3)**(5/6) -> 4**(5/6)*3**(-5/6) return Integer(self.p)**expt*Integer(self.q)**(-expt) # as the above caught negative self.p, now self is positive return Integer(self.q)**Rational( expt.p*(expt.q - 1), expt.q) / \ Integer(self.q)**Integer(expt.p) if self.is_extended_negative and expt.is_even: return (-self)**expt return def _as_mpf_val(self, prec): return mlib.from_rational(self.p, self.q, prec, rnd) def _mpmath_(self, prec, rnd): return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd)) def __abs__(self): return Rational(abs(self.p), self.q) def __int__(self): p, q = self.p, self.q if p < 0: return -int(-p//q) return int(p//q) __long__ = __int__ def floor(self): return Integer(self.p // self.q) def ceiling(self): return -Integer(-self.p // self.q) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() def __eq__(self, other): from sympy.core.power import integer_log try: other = _sympify(other) except SympifyError: return NotImplemented if not isinstance(other, Number): # S(0) == S.false is False # S(0) == False is True return False if not self: return not other if other.is_NumberSymbol: if other.is_irrational: return False return other.__eq__(self) if other.is_Rational: # a Rational is always in reduced form so will never be 2/4 # so we can just check equivalence of args return self.p == other.p and self.q == other.q if other.is_Float: # all Floats have a denominator that is a power of 2 # so if self doesn't, it can't be equal to other if self.q & (self.q - 1): return False s, m, t = other._mpf_[:3] if s: m = -m if not t: # other is an odd integer if not self.is_Integer or self.is_even: return False return m == self.p if t > 0: # other is an even integer if not self.is_Integer: return False # does m*2**t == self.p return self.p and not self.p % m and \ integer_log(self.p//m, 2) == (t, True) # does non-integer s*m/2**-t = p/q? 
if self.is_Integer: return False return m == self.p and integer_log(self.q, 2) == (-t, True) return False def __ne__(self, other): return not self == other def _Rrel(self, other, attr): # if you want self < other, pass self, other, __gt__ try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Number: op = None s, o = self, other if other.is_NumberSymbol: op = getattr(o, attr) elif other.is_Float: op = getattr(o, attr) elif other.is_Rational: s, o = Integer(s.p*o.q), Integer(s.q*o.p) op = getattr(o, attr) if op: return op(s) if o.is_number and o.is_extended_real: return Integer(s.p), s.q*o def __gt__(self, other): rv = self._Rrel(other, '__lt__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__gt__(*rv) def __ge__(self, other): rv = self._Rrel(other, '__le__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__ge__(*rv) def __lt__(self, other): rv = self._Rrel(other, '__gt__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__lt__(*rv) def __le__(self, other): rv = self._Rrel(other, '__ge__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__le__(*rv) def __hash__(self): return super(Rational, self).__hash__() def factors(self, limit=None, use_trial=True, use_rho=False, use_pm1=False, verbose=False, visual=False): """A wrapper to factorint which return factors of self that are smaller than limit (or cheap to compute). Special methods of factoring are disabled by default so that only trial division is used. """ from sympy.ntheory import factorrat return factorrat(self, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose).copy() def numerator(self): return self.p def denominator(self): return self.q @_sympifyit('other', NotImplemented) def gcd(self, other): if isinstance(other, Rational): if other == S.Zero: return other return Rational( Integer(igcd(self.p, other.p)), Integer(ilcm(self.q, other.q))) return Number.gcd(self, other) @_sympifyit('other', NotImplemented) def lcm(self, other): if isinstance(other, Rational): return Rational( self.p // igcd(self.p, other.p) * other.p, igcd(self.q, other.q)) return Number.lcm(self, other) def as_numer_denom(self): return Integer(self.p), Integer(self.q) def _sage_(self): import sage.all as sage return sage.Integer(self.p)/sage.Integer(self.q) def as_content_primitive(self, radical=False, clear=True): """Return the tuple (R, self/R) where R is the positive Rational extracted from self. Examples ======== >>> from sympy import S >>> (S(-3)/2).as_content_primitive() (3/2, -1) See docstring of Expr.as_content_primitive for more examples. """ if self: if self.is_positive: return self, S.One return -self, S.NegativeOne return S.One, self def as_coeff_Mul(self, rational=False): """Efficiently extract the coefficient of a product. """ return self, S.One def as_coeff_Add(self, rational=False): """Efficiently extract the coefficient of a summation. """ return self, S.Zero class Integer(Rational): """Represents integer numbers of any size. Examples ======== >>> from sympy import Integer >>> Integer(3) 3 If a float or a rational is passed to Integer, the fractional part will be discarded; the effect is of rounding toward zero. 
>>> Integer(3.8) 3 >>> Integer(-3.8) -3 A string is acceptable input if it can be parsed as an integer: >>> Integer("9" * 20) 99999999999999999999 It is rarely needed to explicitly instantiate an Integer, because Python integers are automatically converted to Integer when they are used in SymPy expressions. """ q = 1 is_integer = True is_number = True is_Integer = True __slots__ = ['p'] def _as_mpf_val(self, prec): return mlib.from_int(self.p, prec, rnd) def _mpmath_(self, prec, rnd): return mpmath.make_mpf(self._as_mpf_val(prec)) @cacheit def __new__(cls, i): if isinstance(i, string_types): i = i.replace(' ', '') # whereas we cannot, in general, make a Rational from an # arbitrary expression, we can make an Integer unambiguously # (except when a non-integer expression happens to round to # an integer). So we proceed by taking int() of the input and # let the int routines determine whether the expression can # be made into an int or whether an error should be raised. try: ival = int(i) except TypeError: raise TypeError( "Argument of Integer should be of numeric type, got %s." % i) # We only work with well-behaved integer types. This converts, for # example, numpy.int32 instances. if ival == 1: return S.One if ival == -1: return S.NegativeOne if ival == 0: return S.Zero obj = Expr.__new__(cls) obj.p = ival return obj def __getnewargs__(self): return (self.p,) # Arithmetic operations are here for efficiency def __int__(self): return self.p __long__ = __int__ def floor(self): return Integer(self.p) def ceiling(self): return Integer(self.p) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() def __neg__(self): return Integer(-self.p) def __abs__(self): if self.p >= 0: return self else: return Integer(-self.p) def __divmod__(self, other): from .containers import Tuple if isinstance(other, Integer) and global_parameters.evaluate: return Tuple(*(divmod(self.p, other.p))) else: return Number.__divmod__(self, other) def __rdivmod__(self, other): from .containers import Tuple if isinstance(other, integer_types) and global_parameters.evaluate: return Tuple(*(divmod(other, self.p))) else: try: other = Number(other) except TypeError: msg = "unsupported operand type(s) for divmod(): '%s' and '%s'" oname = type(other).__name__ sname = type(self).__name__ raise TypeError(msg % (oname, sname)) return Number.__divmod__(other, self) # TODO make it decorator + bytecodehacks? 
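# The arithmetic methods below share one fast-path pattern: when global
# evaluation is enabled, plain Python ints, Integer and Rational operands are
# handled directly with .p/.q integer arithmetic, and anything else falls
# back to the corresponding Rational (and ultimately Number) implementation.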
def __add__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(self.p + other) elif isinstance(other, Integer): return Integer(self.p + other.p) elif isinstance(other, Rational): return Rational(self.p*other.q + other.p, other.q, 1) return Rational.__add__(self, other) else: return Add(self, other) def __radd__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(other + self.p) elif isinstance(other, Rational): return Rational(other.p + self.p*other.q, other.q, 1) return Rational.__radd__(self, other) return Rational.__radd__(self, other) def __sub__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(self.p - other) elif isinstance(other, Integer): return Integer(self.p - other.p) elif isinstance(other, Rational): return Rational(self.p*other.q - other.p, other.q, 1) return Rational.__sub__(self, other) return Rational.__sub__(self, other) def __rsub__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(other - self.p) elif isinstance(other, Rational): return Rational(other.p - self.p*other.q, other.q, 1) return Rational.__rsub__(self, other) return Rational.__rsub__(self, other) def __mul__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(self.p*other) elif isinstance(other, Integer): return Integer(self.p*other.p) elif isinstance(other, Rational): return Rational(self.p*other.p, other.q, igcd(self.p, other.q)) return Rational.__mul__(self, other) return Rational.__mul__(self, other) def __rmul__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(other*self.p) elif isinstance(other, Rational): return Rational(other.p*self.p, other.q, igcd(self.p, other.q)) return Rational.__rmul__(self, other) return Rational.__rmul__(self, other) def __mod__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(self.p % other) elif isinstance(other, Integer): return Integer(self.p % other.p) return Rational.__mod__(self, other) return Rational.__mod__(self, other) def __rmod__(self, other): if global_parameters.evaluate: if isinstance(other, integer_types): return Integer(other % self.p) elif isinstance(other, Integer): return Integer(other.p % self.p) return Rational.__rmod__(self, other) return Rational.__rmod__(self, other) def __eq__(self, other): if isinstance(other, integer_types): return (self.p == other) elif isinstance(other, Integer): return (self.p == other.p) return Rational.__eq__(self, other) def __ne__(self, other): return not self == other def __gt__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p > other.p) return Rational.__gt__(self, other) def __lt__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p < other.p) return Rational.__lt__(self, other) def __ge__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p >= other.p) return Rational.__ge__(self, other) def __le__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p <= other.p) return Rational.__le__(self, other) def __hash__(self): return hash(self.p) def __index__(self): return self.p 
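    # Illustrative note (comments only): __index__ above lets an Integer be
    # used anywhere Python expects an exact integer, e.g. as a sequence index:
    #
    #   >>> 'abcde'[Integer(2)]
    #   'c'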
######################################## def _eval_is_odd(self): return bool(self.p % 2) def _eval_power(self, expt): """ Tries to do some simplifications on self**expt Returns None if no further simplifications can be done When exponent is a fraction (so we have for example a square root), we try to find a simpler representation by factoring the argument up to factors of 2**15, e.g. - sqrt(4) becomes 2 - sqrt(-4) becomes 2*I - (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7) Further simplification would require a special call to factorint on the argument which is not done here for sake of speed. """ from sympy.ntheory.factor_ import perfect_power if expt is S.Infinity: if self.p > S.One: return S.Infinity # cases -1, 0, 1 are done in their respective classes return S.Infinity + S.ImaginaryUnit*S.Infinity if expt is S.NegativeInfinity: return Rational(1, self)**S.Infinity if not isinstance(expt, Number): # simplify when expt is even # (-2)**k --> 2**k if self.is_negative and expt.is_even: return (-self)**expt if isinstance(expt, Float): # Rational knows how to exponentiate by a Float return super(Integer, self)._eval_power(expt) if not isinstance(expt, Rational): return if expt is S.Half and self.is_negative: # we extract I for this special case since everyone is doing so return S.ImaginaryUnit*Pow(-self, expt) if expt.is_negative: # invert base and change sign on exponent ne = -expt if self.is_negative: return S.NegativeOne**expt*Rational(1, -self)**ne else: return Rational(1, self.p)**ne # see if base is a perfect root, sqrt(4) --> 2 x, xexact = integer_nthroot(abs(self.p), expt.q) if xexact: # if it's a perfect root we've finished result = Integer(x**abs(expt.p)) if self.is_negative: result *= S.NegativeOne**expt return result # The following is an algorithm where we collect perfect roots # from the factors of base. 
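        # Worked illustration (comments only) of the collection below, using
        # the docstring example (2**10 * 3**13)**Rational(1, 7):
        #   2**10: divmod(10, 7) = (1, 3) -> 2 joins out_int, 2**3 is left over
        #   3**13: divmod(13, 7) = (1, 6) -> 3 joins out_int, 3**6 is left over
        #   gcd of the leftover exponents (3, 6) is 3, so sqr_int = 2*3**2 = 18
        #   giving out_int*sqr_int**(3/7) = 6*18**(3/7)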
# if it's not an nth root, it still might be a perfect power b_pos = int(abs(self.p)) p = perfect_power(b_pos) if p is not False: dict = {p[0]: p[1]} else: dict = Integer(b_pos).factors(limit=2**15) # now process the dict of factors out_int = 1 # integer part out_rad = 1 # extracted radicals sqr_int = 1 sqr_gcd = 0 sqr_dict = {} for prime, exponent in dict.items(): exponent *= expt.p # remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10) div_e, div_m = divmod(exponent, expt.q) if div_e > 0: out_int *= prime**div_e if div_m > 0: # see if the reduced exponent shares a gcd with e.q # (2**2)**(1/10) -> 2**(1/5) g = igcd(div_m, expt.q) if g != 1: out_rad *= Pow(prime, Rational(div_m//g, expt.q//g)) else: sqr_dict[prime] = div_m # identify gcd of remaining powers for p, ex in sqr_dict.items(): if sqr_gcd == 0: sqr_gcd = ex else: sqr_gcd = igcd(sqr_gcd, ex) if sqr_gcd == 1: break for k, v in sqr_dict.items(): sqr_int *= k**(v//sqr_gcd) if sqr_int == b_pos and out_int == 1 and out_rad == 1: result = None else: result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q)) if self.is_negative: result *= Pow(S.NegativeOne, expt) return result def _eval_is_prime(self): from sympy.ntheory import isprime return isprime(self) def _eval_is_composite(self): if self > 1: return fuzzy_not(self.is_prime) else: return False def as_numer_denom(self): return self, S.One @_sympifyit('other', NotImplemented) def __floordiv__(self, other): if not isinstance(other, Expr): return NotImplemented if isinstance(other, Integer): return Integer(self.p // other) return Integer(divmod(self, other)[0]) def __rfloordiv__(self, other): return Integer(Integer(other).p // self.p) # Add sympify converters for i_type in integer_types: converter[i_type] = Integer class AlgebraicNumber(Expr): """Class for representing algebraic numbers in SymPy. """ __slots__ = ['rep', 'root', 'alias', 'minpoly'] is_AlgebraicNumber = True is_algebraic = True is_number = True def __new__(cls, expr, coeffs=None, alias=None, **args): """Construct a new algebraic number. """ from sympy import Poly from sympy.polys.polyclasses import ANP, DMP from sympy.polys.numberfields import minimal_polynomial from sympy.core.symbol import Symbol expr = sympify(expr) if isinstance(expr, (tuple, Tuple)): minpoly, root = expr if not minpoly.is_Poly: minpoly = Poly(minpoly) elif expr.is_AlgebraicNumber: minpoly, root = expr.minpoly, expr.root else: minpoly, root = minimal_polynomial( expr, args.get('gen'), polys=True), expr dom = minpoly.get_domain() if coeffs is not None: if not isinstance(coeffs, ANP): rep = DMP.from_sympy_list(sympify(coeffs), 0, dom) scoeffs = Tuple(*coeffs) else: rep = DMP.from_list(coeffs.to_list(), 0, dom) scoeffs = Tuple(*coeffs.to_list()) if rep.degree() >= minpoly.degree(): rep = rep.rem(minpoly.rep) else: rep = DMP.from_list([1, 0], 0, dom) scoeffs = Tuple(1, 0) sargs = (root, scoeffs) if alias is not None: if not isinstance(alias, Symbol): alias = Symbol(alias) sargs = sargs + (alias,) obj = Expr.__new__(cls, *sargs) obj.rep = rep obj.root = root obj.alias = alias obj.minpoly = minpoly return obj def __hash__(self): return super(AlgebraicNumber, self).__hash__() def _eval_evalf(self, prec): return self.as_expr()._evalf(prec) @property def is_aliased(self): """Returns ``True`` if ``alias`` was set. """ return self.alias is not None def as_poly(self, x=None): """Create a Poly instance from ``self``. 
""" from sympy import Dummy, Poly, PurePoly if x is not None: return Poly.new(self.rep, x) else: if self.alias is not None: return Poly.new(self.rep, self.alias) else: return PurePoly.new(self.rep, Dummy('x')) def as_expr(self, x=None): """Create a Basic expression from ``self``. """ return self.as_poly(x or self.root).as_expr().expand() def coeffs(self): """Returns all SymPy coefficients of an algebraic number. """ return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ] def native_coeffs(self): """Returns all native coefficients of an algebraic number. """ return self.rep.all_coeffs() def to_algebraic_integer(self): """Convert ``self`` to an algebraic integer. """ from sympy import Poly f = self.minpoly if f.LC() == 1: return self coeff = f.LC()**(f.degree() - 1) poly = f.compose(Poly(f.gen/f.LC())) minpoly = poly*coeff root = f.LC()*self.root return AlgebraicNumber((minpoly, root), self.coeffs()) def _eval_simplify(self, **kwargs): from sympy.polys import CRootOf, minpoly measure, ratio = kwargs['measure'], kwargs['ratio'] for r in [r for r in self.minpoly.all_roots() if r.func != CRootOf]: if minpoly(self.root - r).is_Symbol: # use the matching root if it's simpler if measure(r) < ratio*measure(self.root): return AlgebraicNumber(r) return self class RationalConstant(Rational): """ Abstract base class for rationals with specific behaviors Derived classes must define class attributes p and q and should probably all be singletons. """ __slots__ = [] def __new__(cls): return AtomicExpr.__new__(cls) class IntegerConstant(Integer): __slots__ = [] def __new__(cls): return AtomicExpr.__new__(cls) class Zero(with_metaclass(Singleton, IntegerConstant)): """The number zero. Zero is a singleton, and can be accessed by ``S.Zero`` Examples ======== >>> from sympy import S, Integer, zoo >>> Integer(0) is S.Zero True >>> 1/S.Zero zoo References ========== .. [1] https://en.wikipedia.org/wiki/Zero """ p = 0 q = 1 is_positive = False is_negative = False is_zero = True is_number = True is_comparable = True __slots__ = [] @staticmethod def __abs__(): return S.Zero @staticmethod def __neg__(): return S.Zero def _eval_power(self, expt): if expt.is_positive: return self if expt.is_negative: return S.ComplexInfinity if expt.is_extended_real is False: return S.NaN # infinities are already handled with pos and neg # tests above; now throw away leading numbers on Mul # exponent coeff, terms = expt.as_coeff_Mul() if coeff.is_negative: return S.ComplexInfinity**terms if coeff is not S.One: # there is a Number to discard return self**terms def _eval_order(self, *symbols): # Order(0,x) -> 0 return self def __nonzero__(self): return False __bool__ = __nonzero__ def as_coeff_Mul(self, rational=False): # XXX this routine should be deleted """Efficiently extract the coefficient of a summation. """ return S.One, self class One(with_metaclass(Singleton, IntegerConstant)): """The number one. One is a singleton, and can be accessed by ``S.One``. Examples ======== >>> from sympy import S, Integer >>> Integer(1) is S.One True References ========== .. 
[1] https://en.wikipedia.org/wiki/1_%28number%29 """ is_number = True p = 1 q = 1 __slots__ = [] @staticmethod def __abs__(): return S.One @staticmethod def __neg__(): return S.NegativeOne def _eval_power(self, expt): return self def _eval_order(self, *symbols): return @staticmethod def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False, verbose=False, visual=False): if visual: return S.One else: return {} class NegativeOne(with_metaclass(Singleton, IntegerConstant)): """The number negative one. NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``. Examples ======== >>> from sympy import S, Integer >>> Integer(-1) is S.NegativeOne True See Also ======== One References ========== .. [1] https://en.wikipedia.org/wiki/%E2%88%921_%28number%29 """ is_number = True p = -1 q = 1 __slots__ = [] @staticmethod def __abs__(): return S.One @staticmethod def __neg__(): return S.One def _eval_power(self, expt): if expt.is_odd: return S.NegativeOne if expt.is_even: return S.One if isinstance(expt, Number): if isinstance(expt, Float): return Float(-1.0)**expt if expt is S.NaN: return S.NaN if expt is S.Infinity or expt is S.NegativeInfinity: return S.NaN if expt is S.Half: return S.ImaginaryUnit if isinstance(expt, Rational): if expt.q == 2: return S.ImaginaryUnit**Integer(expt.p) i, r = divmod(expt.p, expt.q) if i: return self**i*self**Rational(r, expt.q) return class Half(with_metaclass(Singleton, RationalConstant)): """The rational number 1/2. Half is a singleton, and can be accessed by ``S.Half``. Examples ======== >>> from sympy import S, Rational >>> Rational(1, 2) is S.Half True References ========== .. [1] https://en.wikipedia.org/wiki/One_half """ is_number = True p = 1 q = 2 __slots__ = [] @staticmethod def __abs__(): return S.Half class Infinity(with_metaclass(Singleton, Number)): r"""Positive infinite quantity. In real analysis the symbol `\infty` denotes an unbounded limit: `x\to\infty` means that `x` grows without bound. Infinity is often used not only to define a limit but as a value in the affinely extended real number system. Points labeled `+\infty` and `-\infty` can be added to the topological space of the real numbers, producing the two-point compactification of the real numbers. Adding algebraic properties to this gives us the extended real numbers. Infinity is a singleton, and can be accessed by ``S.Infinity``, or can be imported as ``oo``. Examples ======== >>> from sympy import oo, exp, limit, Symbol >>> 1 + oo oo >>> 42/oo 0 >>> x = Symbol('x') >>> limit(exp(x), x, oo) oo See Also ======== NegativeInfinity, NaN References ========== .. 
[1] https://en.wikipedia.org/wiki/Infinity """ is_commutative = True is_number = True is_complex = False is_extended_real = True is_infinite = True is_comparable = True is_extended_positive = True is_prime = False __slots__ = [] def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"\infty" def _eval_subs(self, old, new): if self == old: return new def _eval_evalf(self, prec=None): return Float('inf') def evalf(self, prec=None, **options): return self._eval_evalf(prec) @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NegativeInfinity or other is S.NaN: return S.NaN return self return Number.__add__(self, other) __radd__ = __add__ @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or other is S.NaN: return S.NaN return self return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __rsub__(self, other): return (-self).__add__(other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other.is_zero or other is S.NaN: return S.NaN if other.is_extended_positive: return self return S.NegativeInfinity return Number.__mul__(self, other) __rmul__ = __mul__ @_sympifyit('other', NotImplemented) def __div__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or \ other is S.NegativeInfinity or \ other is S.NaN: return S.NaN if other.is_extended_nonnegative: return self return S.NegativeInfinity return Number.__div__(self, other) __truediv__ = __div__ def __abs__(self): return S.Infinity def __neg__(self): return S.NegativeInfinity def _eval_power(self, expt): """ ``expt`` is symbolic object but not equal to 0 or 1. ================ ======= ============================== Expression Result Notes ================ ======= ============================== ``oo ** nan`` ``nan`` ``oo ** -p`` ``0`` ``p`` is number, ``oo`` ================ ======= ============================== See Also ======== Pow NaN NegativeInfinity """ from sympy.functions import re if expt.is_extended_positive: return S.Infinity if expt.is_extended_negative: return S.Zero if expt is S.NaN: return S.NaN if expt is S.ComplexInfinity: return S.NaN if expt.is_extended_real is False and expt.is_number: expt_real = re(expt) if expt_real.is_positive: return S.ComplexInfinity if expt_real.is_negative: return S.Zero if expt_real.is_zero: return S.NaN return self**expt.evalf() def _as_mpf_val(self, prec): return mlib.finf def _sage_(self): import sage.all as sage return sage.oo def __hash__(self): return super(Infinity, self).__hash__() def __eq__(self, other): return other is S.Infinity or other == float('inf') def __ne__(self, other): return other is not S.Infinity and other != float('inf') __gt__ = Expr.__gt__ __ge__ = Expr.__ge__ __lt__ = Expr.__lt__ __le__ = Expr.__le__ @_sympifyit('other', NotImplemented) def __mod__(self, other): if not isinstance(other, Expr): return NotImplemented return S.NaN __rmod__ = __mod__ def floor(self): return self def ceiling(self): return self oo = S.Infinity class NegativeInfinity(with_metaclass(Singleton, Number)): """Negative infinite quantity. NegativeInfinity is a singleton, and can be accessed by ``S.NegativeInfinity``. 
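    Examples
    ========

    The results below follow from the arithmetic overrides defined in this
    class (illustrative doctests):

    >>> from sympy import oo
    >>> -oo + 5
    -oo
    >>> -oo - oo
    -oo
    >>> (-2)*(-oo)
    oo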
See Also ======== Infinity """ is_extended_real = True is_complex = False is_commutative = True is_infinite = True is_comparable = True is_extended_negative = True is_number = True is_prime = False __slots__ = [] def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"-\infty" def _eval_subs(self, old, new): if self == old: return new def _eval_evalf(self, prec=None): return Float('-inf') def evalf(self, prec=None, **options): return self._eval_evalf(prec) @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or other is S.NaN: return S.NaN return self return Number.__add__(self, other) __radd__ = __add__ @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NegativeInfinity or other is S.NaN: return S.NaN return self return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __rsub__(self, other): return (-self).__add__(other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other.is_zero or other is S.NaN: return S.NaN if other.is_extended_positive: return self return S.Infinity return Number.__mul__(self, other) __rmul__ = __mul__ @_sympifyit('other', NotImplemented) def __div__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or \ other is S.NegativeInfinity or \ other is S.NaN: return S.NaN if other.is_extended_nonnegative: return self return S.Infinity return Number.__div__(self, other) __truediv__ = __div__ def __abs__(self): return S.Infinity def __neg__(self): return S.Infinity def _eval_power(self, expt): """ ``expt`` is symbolic object but not equal to 0 or 1. ================ ======= ============================== Expression Result Notes ================ ======= ============================== ``(-oo) ** nan`` ``nan`` ``(-oo) ** oo`` ``nan`` ``(-oo) ** -oo`` ``nan`` ``(-oo) ** e`` ``oo`` ``e`` is positive even integer ``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer ================ ======= ============================== See Also ======== Infinity Pow NaN """ if expt.is_number: if expt is S.NaN or \ expt is S.Infinity or \ expt is S.NegativeInfinity: return S.NaN if isinstance(expt, Integer) and expt.is_extended_positive: if expt.is_odd: return S.NegativeInfinity else: return S.Infinity return S.NegativeOne**expt*S.Infinity**expt def _as_mpf_val(self, prec): return mlib.fninf def _sage_(self): import sage.all as sage return -(sage.oo) def __hash__(self): return super(NegativeInfinity, self).__hash__() def __eq__(self, other): return other is S.NegativeInfinity or other == float('-inf') def __ne__(self, other): return other is not S.NegativeInfinity and other != float('-inf') __gt__ = Expr.__gt__ __ge__ = Expr.__ge__ __lt__ = Expr.__lt__ __le__ = Expr.__le__ @_sympifyit('other', NotImplemented) def __mod__(self, other): if not isinstance(other, Expr): return NotImplemented return S.NaN __rmod__ = __mod__ def floor(self): return self def ceiling(self): return self def as_powers_dict(self): return {S.NegativeOne: 1, S.Infinity: 1} class NaN(with_metaclass(Singleton, Number)): """ Not a Number. This serves as a place holder for numeric values that are indeterminate. Most operations on NaN, produce another NaN. Most indeterminate forms, such as ``0/0`` or ``oo - oo` produce NaN. 
Two exceptions are ``0**0`` and ``oo**0``, which all produce ``1`` (this is consistent with Python's float). NaN is loosely related to floating point nan, which is defined in the IEEE 754 floating point standard, and corresponds to the Python ``float('nan')``. Differences are noted below. NaN is mathematically not equal to anything else, even NaN itself. This explains the initially counter-intuitive results with ``Eq`` and ``==`` in the examples below. NaN is not comparable so inequalities raise a TypeError. This is in contrast with floating point nan where all inequalities are false. NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported as ``nan``. Examples ======== >>> from sympy import nan, S, oo, Eq >>> nan is S.NaN True >>> oo - oo nan >>> nan + 1 nan >>> Eq(nan, nan) # mathematical equality False >>> nan == nan # structural equality True References ========== .. [1] https://en.wikipedia.org/wiki/NaN """ is_commutative = True is_extended_real = None is_real = None is_rational = None is_algebraic = None is_transcendental = None is_integer = None is_comparable = False is_finite = None is_zero = None is_prime = None is_positive = None is_negative = None is_number = True __slots__ = [] def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"\text{NaN}" def __neg__(self): return self @_sympifyit('other', NotImplemented) def __add__(self, other): return self @_sympifyit('other', NotImplemented) def __sub__(self, other): return self @_sympifyit('other', NotImplemented) def __mul__(self, other): return self @_sympifyit('other', NotImplemented) def __div__(self, other): return self __truediv__ = __div__ def floor(self): return self def ceiling(self): return self def _as_mpf_val(self, prec): return _mpf_nan def _sage_(self): import sage.all as sage return sage.NaN def __hash__(self): return super(NaN, self).__hash__() def __eq__(self, other): # NaN is structurally equal to another NaN return other is S.NaN def __ne__(self, other): return other is not S.NaN def _eval_Eq(self, other): # NaN is not mathematically equal to anything, even NaN return S.false # Expr will _sympify and raise TypeError __gt__ = Expr.__gt__ __ge__ = Expr.__ge__ __lt__ = Expr.__lt__ __le__ = Expr.__le__ nan = S.NaN class ComplexInfinity(with_metaclass(Singleton, AtomicExpr)): r"""Complex infinity. In complex analysis the symbol `\tilde\infty`, called "complex infinity", represents a quantity with infinite magnitude, but undetermined complex phase. ComplexInfinity is a singleton, and can be accessed by ``S.ComplexInfinity``, or can be imported as ``zoo``. 
Examples ======== >>> from sympy import zoo, oo >>> zoo + 42 zoo >>> 42/zoo 0 >>> zoo + zoo nan >>> zoo*zoo zoo See Also ======== Infinity """ is_commutative = True is_infinite = True is_number = True is_prime = False is_complex = False is_extended_real = False __slots__ = [] def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"\tilde{\infty}" @staticmethod def __abs__(): return S.Infinity def floor(self): return self def ceiling(self): return self @staticmethod def __neg__(): return S.ComplexInfinity def _eval_power(self, expt): if expt is S.ComplexInfinity: return S.NaN if isinstance(expt, Number): if expt.is_zero: return S.NaN else: if expt.is_positive: return S.ComplexInfinity else: return S.Zero def _sage_(self): import sage.all as sage return sage.UnsignedInfinityRing.gen() zoo = S.ComplexInfinity class NumberSymbol(AtomicExpr): is_commutative = True is_finite = True is_number = True __slots__ = [] is_NumberSymbol = True def __new__(cls): return AtomicExpr.__new__(cls) def approximation(self, number_cls): """ Return an interval with number_cls endpoints that contains the value of NumberSymbol. If not implemented, then return None. """ def _eval_evalf(self, prec): return Float._new(self._as_mpf_val(prec), prec) def __eq__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if self is other: return True if other.is_Number and self.is_irrational: return False return False # NumberSymbol != non-(Number|self) def __ne__(self, other): return not self == other def __le__(self, other): if self is other: return S.true return Expr.__le__(self, other) def __ge__(self, other): if self is other: return S.true return Expr.__ge__(self, other) def __int__(self): # subclass with appropriate return value raise NotImplementedError def __long__(self): return self.__int__() def __hash__(self): return super(NumberSymbol, self).__hash__() class Exp1(with_metaclass(Singleton, NumberSymbol)): r"""The `e` constant. The transcendental number `e = 2.718281828\ldots` is the base of the natural logarithm and of the exponential function, `e = \exp(1)`. Sometimes called Euler's number or Napier's constant. Exp1 is a singleton, and can be accessed by ``S.Exp1``, or can be imported as ``E``. Examples ======== >>> from sympy import exp, log, E >>> E is exp(1) True >>> log(E) 1 References ========== .. [1] https://en.wikipedia.org/wiki/E_%28mathematical_constant%29 """ is_real = True is_positive = True is_negative = False # XXX Forces is_negative/is_nonnegative is_irrational = True is_number = True is_algebraic = False is_transcendental = True __slots__ = [] def _latex(self, printer): return r"e" @staticmethod def __abs__(): return S.Exp1 def __int__(self): return 2 def _as_mpf_val(self, prec): return mpf_e(prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (Integer(2), Integer(3)) elif issubclass(number_cls, Rational): pass def _eval_power(self, expt): from sympy import exp return exp(expt) def _eval_rewrite_as_sin(self, **kwargs): from sympy import sin I = S.ImaginaryUnit return sin(I + S.Pi/2) - I*sin(I) def _eval_rewrite_as_cos(self, **kwargs): from sympy import cos I = S.ImaginaryUnit return cos(I) + I*cos(I + S.Pi/2) def _sage_(self): import sage.all as sage return sage.e E = S.Exp1 class Pi(with_metaclass(Singleton, NumberSymbol)): r"""The `\pi` constant. 
The transcendental number `\pi = 3.141592654\ldots` represents the ratio of a circle's circumference to its diameter, the area of the unit circle, the half-period of trigonometric functions, and many other things in mathematics. Pi is a singleton, and can be accessed by ``S.Pi``, or can be imported as ``pi``. Examples ======== >>> from sympy import S, pi, oo, sin, exp, integrate, Symbol >>> S.Pi pi >>> pi > 3 True >>> pi.is_irrational True >>> x = Symbol('x') >>> sin(x + 2*pi) sin(x) >>> integrate(exp(-x**2), (x, -oo, oo)) sqrt(pi) References ========== .. [1] https://en.wikipedia.org/wiki/Pi """ is_real = True is_positive = True is_negative = False is_irrational = True is_number = True is_algebraic = False is_transcendental = True __slots__ = [] def _latex(self, printer): return r"\pi" @staticmethod def __abs__(): return S.Pi def __int__(self): return 3 def _as_mpf_val(self, prec): return mpf_pi(prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (Integer(3), Integer(4)) elif issubclass(number_cls, Rational): return (Rational(223, 71), Rational(22, 7)) def _sage_(self): import sage.all as sage return sage.pi pi = S.Pi class GoldenRatio(with_metaclass(Singleton, NumberSymbol)): r"""The golden ratio, `\phi`. `\phi = \frac{1 + \sqrt{5}}{2}` is algebraic number. Two quantities are in the golden ratio if their ratio is the same as the ratio of their sum to the larger of the two quantities, i.e. their maximum. GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``. Examples ======== >>> from sympy import S >>> S.GoldenRatio > 1 True >>> S.GoldenRatio.expand(func=True) 1/2 + sqrt(5)/2 >>> S.GoldenRatio.is_irrational True References ========== .. [1] https://en.wikipedia.org/wiki/Golden_ratio """ is_real = True is_positive = True is_negative = False is_irrational = True is_number = True is_algebraic = True is_transcendental = False __slots__ = [] def _latex(self, printer): return r"\phi" def __int__(self): return 1 def _as_mpf_val(self, prec): # XXX track down why this has to be increased rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10) return mpf_norm(rv, prec) def _eval_expand_func(self, **hints): from sympy import sqrt return S.Half + S.Half*sqrt(5) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.One, Rational(2)) elif issubclass(number_cls, Rational): pass def _sage_(self): import sage.all as sage return sage.golden_ratio _eval_rewrite_as_sqrt = _eval_expand_func class TribonacciConstant(with_metaclass(Singleton, NumberSymbol)): r"""The tribonacci constant. The tribonacci numbers are like the Fibonacci numbers, but instead of starting with two predetermined terms, the sequence starts with three predetermined terms and each term afterwards is the sum of the preceding three terms. The tribonacci constant is the ratio toward which adjacent tribonacci numbers tend. It is a root of the polynomial `x^3 - x^2 - x - 1 = 0`, and also satisfies the equation `x + x^{-3} = 2`. TribonacciConstant is a singleton, and can be accessed by ``S.TribonacciConstant``. Examples ======== >>> from sympy import S >>> S.TribonacciConstant > 1 True >>> S.TribonacciConstant.expand(func=True) 1/3 + (19 - 3*sqrt(33))**(1/3)/3 + (3*sqrt(33) + 19)**(1/3)/3 >>> S.TribonacciConstant.is_irrational True >>> S.TribonacciConstant.n(20) 1.8392867552141611326 References ========== .. 
[1] https://en.wikipedia.org/wiki/Generalizations_of_Fibonacci_numbers#Tribonacci_numbers """ is_real = True is_positive = True is_negative = False is_irrational = True is_number = True is_algebraic = True is_transcendental = False __slots__ = [] def _latex(self, printer): return r"\text{TribonacciConstant}" def __int__(self): return 2 def _eval_evalf(self, prec): rv = self._eval_expand_func(function=True)._eval_evalf(prec + 4) return Float(rv, precision=prec) def _eval_expand_func(self, **hints): from sympy import sqrt, cbrt return (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3 def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.One, Rational(2)) elif issubclass(number_cls, Rational): pass _eval_rewrite_as_sqrt = _eval_expand_func class EulerGamma(with_metaclass(Singleton, NumberSymbol)): r"""The Euler-Mascheroni constant. `\gamma = 0.5772157\ldots` (also called Euler's constant) is a mathematical constant recurring in analysis and number theory. It is defined as the limiting difference between the harmonic series and the natural logarithm: .. math:: \gamma = \lim\limits_{n\to\infty} \left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right) EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``. Examples ======== >>> from sympy import S >>> S.EulerGamma.is_irrational >>> S.EulerGamma > 0 True >>> S.EulerGamma > 1 False References ========== .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant """ is_real = True is_positive = True is_negative = False is_irrational = None is_number = True __slots__ = [] def _latex(self, printer): return r"\gamma" def __int__(self): return 0 def _as_mpf_val(self, prec): # XXX track down why this has to be increased v = mlib.libhyper.euler_fixed(prec + 10) rv = mlib.from_man_exp(v, -prec - 10) return mpf_norm(rv, prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.Zero, S.One) elif issubclass(number_cls, Rational): return (S.Half, Rational(3, 5)) def _sage_(self): import sage.all as sage return sage.euler_gamma class Catalan(with_metaclass(Singleton, NumberSymbol)): r"""Catalan's constant. `K = 0.91596559\ldots` is given by the infinite series .. math:: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2} Catalan is a singleton, and can be accessed by ``S.Catalan``. Examples ======== >>> from sympy import S >>> S.Catalan.is_irrational >>> S.Catalan > 0 True >>> S.Catalan > 1 False References ========== .. [1] https://en.wikipedia.org/wiki/Catalan%27s_constant """ is_real = True is_positive = True is_negative = False is_irrational = None is_number = True __slots__ = [] def __int__(self): return 0 def _as_mpf_val(self, prec): # XXX track down why this has to be increased v = mlib.catalan_fixed(prec + 10) rv = mlib.from_man_exp(v, -prec - 10) return mpf_norm(rv, prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.Zero, S.One) elif issubclass(number_cls, Rational): return (Rational(9, 10), S.One) def _eval_rewrite_as_Sum(self, k_sym=None, symbols=None): from sympy import Sum, Dummy if (k_sym is not None) or (symbols is not None): return self k = Dummy('k', integer=True, nonnegative=True) return Sum((-1)**k / (2*k+1)**2, (k, 0, S.Infinity)) def _sage_(self): import sage.all as sage return sage.catalan class ImaginaryUnit(with_metaclass(Singleton, AtomicExpr)): r"""The imaginary unit, `i = \sqrt{-1}`. I is a singleton, and can be accessed by ``S.I``, or can be imported as ``I``. 
Examples ======== >>> from sympy import I, sqrt >>> sqrt(-1) I >>> I*I -1 >>> 1/I -I References ========== .. [1] https://en.wikipedia.org/wiki/Imaginary_unit """ is_commutative = True is_imaginary = True is_finite = True is_number = True is_algebraic = True is_transcendental = False __slots__ = [] def _latex(self, printer): return printer._settings['imaginary_unit_latex'] @staticmethod def __abs__(): return S.One def _eval_evalf(self, prec): return self def _eval_conjugate(self): return -S.ImaginaryUnit def _eval_power(self, expt): """ b is I = sqrt(-1) e is symbolic object but not equal to 0, 1 I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> sin(Pi*r/2) + cos(Pi*r/2)*I, r is decimal I**0 mod 4 -> 1 I**1 mod 4 -> I I**2 mod 4 -> -1 I**3 mod 4 -> -I """ if isinstance(expt, Number): if isinstance(expt, Integer): expt = expt.p % 4 if expt == 0: return S.One if expt == 1: return S.ImaginaryUnit if expt == 2: return -S.One return -S.ImaginaryUnit return def as_base_exp(self): return S.NegativeOne, S.Half def _sage_(self): import sage.all as sage return sage.I @property def _mpc_(self): return (Float(0)._mpf_, Float(1)._mpf_) I = S.ImaginaryUnit def sympify_fractions(f): return Rational(f.numerator, f.denominator, 1) converter[fractions.Fraction] = sympify_fractions try: if HAS_GMPY == 2: import gmpy2 as gmpy elif HAS_GMPY == 1: import gmpy else: raise ImportError def sympify_mpz(x): return Integer(long(x)) def sympify_mpq(x): return Rational(long(x.numerator), long(x.denominator)) converter[type(gmpy.mpz(1))] = sympify_mpz converter[type(gmpy.mpq(1, 2))] = sympify_mpq except ImportError: pass def sympify_mpmath(x): return Expr._from_mpmath(x, x.context.prec) converter[mpnumeric] = sympify_mpmath def sympify_mpq(x): p, q = x._mpq_ return Rational(p, q, 1) converter[type(mpmath.rational.mpq(1, 2))] = sympify_mpq def sympify_complex(a): real, imag = list(map(sympify, (a.real, a.imag))) return real + S.ImaginaryUnit*imag converter[complex] = sympify_complex from .power import Pow, integer_nthroot from .mul import Mul Mul.identity = One() from .add import Add Add.identity = Zero() def _register_classes(): numbers.Number.register(Number) numbers.Real.register(Float) numbers.Rational.register(Rational) numbers.Rational.register(Integer) _register_classes()
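# Illustrative note (comments only, not executed): with the converters
# registered above, standard-library numeric types sympify directly, e.g.
#
#   >>> from fractions import Fraction
#   >>> from sympy import sympify
#   >>> sympify(Fraction(3, 4))
#   3/4
#   >>> sympify(2 + 3j)
#   2.0 + 3.0*I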
"""sympify -- convert objects SymPy internal format""" from __future__ import print_function, division from inspect import getmro from .core import all_classes as sympy_classes from .compatibility import iterable, string_types, range from .parameters import global_parameters class SympifyError(ValueError): def __init__(self, expr, base_exc=None): self.expr = expr self.base_exc = base_exc def __str__(self): if self.base_exc is None: return "SympifyError: %r" % (self.expr,) return ("Sympify of expression '%s' failed, because of exception being " "raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc))) converter = {} # See sympify docstring. class CantSympify(object): """ Mix in this trait to a class to disallow sympification of its instances. Examples ======== >>> from sympy.core.sympify import sympify, CantSympify >>> class Something(dict): ... pass ... >>> sympify(Something()) {} >>> class Something(dict, CantSympify): ... pass ... >>> sympify(Something()) Traceback (most recent call last): ... SympifyError: SympifyError: {} """ pass def _convert_numpy_types(a, **sympify_args): """ Converts a numpy datatype input to an appropriate SymPy type. """ import numpy as np if not isinstance(a, np.floating): if np.iscomplex(a): return converter[complex](a.item()) else: return sympify(a.item(), **sympify_args) else: try: from sympy.core.numbers import Float prec = np.finfo(a).nmant + 1 # E.g. double precision means prec=53 but nmant=52 # Leading bit of mantissa is always 1, so is not stored a = str(list(np.reshape(np.asarray(a), (1, np.size(a)))[0]))[1:-1] return Float(a, precision=prec) except NotImplementedError: raise SympifyError('Translation for numpy float : %s ' 'is not implemented' % a) def sympify(a, locals=None, convert_xor=True, strict=False, rational=False, evaluate=None): """Converts an arbitrary expression to a type that can be used inside SymPy. For example, it will convert Python ints into instances of sympy.Integer, floats into instances of sympy.Float, etc. It is also able to coerce symbolic expressions which inherit from Basic. This can be useful in cooperation with SAGE. It currently accepts as arguments: - any object defined in SymPy - standard numeric python types: int, long, float, Decimal - strings (like "0.09" or "2e-19") - booleans, including ``None`` (will leave ``None`` unchanged) - dict, lists, sets or tuples containing any of the above .. warning:: Note that this function uses ``eval``, and thus shouldn't be used on unsanitized input. If the argument is already a type that SymPy understands, it will do nothing but return that value. This can be used at the beginning of a function to ensure you are working with the correct type. >>> from sympy import sympify >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True If the expression could not be converted, a SympifyError is raised. >>> sympify("x***2") Traceback (most recent call last): ... SympifyError: SympifyError: "could not parse u'x***2'" Locals ------ The sympification happens with access to everything that is loaded by ``from sympy import *``; anything used in a string that is not defined by that import will be converted to a symbol. 
In the following, the ``bitcount`` function is treated as a symbol and the ``O`` is interpreted as the Order object (used with series) and it raises an error when used improperly: >>> s = 'bitcount(42)' >>> sympify(s) bitcount(42) >>> sympify("O(x)") O(x) >>> sympify("O + 1") Traceback (most recent call last): ... TypeError: unbound method... In order to have ``bitcount`` be recognized it can be imported into a namespace dictionary and passed as locals: >>> from sympy.core.compatibility import exec_ >>> ns = {} >>> exec_('from sympy.core.evalf import bitcount', ns) >>> sympify(s, locals=ns) 6 In order to have the ``O`` interpreted as a Symbol, identify it as such in the namespace dictionary. This can be done in a variety of ways; all three of the following are possibilities: >>> from sympy import Symbol >>> ns["O"] = Symbol("O") # method 1 >>> exec_('from sympy.abc import O', ns) # method 2 >>> ns.update(dict(O=Symbol("O"))) # method 3 >>> sympify("O + 1", locals=ns) O + 1 If you want *all* single-letter and Greek-letter variables to be symbols then you can use the clashing-symbols dictionaries that have been defined there as private variables: _clash1 (single-letter variables), _clash2 (the multi-letter Greek names) or _clash (both single and multi-letter names that are defined in abc). >>> from sympy.abc import _clash1 >>> _clash1 {'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S} >>> sympify('I & Q', _clash1) I & Q Strict ------ If the option ``strict`` is set to ``True``, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. >>> print(sympify(None)) None >>> sympify(None, strict=True) Traceback (most recent call last): ... SympifyError: SympifyError: None Evaluation ---------- If the option ``evaluate`` is set to ``False``, then arithmetic and operators will be converted into their SymPy equivalents and the ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will be denested first. This is done via an AST transformation that replaces operators with their SymPy equivalents, so if an operand redefines any of those operations, the redefined operators will not be used. >>> sympify('2**2 / 3 + 5') 19/3 >>> sympify('2**2 / 3 + 5', evaluate=False) 2**2/3 + 5 Extending --------- To extend ``sympify`` to convert custom objects (not derived from ``Basic``), just define a ``_sympy_`` method to your class. You can do that even to classes that you do not own by subclassing or adding the method at runtime. >>> from sympy import Matrix >>> class MyList1(object): ... def __iter__(self): ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] ... def _sympy_(self): return Matrix(self) >>> sympify(MyList1()) Matrix([ [1], [2]]) If you do not have control over the class definition you could also use the ``converter`` global dictionary. The key is the class and the value is a function that takes a single argument and returns the desired SymPy object, e.g. ``converter[MyList] = lambda x: Matrix(x)``. >>> class MyList2(object): # XXX Do not do this if you control the class! ... def __iter__(self): # Use _sympy_! ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] >>> from sympy.core.sympify import converter >>> converter[MyList2] = lambda x: Matrix(x) >>> sympify(MyList2()) Matrix([ [1], [2]]) Notes ===== The keywords ``rational`` and ``convert_xor`` are only used when the input is a string. 
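    For instance (illustrative doctests):

    >>> sympify('x^y')  # default convert_xor=True maps ^ to **
    x**y
    >>> sympify('x^y', convert_xor=False)  # ^ kept as logical XOR
    Xor(x, y)
    >>> sympify('0.1', rational=True)  # float literals become Rationals
    1/10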
Sometimes autosimplification during sympification results in expressions that are very different in structure than what was entered. Until such autosimplification is no longer done, the ``kernS`` function might be of some use. In the example below you can see how an expression reduces to -1 by autosimplification, but does not do so when ``kernS`` is used. >>> from sympy.core.sympify import kernS >>> from sympy.abc import x >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 -1 >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1' >>> sympify(s) -1 >>> kernS(s) -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 """ is_sympy = getattr(a, '__sympy__', None) if is_sympy is not None: return a if isinstance(a, CantSympify): raise SympifyError(a) cls = getattr(a, "__class__", None) if cls is None: cls = type(a) # Probably an old-style class conv = converter.get(cls, None) if conv is not None: return conv(a) for superclass in getmro(cls): try: return converter[superclass](a) except KeyError: continue if cls is type(None): if strict: raise SympifyError(a) else: return a if evaluate is None: evaluate = global_parameters.evaluate # Support for basic numpy datatypes # Note that this check exists to avoid importing NumPy when not necessary if type(a).__module__ == 'numpy': import numpy as np if np.isscalar(a): return _convert_numpy_types(a, locals=locals, convert_xor=convert_xor, strict=strict, rational=rational, evaluate=evaluate) _sympy_ = getattr(a, "_sympy_", None) if _sympy_ is not None: try: return a._sympy_() # XXX: Catches AttributeError: 'SympyConverter' object has no # attribute 'tuple' # This is probably a bug somewhere but for now we catch it here. except AttributeError: pass if not strict: # Put numpy array conversion _before_ float/int, see # <https://github.com/sympy/sympy/issues/13924>. flat = getattr(a, "flat", None) if flat is not None: shape = getattr(a, "shape", None) if shape is not None: from ..tensor.array import Array return Array(a.flat, a.shape) # works with e.g. NumPy arrays if not isinstance(a, string_types): for coerce in (float, int): try: coerced = coerce(a) except (TypeError, ValueError): continue # XXX: AttributeError only needed here for Py2 except AttributeError: continue try: return sympify(coerced) except SympifyError: continue if strict: raise SympifyError(a) if iterable(a): try: return type(a)([sympify(x, locals=locals, convert_xor=convert_xor, rational=rational) for x in a]) except TypeError: # Not all iterables are rebuildable with their type. pass if isinstance(a, dict): try: return type(a)([sympify(x, locals=locals, convert_xor=convert_xor, rational=rational) for x in a.items()]) except TypeError: # Not all iterables are rebuildable with their type. pass # At this point we were given an arbitrary expression # which does not inherit from Basic and doesn't implement # _sympy_ (which is a canonical and robust way to convert # anything to SymPy expression). # # As a last chance, we try to take "a"'s normal form via unicode() # and try to parse it. 
If it fails, then we have no luck and # return an exception try: from .compatibility import unicode a = unicode(a) except Exception as exc: raise SympifyError(a, exc) from sympy.parsing.sympy_parser import (parse_expr, TokenError, standard_transformations) from sympy.parsing.sympy_parser import convert_xor as t_convert_xor from sympy.parsing.sympy_parser import rationalize as t_rationalize transformations = standard_transformations if rational: transformations += (t_rationalize,) if convert_xor: transformations += (t_convert_xor,) try: a = a.replace('\n', '') expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate) except (TokenError, SyntaxError) as exc: raise SympifyError('could not parse %r' % a, exc) return expr def _sympify(a): """ Short version of sympify for internal usage for __add__ and __eq__ methods where it is ok to allow some things (like Python integers and floats) in the expression. This excludes things (like strings) that are unwise to allow into such an expression. >>> from sympy import Integer >>> Integer(1) == 1 True >>> Integer(1) == '1' False >>> from sympy.abc import x >>> x + 1 x + 1 >>> x + '1' Traceback (most recent call last): ... TypeError: unsupported operand type(s) for +: 'Symbol' and 'str' see: sympify """ return sympify(a, strict=True) def kernS(s): """Use a hack to try keep autosimplification from distributing a a number into an Add; this modification doesn't prevent the 2-arg Mul from becoming an Add, however. Examples ======== >>> from sympy.core.sympify import kernS >>> from sympy.abc import x, y, z The 2-arg Mul distributes a number (or minus sign) across the terms of an expression, but kernS will prevent that: >>> 2*(x + y), -(x + 1) (2*x + 2*y, -x - 1) >>> kernS('2*(x + y)') 2*(x + y) >>> kernS('-(x + 1)') -(x + 1) If use of the hack fails, the un-hacked string will be passed to sympify... and you get what you get. XXX This hack should not be necessary once issue 4596 has been resolved. """ import string from random import choice from sympy.core.symbol import Symbol hit = False quoted = '"' in s or "'" in s if '(' in s and not quoted: if s.count('(') != s.count(")"): raise SympifyError('unmatched left parenthesis') # strip all space from s s = ''.join(s.split()) olds = s # now use space to represent a symbol that # will # step 1. turn potential 2-arg Muls into 3-arg versions # 1a. *( -> * *( s = s.replace('*(', '* *(') # 1b. close up exponentials s = s.replace('** *', '**') # 2. handle the implied multiplication of a negated # parenthesized expression in two steps # 2a: -(...) --> -( *(...) target = '-( *(' s = s.replace('-(', target) # 2b: double the matching closing parenthesis # -( *(...) 
--> -( *(...)) i = nest = 0 assert target.endswith('(') # assumption below while True: j = s.find(target, i) if j == -1: break j += len(target) - 1 for j in range(j, len(s)): if s[j] == "(": nest += 1 elif s[j] == ")": nest -= 1 if nest == 0: break s = s[:j] + ")" + s[j:] i = j + 2 # the first char after 2nd ) if ' ' in s: # get a unique kern kern = '_' while kern in s: kern += choice(string.ascii_letters + string.digits) s = s.replace(' ', kern) hit = kern in s for i in range(2): try: expr = sympify(s) break except: # the kern might cause unknown errors, so use bare except if hit: s = olds # maybe it didn't like the kern; use un-kerned s hit = False continue expr = sympify(s) # let original error raise if not hit: return expr rep = {Symbol(kern): 1} def _clear(expr): if isinstance(expr, (list, tuple, set)): return type(expr)([_clear(e) for e in expr]) if hasattr(expr, 'subs'): return expr.subs(rep, hack2=True) return expr expr = _clear(expr) # hope that kern is not there anymore return expr
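# Illustrative trace (comments only, not executed) of the hack above for
# kernS('2*(x + y)'); the kern name shown is schematic:
#   step 1a : '2*(x+y)'  ->  '2* *(x+y)'        (mark the 2-arg Mul)
#   kerning : the space is replaced by a unique Symbol, so sympify sees a
#             3-arg Mul 2*_kern*(x + y) and does not distribute the 2
#   cleanup : subs(_kern, 1) with hack2=True leaves 2*(x + y) undistributed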
"""Geometrical Points. Contains ======== Point Point2D Point3D When methods of Point require 1 or more points as arguments, they can be passed as a sequence of coordinates or Points: >>> from sympy.geometry.point import Point >>> Point(1, 1).is_collinear((2, 2), (3, 4)) False >>> Point(1, 1).is_collinear(Point(2, 2), Point(3, 4)) False """ from __future__ import division, print_function import warnings from sympy.core import S, sympify, Expr from sympy.core.compatibility import is_sequence from sympy.core.containers import Tuple from sympy.simplify import nsimplify, simplify from sympy.geometry.exceptions import GeometryError from sympy.functions.elementary.miscellaneous import sqrt from sympy.functions.elementary.complexes import im from sympy.matrices import Matrix from sympy.core.numbers import Float from sympy.core.parameters import global_parameters from sympy.core.add import Add from sympy.utilities.iterables import uniq from sympy.utilities.misc import filldedent, func_name, Undecidable from .entity import GeometryEntity class Point(GeometryEntity): """A point in a n-dimensional Euclidean space. Parameters ========== coords : sequence of n-coordinate values. In the special case where n=2 or 3, a Point2D or Point3D will be created as appropriate. evaluate : if `True` (default), all floats are turn into exact types. dim : number of coordinates the point should have. If coordinates are unspecified, they are padded with zeros. on_morph : indicates what should happen when the number of coordinates of a point need to be changed by adding or removing zeros. Possible values are `'warn'`, `'error'`, or `ignore` (default). No warning or error is given when `*args` is empty and `dim` is given. An error is always raised when trying to remove nonzero coordinates. Attributes ========== length origin: A `Point` representing the origin of the appropriately-dimensioned space. Raises ====== TypeError : When instantiating with anything but a Point or sequence ValueError : when instantiating with a sequence with length < 2 or when trying to reduce dimensions if keyword `on_morph='error'` is set. See Also ======== sympy.geometry.line.Segment : Connects two Points Examples ======== >>> from sympy.geometry import Point >>> from sympy.abc import x >>> Point(1, 2, 3) Point3D(1, 2, 3) >>> Point([1, 2]) Point2D(1, 2) >>> Point(0, x) Point2D(0, x) >>> Point(dim=4) Point(0, 0, 0, 0) Floats are automatically converted to Rational unless the evaluate flag is False: >>> Point(0.5, 0.25) Point2D(1/2, 1/4) >>> Point(0.5, 0.25, evaluate=False) Point2D(0.5, 0.25) """ is_Point = True def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) on_morph = kwargs.get('on_morph', 'ignore') # unpack into coords coords = args[0] if len(args) == 1 else args # check args and handle quickly handle Point instances if isinstance(coords, Point): # even if we're mutating the dimension of a point, we # don't reevaluate its coordinates evaluate = False if len(coords) == kwargs.get('dim', len(coords)): return coords if not is_sequence(coords): raise TypeError(filldedent(''' Expecting sequence of coordinates, not `{}`''' .format(func_name(coords)))) # A point where only `dim` is specified is initialized # to zeros. 
if len(coords) == 0 and kwargs.get('dim', None): coords = (S.Zero,)*kwargs.get('dim') coords = Tuple(*coords) dim = kwargs.get('dim', len(coords)) if len(coords) < 2: raise ValueError(filldedent(''' Point requires 2 or more coordinates or keyword `dim` > 1.''')) if len(coords) != dim: message = ("Dimension of {} needs to be changed " "from {} to {}.").format(coords, len(coords), dim) if on_morph == 'ignore': pass elif on_morph == "error": raise ValueError(message) elif on_morph == 'warn': warnings.warn(message) else: raise ValueError(filldedent(''' on_morph value should be 'error', 'warn' or 'ignore'.''')) if any(coords[dim:]): raise ValueError('Nonzero coordinates cannot be removed.') if any(a.is_number and im(a) for a in coords): raise ValueError('Imaginary coordinates are not permitted.') if not all(isinstance(a, Expr) for a in coords): raise TypeError('Coordinates must be valid SymPy expressions.') # pad with zeros appropriately coords = coords[:dim] + (S.Zero,)*(dim - len(coords)) # Turn any Floats into rationals and simplify # any expressions before we instantiate if evaluate: coords = coords.xreplace(dict( [(f, simplify(nsimplify(f, rational=True))) for f in coords.atoms(Float)])) # return 2D or 3D instances if len(coords) == 2: kwargs['_nocheck'] = True return Point2D(*coords, **kwargs) elif len(coords) == 3: kwargs['_nocheck'] = True return Point3D(*coords, **kwargs) # the general Point return GeometryEntity.__new__(cls, *coords) def __abs__(self): """Returns the distance between this point and the origin.""" origin = Point([0]*len(self)) return Point.distance(origin, self) def __add__(self, other): """Add other to self by incrementing self's coordinates by those of other. Notes ===== >>> from sympy.geometry.point import Point When sequences of coordinates are passed to Point methods, they are converted to a Point internally. This __add__ method does not do that so if floating point values are used, a floating point result (in terms of SymPy Floats) will be returned. >>> Point(1, 2) + (.1, .2) Point2D(1.1, 2.2) If this is not desired, the `translate` method can be used or another Point can be added: >>> Point(1, 2).translate(.1, .2) Point2D(11/10, 11/5) >>> Point(1, 2) + Point(.1, .2) Point2D(11/10, 11/5) See Also ======== sympy.geometry.point.Point.translate """ try: s, o = Point._normalize_dimension(self, Point(other, evaluate=False)) except TypeError: raise GeometryError("Don't know how to add {} and a Point object".format(other)) coords = [simplify(a + b) for a, b in zip(s, o)] return Point(coords, evaluate=False) def __contains__(self, item): return item in self.args def __div__(self, divisor): """Divide point's coordinates by a factor.""" divisor = sympify(divisor) coords = [simplify(x/divisor) for x in self.args] return Point(coords, evaluate=False) def __eq__(self, other): if not isinstance(other, Point) or len(self.args) != len(other.args): return False return self.args == other.args def __getitem__(self, key): return self.args[key] def __hash__(self): return hash(self.args) def __iter__(self): return self.args.__iter__() def __len__(self): return len(self.args) def __mul__(self, factor): """Multiply point's coordinates by a factor. 
Notes ===== >>> from sympy.geometry.point import Point When multiplying a Point by a floating point number, the coordinates of the Point will be changed to Floats: >>> Point(1, 2)*0.1 Point2D(0.1, 0.2) If this is not desired, the `scale` method can be used or else only multiply or divide by integers: >>> Point(1, 2).scale(1.1, 1.1) Point2D(11/10, 11/5) >>> Point(1, 2)*11/10 Point2D(11/10, 11/5) See Also ======== sympy.geometry.point.Point.scale """ factor = sympify(factor) coords = [simplify(x*factor) for x in self.args] return Point(coords, evaluate=False) def __rmul__(self, factor): """Multiply a factor by point's coordinates.""" return self.__mul__(factor) def __neg__(self): """Negate the point.""" coords = [-x for x in self.args] return Point(coords, evaluate=False) def __sub__(self, other): """Subtract two points, or subtract a factor from this point's coordinates.""" return self + [-x for x in other] @classmethod def _normalize_dimension(cls, *points, **kwargs): """Ensure that points have the same dimension. By default `on_morph='warn'` is passed to the `Point` constructor.""" # if we have a built-in ambient dimension, use it dim = getattr(cls, '_ambient_dimension', None) # override if we specified it dim = kwargs.get('dim', dim) # if no dim was given, use the highest dimensional point if dim is None: dim = max(i.ambient_dimension for i in points) if all(i.ambient_dimension == dim for i in points): return list(points) kwargs['dim'] = dim kwargs['on_morph'] = kwargs.get('on_morph', 'warn') return [Point(i, **kwargs) for i in points] @staticmethod def affine_rank(*args): """The affine rank of a set of points is the dimension of the smallest affine space containing all the points. For example, if the points lie on a line (and are not all the same) their affine rank is 1. If the points lie on a plane but not a line, their affine rank is 2. By convention, the empty set has affine rank -1.""" if len(args) == 0: return -1 # make sure we're genuinely points # and translate every point to the origin points = Point._normalize_dimension(*[Point(i) for i in args]) origin = points[0] points = [i - origin for i in points[1:]] m = Matrix([i.args for i in points]) # XXX fragile -- what is a better way? return m.rank(iszerofunc = lambda x: abs(x.n(2)) < 1e-12 if x.is_number else x.is_zero) @property def ambient_dimension(self): """Number of components this point has.""" return getattr(self, '_ambient_dimension', len(self)) @classmethod def are_coplanar(cls, *points): """Return True if there exists a plane in which all the points lie. A trivial True value is returned if `len(points) < 3` or all Points are 2-dimensional. Parameters ========== A set of points Raises ====== ValueError : if less than 3 unique points are given Returns ======= boolean Examples ======== >>> from sympy import Point3D >>> p1 = Point3D(1, 2, 2) >>> p2 = Point3D(2, 7, 2) >>> p3 = Point3D(0, 0, 2) >>> p4 = Point3D(1, 1, 2) >>> Point3D.are_coplanar(p1, p2, p3, p4) True >>> p5 = Point3D(0, 1, 3) >>> Point3D.are_coplanar(p1, p2, p3, p5) False """ if len(points) <= 1: return True points = cls._normalize_dimension(*[Point(i) for i in points]) # quick exit if we are in 2D if points[0].ambient_dimension == 2: return True points = list(uniq(points)) return Point.affine_rank(*points) <= 2 def distance(self, other): """The Euclidean distance between self and another GeometricEntity. Returns ======= distance : number or symbolic expression. 
Raises ====== TypeError : if other is not recognized as a GeometricEntity or is a GeometricEntity for which distance is not defined. See Also ======== sympy.geometry.line.Segment.length sympy.geometry.point.Point.taxicab_distance Examples ======== >>> from sympy.geometry import Point, Line >>> p1, p2 = Point(1, 1), Point(4, 5) >>> l = Line((3, 1), (2, 2)) >>> p1.distance(p2) 5 >>> p1.distance(l) sqrt(2) The computed distance may be symbolic, too: >>> from sympy.abc import x, y >>> p3 = Point(x, y) >>> p3.distance((0, 0)) sqrt(x**2 + y**2) """ if not isinstance(other, GeometryEntity): try: other = Point(other, dim=self.ambient_dimension) except TypeError: raise TypeError("not recognized as a GeometricEntity: %s" % type(other)) if isinstance(other, Point): s, p = Point._normalize_dimension(self, Point(other)) return sqrt(Add(*((a - b)**2 for a, b in zip(s, p)))) distance = getattr(other, 'distance', None) if distance is None: raise TypeError("distance between Point and %s is not defined" % type(other)) return distance(self) def dot(self, p): """Return dot product of self with another Point.""" if not is_sequence(p): p = Point(p) # raise the error via Point return Add(*(a*b for a, b in zip(self, p))) def equals(self, other): """Returns whether the coordinates of self and other agree.""" # a point is equal to another point if all its components are equal if not isinstance(other, Point) or len(self) != len(other): return False return all(a.equals(b) for a, b in zip(self, other)) def evalf(self, prec=None, **options): """Evaluate the coordinates of the point. This method will, where possible, create and return a new Point where the coordinates are evaluated as floating point numbers to the precision indicated (default=15). Parameters ========== prec : int Returns ======= point : Point Examples ======== >>> from sympy import Point, Rational >>> p1 = Point(Rational(1, 2), Rational(3, 2)) >>> p1 Point2D(1/2, 3/2) >>> p1.evalf() Point2D(0.5, 1.5) """ coords = [x.evalf(prec, **options) for x in self.args] return Point(*coords, evaluate=False) def intersection(self, other): """The intersection between this point and another GeometryEntity. Parameters ========== other : GeometryEntity or sequence of coordinates Returns ======= intersection : list of Points Notes ===== The return value will either be an empty list if there is no intersection, otherwise it will contain this point. Examples ======== >>> from sympy import Point >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, 0) >>> p1.intersection(p2) [] >>> p1.intersection(p3) [Point2D(0, 0)] """ if not isinstance(other, GeometryEntity): other = Point(other) if isinstance(other, Point): if self == other: return [self] p1, p2 = Point._normalize_dimension(self, other) if p1 == self and p1 == p2: return [self] return [] return other.intersection(self) def is_collinear(self, *args): """Returns `True` if there exists a line that contains `self` and `points`. Returns `False` otherwise. A trivially True value is returned if no points are given. 
Parameters ========== args : sequence of Points Returns ======= is_collinear : boolean See Also ======== sympy.geometry.line.Line Examples ======== >>> from sympy import Point >>> from sympy.abc import x >>> p1, p2 = Point(0, 0), Point(1, 1) >>> p3, p4, p5 = Point(2, 2), Point(x, x), Point(1, 2) >>> Point.is_collinear(p1, p2, p3, p4) True >>> Point.is_collinear(p1, p2, p3, p5) False """ points = (self,) + args points = Point._normalize_dimension(*[Point(i) for i in points]) points = list(uniq(points)) return Point.affine_rank(*points) <= 1 def is_concyclic(self, *args): """Do `self` and the given sequence of points lie in a circle? Returns True if the set of points are concyclic and False otherwise. A trivial value of True is returned if there are fewer than 2 other points. Parameters ========== args : sequence of Points Returns ======= is_concyclic : boolean Examples ======== >>> from sympy import Point Define 4 points that are on the unit circle: >>> p1, p2, p3, p4 = Point(1, 0), (0, 1), (-1, 0), (0, -1) >>> p1.is_concyclic() == p1.is_concyclic(p2, p3, p4) == True True Define a point not on that circle: >>> p = Point(1, 1) >>> p.is_concyclic(p1, p2, p3) False """ points = (self,) + args points = Point._normalize_dimension(*[Point(i) for i in points]) points = list(uniq(points)) if not Point.affine_rank(*points) <= 2: return False origin = points[0] points = [p - origin for p in points] # points are concyclic if they are coplanar and # there is a point c so that ||p_i-c|| == ||p_j-c|| for all # i and j. Rearranging this equation gives us the following # condition: the matrix `mat` must not a pivot in the last # column. mat = Matrix([list(i) + [i.dot(i)] for i in points]) rref, pivots = mat.rref() if len(origin) not in pivots: return True return False @property def is_nonzero(self): """True if any coordinate is nonzero, False if every coordinate is zero, and None if it cannot be determined.""" is_zero = self.is_zero if is_zero is None: return None return not is_zero def is_scalar_multiple(self, p): """Returns whether each coordinate of `self` is a scalar multiple of the corresponding coordinate in point p. """ s, o = Point._normalize_dimension(self, Point(p)) # 2d points happen a lot, so optimize this function call if s.ambient_dimension == 2: (x1, y1), (x2, y2) = s.args, o.args rv = (x1*y2 - x2*y1).equals(0) if rv is None: raise Undecidable(filldedent( '''can't determine if %s is a scalar multiple of %s''' % (s, o))) # if the vectors p1 and p2 are linearly dependent, then they must # be scalar multiples of each other m = Matrix([s.args, o.args]) return m.rank() < 2 @property def is_zero(self): """True if every coordinate is zero, False if any coordinate is not zero, and None if it cannot be determined.""" nonzero = [x.is_nonzero for x in self.args] if any(nonzero): return False if any(x is None for x in nonzero): return None return True @property def length(self): """ Treating a Point as a Line, this returns 0 for the length of a Point. Examples ======== >>> from sympy import Point >>> p = Point(0, 1) >>> p.length 0 """ return S.Zero def midpoint(self, p): """The midpoint between self and point p. 
Parameters ========== p : Point Returns ======= midpoint : Point See Also ======== sympy.geometry.line.Segment.midpoint Examples ======== >>> from sympy.geometry import Point >>> p1, p2 = Point(1, 1), Point(13, 5) >>> p1.midpoint(p2) Point2D(7, 3) """ s, p = Point._normalize_dimension(self, Point(p)) return Point([simplify((a + b)*S.Half) for a, b in zip(s, p)]) @property def origin(self): """A point of all zeros of the same ambient dimension as the current point""" return Point([0]*len(self), evaluate=False) @property def orthogonal_direction(self): """Returns a non-zero point that is orthogonal to the line containing `self` and the origin. Examples ======== >>> from sympy.geometry import Line, Point >>> a = Point(1, 2, 3) >>> a.orthogonal_direction Point3D(-2, 1, 0) >>> b = _ >>> Line(b, b.origin).is_perpendicular(Line(a, a.origin)) True """ dim = self.ambient_dimension # if a coordinate is zero, we can put a 1 there and zeros elsewhere if self[0].is_zero: return Point([1] + (dim - 1)*[0]) if self[1].is_zero: return Point([0,1] + (dim - 2)*[0]) # if the first two coordinates aren't zero, we can create a non-zero # orthogonal vector by swapping them, negating one, and padding with zeros return Point([-self[1], self[0]] + (dim - 2)*[0]) @staticmethod def project(a, b): """Project the point `a` onto the line between the origin and point `b` along the normal direction. Parameters ========== a : Point b : Point Returns ======= p : Point See Also ======== sympy.geometry.line.LinearEntity.projection Examples ======== >>> from sympy.geometry import Line, Point >>> a = Point(1, 2) >>> b = Point(2, 5) >>> z = a.origin >>> p = Point.project(a, b) >>> Line(p, a).is_perpendicular(Line(p, b)) True >>> Point.is_collinear(z, p, b) True """ a, b = Point._normalize_dimension(Point(a), Point(b)) if b.is_zero: raise ValueError("Cannot project to the zero vector.") return b*(a.dot(b) / b.dot(b)) def taxicab_distance(self, p): """The Taxicab Distance from self to point p. Returns the sum of the horizontal and vertical distances to point p. Parameters ========== p : Point Returns ======= taxicab_distance : The sum of the horizontal and vertical distances to point p. See Also ======== sympy.geometry.point.Point.distance Examples ======== >>> from sympy.geometry import Point >>> p1, p2 = Point(1, 1), Point(4, 5) >>> p1.taxicab_distance(p2) 7 """ s, p = Point._normalize_dimension(self, Point(p)) return Add(*(abs(a - b) for a, b in zip(s, p))) def canberra_distance(self, p): """The Canberra Distance from self to point p. Returns the weighted sum of horizontal and vertical distances to point p. Parameters ========== p : Point Returns ======= canberra_distance : The weighted sum of horizontal and vertical distances to point p. The weight used is the sum of absolute values of the coordinates. Examples ======== >>> from sympy.geometry import Point >>> p1, p2 = Point(1, 1), Point(3, 3) >>> p1.canberra_distance(p2) 1 >>> p1, p2 = Point(0, 0), Point(3, 3) >>> p1.canberra_distance(p2) 2 Raises ====== ValueError when both vectors are zero. 
See Also ======== sympy.geometry.point.Point.distance """ s, p = Point._normalize_dimension(self, Point(p)) if self.is_zero and p.is_zero: raise ValueError("Cannot project to the zero vector.") return Add(*((abs(a - b)/(abs(a) + abs(b))) for a, b in zip(s, p))) @property def unit(self): """Return the Point that is in the same direction as `self` and a distance of 1 from the origin""" return self / abs(self) n = evalf __truediv__ = __div__ class Point2D(Point): """A point in a 2-dimensional Euclidean space. Parameters ========== coords : sequence of 2 coordinate values. Attributes ========== x y length Raises ====== TypeError When trying to add or subtract points with different dimensions. When trying to create a point with more than two dimensions. When `intersection` is called with object other than a Point. See Also ======== sympy.geometry.line.Segment : Connects two Points Examples ======== >>> from sympy.geometry import Point2D >>> from sympy.abc import x >>> Point2D(1, 2) Point2D(1, 2) >>> Point2D([1, 2]) Point2D(1, 2) >>> Point2D(0, x) Point2D(0, x) Floats are automatically converted to Rational unless the evaluate flag is False: >>> Point2D(0.5, 0.25) Point2D(1/2, 1/4) >>> Point2D(0.5, 0.25, evaluate=False) Point2D(0.5, 0.25) """ _ambient_dimension = 2 def __new__(cls, *args, **kwargs): if not kwargs.pop('_nocheck', False): kwargs['dim'] = 2 args = Point(*args, **kwargs) return GeometryEntity.__new__(cls, *args) def __contains__(self, item): return item == self @property def bounds(self): """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding rectangle for the geometric figure. """ return (self.x, self.y, self.x, self.y) def rotate(self, angle, pt=None): """Rotate ``angle`` radians counterclockwise about Point ``pt``. See Also ======== translate, scale Examples ======== >>> from sympy import Point2D, pi >>> t = Point2D(1, 0) >>> t.rotate(pi/2) Point2D(0, 1) >>> t.rotate(pi/2, (2, 0)) Point2D(2, -1) """ from sympy import cos, sin, Point c = cos(angle) s = sin(angle) rv = self if pt is not None: pt = Point(pt, dim=2) rv -= pt x, y = rv.args rv = Point(c*x - s*y, s*x + c*y) if pt is not None: rv += pt return rv def scale(self, x=1, y=1, pt=None): """Scale the coordinates of the Point by multiplying by ``x`` and ``y`` after subtracting ``pt`` -- default is (0, 0) -- and then adding ``pt`` back again (i.e. ``pt`` is the point of reference for the scaling). See Also ======== rotate, translate Examples ======== >>> from sympy import Point2D >>> t = Point2D(1, 1) >>> t.scale(2) Point2D(2, 1) >>> t.scale(2, 2) Point2D(2, 2) """ if pt: pt = Point(pt, dim=2) return self.translate(*(-pt).args).scale(x, y).translate(*pt.args) return Point(self.x*x, self.y*y) def transform(self, matrix): """Return the point after applying the transformation described by the 3x3 Matrix, ``matrix``. See Also ======== sympy.geometry.point.Point2D.rotate sympy.geometry.point.Point2D.scale sympy.geometry.point.Point2D.translate """ if not (matrix.is_Matrix and matrix.shape == (3, 3)): raise ValueError("matrix must be a 3x3 matrix") col, row = matrix.shape x, y = self.args return Point(*(Matrix(1, 3, [x, y, 1])*matrix).tolist()[0][:2]) def translate(self, x=0, y=0): """Shift the Point by adding x and y to the coordinates of the Point. 
See Also ======== sympy.geometry.point.Point2D.rotate, scale Examples ======== >>> from sympy import Point2D >>> t = Point2D(0, 1) >>> t.translate(2) Point2D(2, 1) >>> t.translate(2, 2) Point2D(2, 3) >>> t + Point2D(2, 2) Point2D(2, 3) """ return Point(self.x + x, self.y + y) @property def x(self): """ Returns the X coordinate of the Point. Examples ======== >>> from sympy import Point2D >>> p = Point2D(0, 1) >>> p.x 0 """ return self.args[0] @property def y(self): """ Returns the Y coordinate of the Point. Examples ======== >>> from sympy import Point2D >>> p = Point2D(0, 1) >>> p.y 1 """ return self.args[1] class Point3D(Point): """A point in a 3-dimensional Euclidean space. Parameters ========== coords : sequence of 3 coordinate values. Attributes ========== x y z length Raises ====== TypeError When trying to add or subtract points with different dimensions. When `intersection` is called with object other than a Point. Examples ======== >>> from sympy import Point3D >>> from sympy.abc import x >>> Point3D(1, 2, 3) Point3D(1, 2, 3) >>> Point3D([1, 2, 3]) Point3D(1, 2, 3) >>> Point3D(0, x, 3) Point3D(0, x, 3) Floats are automatically converted to Rational unless the evaluate flag is False: >>> Point3D(0.5, 0.25, 2) Point3D(1/2, 1/4, 2) >>> Point3D(0.5, 0.25, 3, evaluate=False) Point3D(0.5, 0.25, 3) """ _ambient_dimension = 3 def __new__(cls, *args, **kwargs): if not kwargs.pop('_nocheck', False): kwargs['dim'] = 3 args = Point(*args, **kwargs) return GeometryEntity.__new__(cls, *args) def __contains__(self, item): return item == self @staticmethod def are_collinear(*points): """Is a sequence of points collinear? Test whether or not a set of points are collinear. Returns True if the set of points are collinear, or False otherwise. Parameters ========== points : sequence of Point Returns ======= are_collinear : boolean See Also ======== sympy.geometry.line.Line3D Examples ======== >>> from sympy import Point3D, Matrix >>> from sympy.abc import x >>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1) >>> p3, p4, p5 = Point3D(2, 2, 2), Point3D(x, x, x), Point3D(1, 2, 6) >>> Point3D.are_collinear(p1, p2, p3, p4) True >>> Point3D.are_collinear(p1, p2, p3, p5) False """ return Point.is_collinear(*points) def direction_cosine(self, point): """ Gives the direction cosine between 2 points Parameters ========== p : Point3D Returns ======= list Examples ======== >>> from sympy import Point3D >>> p1 = Point3D(1, 2, 3) >>> p1.direction_cosine(Point3D(2, 3, 5)) [sqrt(6)/6, sqrt(6)/6, sqrt(6)/3] """ a = self.direction_ratio(point) b = sqrt(Add(*(i**2 for i in a))) return [(point.x - self.x) / b,(point.y - self.y) / b, (point.z - self.z) / b] def direction_ratio(self, point): """ Gives the direction ratio between 2 points Parameters ========== p : Point3D Returns ======= list Examples ======== >>> from sympy import Point3D >>> p1 = Point3D(1, 2, 3) >>> p1.direction_ratio(Point3D(2, 3, 5)) [1, 1, 2] """ return [(point.x - self.x),(point.y - self.y),(point.z - self.z)] def intersection(self, other): """The intersection between this point and another GeometryEntity. Parameters ========== other : GeometryEntity or sequence of coordinates Returns ======= intersection : list of Points Notes ===== The return value will either be an empty list if there is no intersection, otherwise it will contain this point. 
Examples ======== >>> from sympy import Point3D >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, 0, 0) >>> p1.intersection(p2) [] >>> p1.intersection(p3) [Point3D(0, 0, 0)] """ if not isinstance(other, GeometryEntity): other = Point(other, dim=3) if isinstance(other, Point3D): if self == other: return [self] return [] return other.intersection(self) def scale(self, x=1, y=1, z=1, pt=None): """Scale the coordinates of the Point by multiplying by ``x`` and ``y`` after subtracting ``pt`` -- default is (0, 0) -- and then adding ``pt`` back again (i.e. ``pt`` is the point of reference for the scaling). See Also ======== translate Examples ======== >>> from sympy import Point3D >>> t = Point3D(1, 1, 1) >>> t.scale(2) Point3D(2, 1, 1) >>> t.scale(2, 2) Point3D(2, 2, 1) """ if pt: pt = Point3D(pt) return self.translate(*(-pt).args).scale(x, y, z).translate(*pt.args) return Point3D(self.x*x, self.y*y, self.z*z) def transform(self, matrix): """Return the point after applying the transformation described by the 4x4 Matrix, ``matrix``. See Also ======== sympy.geometry.point.Point3D.scale sympy.geometry.point.Point3D.translate """ if not (matrix.is_Matrix and matrix.shape == (4, 4)): raise ValueError("matrix must be a 4x4 matrix") col, row = matrix.shape from sympy.matrices.expressions import Transpose x, y, z = self.args m = Transpose(matrix) return Point3D(*(Matrix(1, 4, [x, y, z, 1])*m).tolist()[0][:3]) def translate(self, x=0, y=0, z=0): """Shift the Point by adding x and y to the coordinates of the Point. See Also ======== scale Examples ======== >>> from sympy import Point3D >>> t = Point3D(0, 1, 1) >>> t.translate(2) Point3D(2, 1, 1) >>> t.translate(2, 2) Point3D(2, 3, 1) >>> t + Point3D(2, 2, 2) Point3D(2, 3, 3) """ return Point3D(self.x + x, self.y + y, self.z + z) @property def x(self): """ Returns the X coordinate of the Point. Examples ======== >>> from sympy import Point3D >>> p = Point3D(0, 1, 3) >>> p.x 0 """ return self.args[0] @property def y(self): """ Returns the Y coordinate of the Point. Examples ======== >>> from sympy import Point3D >>> p = Point3D(0, 1, 2) >>> p.y 1 """ return self.args[1] @property def z(self): """ Returns the Z coordinate of the Point. Examples ======== >>> from sympy import Point3D >>> p = Point3D(0, 1, 1) >>> p.z 1 """ return self.args[2]
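# ----------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): a minimal, self-contained demonstration of the Point, Point2D
# and Point3D classes defined above. Expected values mirror the docstring
# examples in this file; the helper name ``_point_usage_sketch`` is
# hypothetical and the function is safe to delete.
def _point_usage_sketch():
    """Exercise the basic Point API; every assertion should hold."""
    from sympy import pi
    p1, p2 = Point(1, 1), Point(13, 5)
    assert p1 + p2 == Point2D(14, 6)           # coordinate-wise addition
    assert p1.midpoint(p2) == Point2D(7, 3)    # as in the midpoint docstring
    assert Point(1, 1).distance(Point(4, 5)) == 5      # Euclidean distance
    assert Point2D(1, 0).rotate(pi/2) == Point2D(0, 1) # rotation about origin
    assert Point3D(1, 2, 3).direction_ratio(Point3D(2, 3, 5)) == [1, 1, 2]
    assert Point.is_collinear(Point(0, 0), Point(1, 1), Point(2, 2))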
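# ----------------------------------------------------------------------
# Illustrative usage sketch (editor's addition): the module that follows
# appears to be SymPy's core set classes (Set, Interval, Union,
# Intersection, Complement, FiniteSet, ...). The helper below, whose name
# ``_sets_usage_sketch`` is hypothetical and safe to delete, exercises that
# public API through the installed ``sympy`` package; expected values
# mirror the docstring examples further down in the module.
def _sets_usage_sketch():
    """Exercise the basic set API; every assertion should hold."""
    from sympy import (Interval, FiniteSet, Union, Complement,
                       SymmetricDifference, ProductSet, S, oo)
    # '+' is a shortcut for union; overlapping intervals are merged.
    assert Interval(0, 1) + Interval(2, 3) == Union(Interval(0, 1), Interval(2, 3))
    assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
    # Intersection of overlapping intervals evaluates directly.
    assert Interval(1, 3).intersect(Interval(2, 4)) == Interval(2, 3)
    # Relative complement (set difference) and symmetric difference.
    assert Complement(FiniteSet(0, 1, 2), FiniteSet(1)) == FiniteSet(0, 2)
    assert SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5)) == FiniteSet(1, 2, 4, 5)
    # Complement with respect to the reals.
    assert Interval(0, 1).complement(S.Reals) == Union(Interval.open(-oo, 0),
                                                       Interval.open(1, oo))
    # Cartesian products support membership tests on tuples.
    assert (2, 2) in ProductSet(Interval(0, 5), FiniteSet(1, 2, 3))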
from __future__ import print_function, division from collections import defaultdict import inspect from sympy.core.basic import Basic from sympy.core.compatibility import (iterable, with_metaclass, ordered, range, PY3, reduce) from sympy.core.cache import cacheit from sympy.core.containers import Tuple from sympy.core.decorators import (deprecated, sympify_method_args, sympify_return) from sympy.core.evalf import EvalfMixin from sympy.core.parameters import global_parameters from sympy.core.expr import Expr from sympy.core.logic import fuzzy_bool, fuzzy_or, fuzzy_and, fuzzy_not from sympy.core.numbers import Float from sympy.core.operations import LatticeOp from sympy.core.relational import Eq, Ne from sympy.core.singleton import Singleton, S from sympy.core.symbol import Symbol, Dummy, _uniquely_named_symbol from sympy.core.sympify import _sympify, sympify, converter from sympy.logic.boolalg import And, Or, Not, Xor, true, false from sympy.sets.contains import Contains from sympy.utilities import subsets from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.utilities.iterables import iproduct, sift, roundrobin from sympy.utilities.misc import func_name, filldedent from mpmath import mpi, mpf tfn = defaultdict(lambda: None, { True: S.true, S.true: S.true, False: S.false, S.false: S.false}) @sympify_method_args class Set(Basic): """ The base class for any kind of set. This is not meant to be used directly as a container of items. It does not behave like the builtin ``set``; see :class:`FiniteSet` for that. Real intervals are represented by the :class:`Interval` class and unions of sets by the :class:`Union` class. The empty set is represented by the :class:`EmptySet` class and available as a singleton as ``S.EmptySet``. """ is_number = False is_iterable = False is_interval = False is_FiniteSet = False is_Interval = False is_ProductSet = False is_Union = False is_Intersection = None is_UniversalSet = None is_Complement = None is_ComplexRegion = False is_empty = None is_finite_set = None @property @deprecated(useinstead="is S.EmptySet or is_empty", issue=16946, deprecated_since_version="1.5") def is_EmptySet(self): return None @staticmethod def _infimum_key(expr): """ Return infimum (if possible) else S.Infinity. """ try: infimum = expr.inf assert infimum.is_comparable except (NotImplementedError, AttributeError, AssertionError, ValueError): infimum = S.Infinity return infimum def union(self, other): """ Returns the union of 'self' and 'other'. Examples ======== As a shortcut it is possible to use the '+' operator: >>> from sympy import Interval, FiniteSet >>> Interval(0, 1).union(Interval(2, 3)) Union(Interval(0, 1), Interval(2, 3)) >>> Interval(0, 1) + Interval(2, 3) Union(Interval(0, 1), Interval(2, 3)) >>> Interval(1, 2, True, True) + FiniteSet(2, 3) Union(FiniteSet(3), Interval.Lopen(1, 2)) Similarly it is possible to use the '-' operator for set differences: >>> Interval(0, 2) - Interval(0, 1) Interval.Lopen(1, 2) >>> Interval(1, 3) - FiniteSet(2) Union(Interval.Ropen(1, 2), Interval.Lopen(2, 3)) """ return Union(self, other) def intersect(self, other): """ Returns the intersection of 'self' and 'other'. 
>>> from sympy import Interval >>> Interval(1, 3).intersect(Interval(1, 2)) Interval(1, 2) >>> from sympy import imageset, Lambda, symbols, S >>> n, m = symbols('n m') >>> a = imageset(Lambda(n, 2*n), S.Integers) >>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers)) EmptySet """ return Intersection(self, other) def intersection(self, other): """ Alias for :meth:`intersect()` """ return self.intersect(other) def is_disjoint(self, other): """ Returns True if 'self' and 'other' are disjoint Examples ======== >>> from sympy import Interval >>> Interval(0, 2).is_disjoint(Interval(1, 2)) False >>> Interval(0, 2).is_disjoint(Interval(3, 4)) True References ========== .. [1] https://en.wikipedia.org/wiki/Disjoint_sets """ return self.intersect(other) == S.EmptySet def isdisjoint(self, other): """ Alias for :meth:`is_disjoint()` """ return self.is_disjoint(other) def complement(self, universe): r""" The complement of 'self' w.r.t the given universe. Examples ======== >>> from sympy import Interval, S >>> Interval(0, 1).complement(S.Reals) Union(Interval.open(-oo, 0), Interval.open(1, oo)) >>> Interval(0, 1).complement(S.UniversalSet) Complement(UniversalSet, Interval(0, 1)) """ return Complement(universe, self) def _complement(self, other): # this behaves as other - self if isinstance(self, ProductSet) and isinstance(other, ProductSet): # If self and other are disjoint then other - self == self if len(self.sets) != len(other.sets): return other # There can be other ways to represent this but this gives: # (A x B) - (C x D) = ((A - C) x B) U (A x (B - D)) overlaps = [] pairs = list(zip(self.sets, other.sets)) for n in range(len(pairs)): sets = (o if i != n else o-s for i, (s, o) in enumerate(pairs)) overlaps.append(ProductSet(*sets)) return Union(*overlaps) elif isinstance(other, Interval): if isinstance(self, Interval) or isinstance(self, FiniteSet): return Intersection(other, self.complement(S.Reals)) elif isinstance(other, Union): return Union(*(o - self for o in other.args)) elif isinstance(other, Complement): return Complement(other.args[0], Union(other.args[1], self), evaluate=False) elif isinstance(other, EmptySet): return S.EmptySet elif isinstance(other, FiniteSet): from sympy.utilities.iterables import sift sifted = sift(other, lambda x: fuzzy_bool(self.contains(x))) # ignore those that are contained in self return Union(FiniteSet(*(sifted[False])), Complement(FiniteSet(*(sifted[None])), self, evaluate=False) if sifted[None] else S.EmptySet) def symmetric_difference(self, other): """ Returns symmetric difference of `self` and `other`. Examples ======== >>> from sympy import Interval, S >>> Interval(1, 3).symmetric_difference(S.Reals) Union(Interval.open(-oo, 1), Interval.open(3, oo)) >>> Interval(1, 10).symmetric_difference(S.Reals) Union(Interval.open(-oo, 1), Interval.open(10, oo)) >>> from sympy import S, EmptySet >>> S.Reals.symmetric_difference(EmptySet) Reals References ========== .. 
[1] https://en.wikipedia.org/wiki/Symmetric_difference """ return SymmetricDifference(self, other) def _symmetric_difference(self, other): return Union(Complement(self, other), Complement(other, self)) @property def inf(self): """ The infimum of 'self' Examples ======== >>> from sympy import Interval, Union >>> Interval(0, 1).inf 0 >>> Union(Interval(0, 1), Interval(2, 3)).inf 0 """ return self._inf @property def _inf(self): raise NotImplementedError("(%s)._inf" % self) @property def sup(self): """ The supremum of 'self' Examples ======== >>> from sympy import Interval, Union >>> Interval(0, 1).sup 1 >>> Union(Interval(0, 1), Interval(2, 3)).sup 3 """ return self._sup @property def _sup(self): raise NotImplementedError("(%s)._sup" % self) def contains(self, other): """ Returns a SymPy value indicating whether ``other`` is contained in ``self``: ``true`` if it is, ``false`` if it isn't, else an unevaluated ``Contains`` expression (or, as in the case of ConditionSet and a union of FiniteSet/Intervals, an expression indicating the conditions for containment). Examples ======== >>> from sympy import Interval, S >>> from sympy.abc import x >>> Interval(0, 1).contains(0.5) True As a shortcut it is possible to use the 'in' operator, but that will raise an error unless an affirmative true or false is not obtained. >>> Interval(0, 1).contains(x) (0 <= x) & (x <= 1) >>> x in Interval(0, 1) Traceback (most recent call last): ... TypeError: did not evaluate to a bool: None The result of 'in' is a bool, not a SymPy value >>> 1 in Interval(0, 2) True >>> _ is S.true False """ other = sympify(other, strict=True) c = self._contains(other) if c is None: return Contains(other, self, evaluate=False) b = tfn[c] if b is None: return c return b def _contains(self, other): raise NotImplementedError(filldedent(''' (%s)._contains(%s) is not defined. This method, when defined, will receive a sympified object. The method should return True, False, None or something that expresses what must be true for the containment of that object in self to be evaluated. If None is returned then a generic Contains object will be returned by the ``contains`` method.''' % (self, other))) def is_subset(self, other): """ Returns True if 'self' is a subset of 'other'. Examples ======== >>> from sympy import Interval >>> Interval(0, 0.5).is_subset(Interval(0, 1)) True >>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True)) False """ if not isinstance(other, Set): raise ValueError("Unknown argument '%s'" % other) # Handle the trivial cases if self == other: return True is_empty = self.is_empty if is_empty is True: return True elif fuzzy_not(is_empty) and other.is_empty: return False if self.is_finite_set is False and other.is_finite_set: return False # Dispatch on subclass rules ret = self._eval_is_subset(other) if ret is not None: return ret ret = other._eval_is_superset(self) if ret is not None: return ret # Use pairwise rules from multiple dispatch from sympy.sets.handlers.issubset import is_subset_sets ret = is_subset_sets(self, other) if ret is not None: return ret # Fall back on computing the intersection # XXX: We shouldn't do this. A query like this should be handled # without evaluating new Set objects. It should be the other way round # so that the intersect method uses is_subset for evaluation. 
if self.intersect(other) == self: return True def _eval_is_subset(self, other): '''Returns a fuzzy bool for whether self is a subset of other.''' return None def _eval_is_superset(self, other): '''Returns a fuzzy bool for whether self is a subset of other.''' return None # This should be deprecated: def issubset(self, other): """ Alias for :meth:`is_subset()` """ return self.is_subset(other) def is_proper_subset(self, other): """ Returns True if 'self' is a proper subset of 'other'. Examples ======== >>> from sympy import Interval >>> Interval(0, 0.5).is_proper_subset(Interval(0, 1)) True >>> Interval(0, 1).is_proper_subset(Interval(0, 1)) False """ if isinstance(other, Set): return self != other and self.is_subset(other) else: raise ValueError("Unknown argument '%s'" % other) def is_superset(self, other): """ Returns True if 'self' is a superset of 'other'. Examples ======== >>> from sympy import Interval >>> Interval(0, 0.5).is_superset(Interval(0, 1)) False >>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True)) True """ if isinstance(other, Set): return other.is_subset(self) else: raise ValueError("Unknown argument '%s'" % other) # This should be deprecated: def issuperset(self, other): """ Alias for :meth:`is_superset()` """ return self.is_superset(other) def is_proper_superset(self, other): """ Returns True if 'self' is a proper superset of 'other'. Examples ======== >>> from sympy import Interval >>> Interval(0, 1).is_proper_superset(Interval(0, 0.5)) True >>> Interval(0, 1).is_proper_superset(Interval(0, 1)) False """ if isinstance(other, Set): return self != other and self.is_superset(other) else: raise ValueError("Unknown argument '%s'" % other) def _eval_powerset(self): from .powerset import PowerSet return PowerSet(self) def powerset(self): """ Find the Power set of 'self'. Examples ======== >>> from sympy import EmptySet, FiniteSet, Interval, PowerSet A power set of an empty set: >>> from sympy import FiniteSet, EmptySet >>> A = EmptySet >>> A.powerset() FiniteSet(EmptySet) A power set of a finite set: >>> A = FiniteSet(1, 2) >>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2) >>> A.powerset() == FiniteSet(a, b, c, EmptySet) True A power set of an interval: >>> Interval(1, 2).powerset() PowerSet(Interval(1, 2)) References ========== .. [1] https://en.wikipedia.org/wiki/Power_set """ return self._eval_powerset() @property def measure(self): """ The (Lebesgue) measure of 'self' Examples ======== >>> from sympy import Interval, Union >>> Interval(0, 1).measure 1 >>> Union(Interval(0, 1), Interval(2, 3)).measure 2 """ return self._measure @property def boundary(self): """ The boundary or frontier of a set A point x is on the boundary of a set S if 1. x is in the closure of S. I.e. Every neighborhood of x contains a point in S. 2. x is not in the interior of S. I.e. There does not exist an open set centered on x contained entirely within S. There are the points on the outer rim of S. If S is open then these points need not actually be contained within S. For example, the boundary of an interval is its start and end points. This is true regardless of whether or not the interval is open. Examples ======== >>> from sympy import Interval >>> Interval(0, 1).boundary FiniteSet(0, 1) >>> Interval(0, 1, True, False).boundary FiniteSet(0, 1) """ return self._boundary @property def is_open(self): """ Property method to check whether a set is open. A set is open if and only if it has an empty intersection with its boundary. 
In particular, a subset A of the reals is open if and only if each one of its points is contained in an open interval that is a subset of A. Examples ======== >>> from sympy import S >>> S.Reals.is_open True >>> S.Rationals.is_open False """ return Intersection(self, self.boundary).is_empty @property def is_closed(self): """ A property method to check whether a set is closed. A set is closed if its complement is an open set. The closedness of a subset of the reals is determined with respect to R and its standard topology. Examples ======== >>> from sympy import Interval >>> Interval(0, 1).is_closed True """ return self.boundary.is_subset(self) @property def closure(self): """ Property method which returns the closure of a set. The closure is defined as the union of the set itself and its boundary. Examples ======== >>> from sympy import S, Interval >>> S.Reals.closure Reals >>> Interval(0, 1).closure Interval(0, 1) """ return self + self.boundary @property def interior(self): """ Property method which returns the interior of a set. The interior of a set S consists all points of S that do not belong to the boundary of S. Examples ======== >>> from sympy import Interval >>> Interval(0, 1).interior Interval.open(0, 1) >>> Interval(0, 1).boundary.interior EmptySet """ return self - self.boundary @property def _boundary(self): raise NotImplementedError() @property def _measure(self): raise NotImplementedError("(%s)._measure" % self) @sympify_return([('other', 'Set')], NotImplemented) def __add__(self, other): return self.union(other) @sympify_return([('other', 'Set')], NotImplemented) def __or__(self, other): return self.union(other) @sympify_return([('other', 'Set')], NotImplemented) def __and__(self, other): return self.intersect(other) @sympify_return([('other', 'Set')], NotImplemented) def __mul__(self, other): return ProductSet(self, other) @sympify_return([('other', 'Set')], NotImplemented) def __xor__(self, other): return SymmetricDifference(self, other) @sympify_return([('exp', Expr)], NotImplemented) def __pow__(self, exp): if not (exp.is_Integer and exp >= 0): raise ValueError("%s: Exponent must be a positive Integer" % exp) return ProductSet(*[self]*exp) @sympify_return([('other', 'Set')], NotImplemented) def __sub__(self, other): return Complement(self, other) def __contains__(self, other): other = _sympify(other) c = self._contains(other) b = tfn[c] if b is None: raise TypeError('did not evaluate to a bool: %r' % c) return b class ProductSet(Set): """ Represents a Cartesian Product of Sets. Returns a Cartesian product given several sets as either an iterable or individual arguments. Can use '*' operator on any sets for convenient shorthand. Examples ======== >>> from sympy import Interval, FiniteSet, ProductSet >>> I = Interval(0, 5); S = FiniteSet(1, 2, 3) >>> ProductSet(I, S) ProductSet(Interval(0, 5), FiniteSet(1, 2, 3)) >>> (2, 2) in ProductSet(I, S) True >>> Interval(0, 1) * Interval(0, 1) # The unit square ProductSet(Interval(0, 1), Interval(0, 1)) >>> coin = FiniteSet('H', 'T') >>> set(coin**2) {(H, H), (H, T), (T, H), (T, T)} The Cartesian product is not commutative or associative e.g.: >>> I*S == S*I False >>> (I*I)*I == I*(I*I) False Notes ===== - Passes most operations down to the argument sets References ========== .. 
[1] https://en.wikipedia.org/wiki/Cartesian_product """ is_ProductSet = True def __new__(cls, *sets, **assumptions): if len(sets) == 1 and iterable(sets[0]) and not isinstance(sets[0], (Set, set)): SymPyDeprecationWarning( feature="ProductSet(iterable)", useinstead="ProductSet(*iterable)", issue=17557, deprecated_since_version="1.5" ).warn() sets = tuple(sets[0]) sets = [sympify(s) for s in sets] if not all(isinstance(s, Set) for s in sets): raise TypeError("Arguments to ProductSet should be of type Set") # Nullary product of sets is *not* the empty set if len(sets) == 0: return FiniteSet(()) if S.EmptySet in sets: return S.EmptySet return Basic.__new__(cls, *sets, **assumptions) @property def sets(self): return self.args def flatten(self): def _flatten(sets): for s in sets: if s.is_ProductSet: for s2 in _flatten(s.sets): yield s2 else: yield s return ProductSet(*_flatten(self.sets)) def _eval_Eq(self, other): if not other.is_ProductSet: return if len(self.sets) != len(other.sets): return false eqs = (Eq(x, y) for x, y in zip(self.sets, other.sets)) return tfn[fuzzy_and(map(fuzzy_bool, eqs))] def _contains(self, element): """ 'in' operator for ProductSets Examples ======== >>> from sympy import Interval >>> (2, 3) in Interval(0, 5) * Interval(0, 5) True >>> (10, 10) in Interval(0, 5) * Interval(0, 5) False Passes operation on to constituent sets """ if element.is_Symbol: return None if not isinstance(element, Tuple) or len(element) != len(self.sets): return False return fuzzy_and(s._contains(e) for s, e in zip(self.sets, element)) def as_relational(self, *symbols): symbols = [_sympify(s) for s in symbols] if len(symbols) != len(self.sets) or not all( i.is_Symbol for i in symbols): raise ValueError( 'number of symbols must match the number of sets') return And(*[s.as_relational(i) for s, i in zip(self.sets, symbols)]) @property def _boundary(self): return Union(*(ProductSet(*(b + b.boundary if i != j else b.boundary for j, b in enumerate(self.sets))) for i, a in enumerate(self.sets))) @property def is_iterable(self): """ A property method which tests whether a set is iterable or not. Returns True if set is iterable, otherwise returns False. Examples ======== >>> from sympy import FiniteSet, Interval, ProductSet >>> I = Interval(0, 1) >>> A = FiniteSet(1, 2, 3, 4, 5) >>> I.is_iterable False >>> A.is_iterable True """ return all(set.is_iterable for set in self.sets) def __iter__(self): """ A method which implements is_iterable property method. If self.is_iterable returns True (both constituent sets are iterable), then return the Cartesian Product. Otherwise, raise TypeError. """ return iproduct(*self.sets) @property def is_empty(self): return fuzzy_or(s.is_empty for s in self.sets) @property def is_finite_set(self): all_finite = fuzzy_and(s.is_finite_set for s in self.sets) return fuzzy_or([self.is_empty, all_finite]) @property def _measure(self): measure = 1 for s in self.sets: measure *= s.measure return measure def __len__(self): return reduce(lambda a, b: a*b, (len(s) for s in self.args)) def __bool__(self): return all([bool(s) for s in self.sets]) __nonzero__ = __bool__ class Interval(Set, EvalfMixin): """ Represents a real interval as a Set. Usage: Returns an interval with end points "start" and "end". For left_open=True (default left_open is False) the interval will be open on the left. Similarly, for right_open=True the interval will be open on the right. 
Examples ======== >>> from sympy import Symbol, Interval >>> Interval(0, 1) Interval(0, 1) >>> Interval.Ropen(0, 1) Interval.Ropen(0, 1) >>> Interval.Ropen(0, 1) Interval.Ropen(0, 1) >>> Interval.Lopen(0, 1) Interval.Lopen(0, 1) >>> Interval.open(0, 1) Interval.open(0, 1) >>> a = Symbol('a', real=True) >>> Interval(0, a) Interval(0, a) Notes ===== - Only real end points are supported - Interval(a, b) with a > b will return the empty set - Use the evalf() method to turn an Interval into an mpmath 'mpi' interval instance References ========== .. [1] https://en.wikipedia.org/wiki/Interval_%28mathematics%29 """ is_Interval = True def __new__(cls, start, end, left_open=False, right_open=False): start = _sympify(start) end = _sympify(end) left_open = _sympify(left_open) right_open = _sympify(right_open) if not all(isinstance(a, (type(true), type(false))) for a in [left_open, right_open]): raise NotImplementedError( "left_open and right_open can have only true/false values, " "got %s and %s" % (left_open, right_open)) inftys = [S.Infinity, S.NegativeInfinity] # Only allow real intervals (use symbols with 'is_extended_real=True'). if not all(i.is_extended_real is not False or i in inftys for i in (start, end)): raise ValueError("Non-real intervals are not supported") # evaluate if possible if (end < start) == True: return S.EmptySet elif (end - start).is_negative: return S.EmptySet if end == start and (left_open or right_open): return S.EmptySet if end == start and not (left_open or right_open): if start is S.Infinity or start is S.NegativeInfinity: return S.EmptySet return FiniteSet(end) # Make sure infinite interval end points are open. if start is S.NegativeInfinity: left_open = true if end is S.Infinity: right_open = true if start == S.Infinity or end == S.NegativeInfinity: return S.EmptySet return Basic.__new__(cls, start, end, left_open, right_open) @property def start(self): """ The left end point of 'self'. This property takes the same value as the 'inf' property. Examples ======== >>> from sympy import Interval >>> Interval(0, 1).start 0 """ return self._args[0] _inf = left = start @classmethod def open(cls, a, b): """Return an interval including neither boundary.""" return cls(a, b, True, True) @classmethod def Lopen(cls, a, b): """Return an interval not including the left boundary.""" return cls(a, b, True, False) @classmethod def Ropen(cls, a, b): """Return an interval not including the right boundary.""" return cls(a, b, False, True) @property def end(self): """ The right end point of 'self'. This property takes the same value as the 'sup' property. Examples ======== >>> from sympy import Interval >>> Interval(0, 1).end 1 """ return self._args[1] _sup = right = end @property def left_open(self): """ True if 'self' is left-open. Examples ======== >>> from sympy import Interval >>> Interval(0, 1, left_open=True).left_open True >>> Interval(0, 1, left_open=False).left_open False """ return self._args[2] @property def right_open(self): """ True if 'self' is right-open. 
Examples ======== >>> from sympy import Interval >>> Interval(0, 1, right_open=True).right_open True >>> Interval(0, 1, right_open=False).right_open False """ return self._args[3] @property def is_empty(self): if self.left_open or self.right_open: cond = self.start >= self.end # One/both bounds open else: cond = self.start > self.end # Both bounds closed return fuzzy_bool(cond) @property def is_finite_set(self): return self.measure.is_zero def _complement(self, other): if other == S.Reals: a = Interval(S.NegativeInfinity, self.start, True, not self.left_open) b = Interval(self.end, S.Infinity, not self.right_open, True) return Union(a, b) if isinstance(other, FiniteSet): nums = [m for m in other.args if m.is_number] if nums == []: return None return Set._complement(self, other) @property def _boundary(self): finite_points = [p for p in (self.start, self.end) if abs(p) != S.Infinity] return FiniteSet(*finite_points) def _contains(self, other): if not isinstance(other, Expr) or ( other is S.Infinity or other is S.NegativeInfinity or other is S.NaN or other is S.ComplexInfinity) or other.is_extended_real is False: return false if self.start is S.NegativeInfinity and self.end is S.Infinity: if not other.is_extended_real is None: return other.is_extended_real d = Dummy() return self.as_relational(d).subs(d, other) def as_relational(self, x): """Rewrite an interval in terms of inequalities and logic operators.""" x = sympify(x) if self.right_open: right = x < self.end else: right = x <= self.end if self.left_open: left = self.start < x else: left = self.start <= x return And(left, right) @property def _measure(self): return self.end - self.start def to_mpi(self, prec=53): return mpi(mpf(self.start._eval_evalf(prec)), mpf(self.end._eval_evalf(prec))) def _eval_evalf(self, prec): return Interval(self.left._eval_evalf(prec), self.right._eval_evalf(prec), left_open=self.left_open, right_open=self.right_open) def _is_comparable(self, other): is_comparable = self.start.is_comparable is_comparable &= self.end.is_comparable is_comparable &= other.start.is_comparable is_comparable &= other.end.is_comparable return is_comparable @property def is_left_unbounded(self): """Return ``True`` if the left endpoint is negative infinity. """ return self.left is S.NegativeInfinity or self.left == Float("-inf") @property def is_right_unbounded(self): """Return ``True`` if the right endpoint is positive infinity. """ return self.right is S.Infinity or self.right == Float("+inf") def _eval_Eq(self, other): if not isinstance(other, Interval): if isinstance(other, FiniteSet): return false elif isinstance(other, Set): return None return false return And(Eq(self.left, other.left), Eq(self.right, other.right), self.left_open == other.left_open, self.right_open == other.right_open) class Union(Set, LatticeOp, EvalfMixin): """ Represents a union of sets as a :class:`Set`. Examples ======== >>> from sympy import Union, Interval >>> Union(Interval(1, 2), Interval(3, 4)) Union(Interval(1, 2), Interval(3, 4)) The Union constructor will always try to merge overlapping intervals, if possible. For example: >>> Union(Interval(1, 2), Interval(2, 3)) Interval(1, 3) See Also ======== Intersection References ========== .. 
[1] https://en.wikipedia.org/wiki/Union_%28set_theory%29 """ is_Union = True @property def identity(self): return S.EmptySet @property def zero(self): return S.UniversalSet def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) # flatten inputs to merge intersections and iterables args = _sympify(args) # Reduce sets using known rules if evaluate: args = list(cls._new_args_filter(args)) return simplify_union(args) args = list(ordered(args, Set._infimum_key)) obj = Basic.__new__(cls, *args) obj._argset = frozenset(args) return obj @property @cacheit def args(self): return self._args def _complement(self, universe): # DeMorgan's Law return Intersection(s.complement(universe) for s in self.args) @property def _inf(self): # We use Min so that sup is meaningful in combination with symbolic # interval end points. from sympy.functions.elementary.miscellaneous import Min return Min(*[set.inf for set in self.args]) @property def _sup(self): # We use Max so that sup is meaningful in combination with symbolic # end points. from sympy.functions.elementary.miscellaneous import Max return Max(*[set.sup for set in self.args]) @property def is_empty(self): return fuzzy_and(set.is_empty for set in self.args) @property def is_finite_set(self): return fuzzy_and(set.is_finite_set for set in self.args) @property def _measure(self): # Measure of a union is the sum of the measures of the sets minus # the sum of their pairwise intersections plus the sum of their # triple-wise intersections minus ... etc... # Sets is a collection of intersections and a set of elementary # sets which made up those intersections (called "sos" for set of sets) # An example element might of this list might be: # ( {A,B,C}, A.intersect(B).intersect(C) ) # Start with just elementary sets ( ({A}, A), ({B}, B), ... ) # Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero sets = [(FiniteSet(s), s) for s in self.args] measure = 0 parity = 1 while sets: # Add up the measure of these sets and add or subtract it to total measure += parity * sum(inter.measure for sos, inter in sets) # For each intersection in sets, compute the intersection with every # other set not already part of the intersection. sets = ((sos + FiniteSet(newset), newset.intersect(intersection)) for sos, intersection in sets for newset in self.args if newset not in sos) # Clear out sets with no measure sets = [(sos, inter) for sos, inter in sets if inter.measure != 0] # Clear out duplicates sos_list = [] sets_list = [] for set in sets: if set[0] in sos_list: continue else: sos_list.append(set[0]) sets_list.append(set) sets = sets_list # Flip Parity - next time subtract/add if we added/subtracted here parity *= -1 return measure @property def _boundary(self): def boundary_of_set(i): """ The boundary of set i minus interior of all other sets """ b = self.args[i].boundary for j, a in enumerate(self.args): if j != i: b = b - a.interior return b return Union(*map(boundary_of_set, range(len(self.args)))) def _contains(self, other): return Or(*[s.contains(other) for s in self.args]) def is_subset(self, other): return fuzzy_and(s.is_subset(other) for s in self.args) def as_relational(self, symbol): """Rewrite a Union in terms of equalities and logic operators. 
""" if all(isinstance(i, (FiniteSet, Interval)) for i in self.args): if len(self.args) == 2: a, b = self.args if (a.sup == b.inf and a.inf is S.NegativeInfinity and b.sup is S.Infinity): return And(Ne(symbol, a.sup), symbol < b.sup, symbol > a.inf) return Or(*[set.as_relational(symbol) for set in self.args]) raise NotImplementedError('relational of Union with non-Intervals') @property def is_iterable(self): return all(arg.is_iterable for arg in self.args) def _eval_evalf(self, prec): try: return Union(*(set._eval_evalf(prec) for set in self.args)) except (TypeError, ValueError, NotImplementedError): import sys raise (TypeError("Not all sets are evalf-able"), None, sys.exc_info()[2]) def __iter__(self): return roundrobin(*(iter(arg) for arg in self.args)) class Intersection(Set, LatticeOp): """ Represents an intersection of sets as a :class:`Set`. Examples ======== >>> from sympy import Intersection, Interval >>> Intersection(Interval(1, 3), Interval(2, 4)) Interval(2, 3) We often use the .intersect method >>> Interval(1,3).intersect(Interval(2,4)) Interval(2, 3) See Also ======== Union References ========== .. [1] https://en.wikipedia.org/wiki/Intersection_%28set_theory%29 """ is_Intersection = True @property def identity(self): return S.UniversalSet @property def zero(self): return S.EmptySet def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) # flatten inputs to merge intersections and iterables args = list(ordered(set(_sympify(args)))) # Reduce sets using known rules if evaluate: args = list(cls._new_args_filter(args)) return simplify_intersection(args) args = list(ordered(args, Set._infimum_key)) obj = Basic.__new__(cls, *args) obj._argset = frozenset(args) return obj @property @cacheit def args(self): return self._args @property def is_iterable(self): return any(arg.is_iterable for arg in self.args) @property def is_finite_set(self): if fuzzy_or(arg.is_finite_set for arg in self.args): return True @property def _inf(self): raise NotImplementedError() @property def _sup(self): raise NotImplementedError() def _contains(self, other): return And(*[set.contains(other) for set in self.args]) def __iter__(self): sets_sift = sift(self.args, lambda x: x.is_iterable) completed = False candidates = sets_sift[True] + sets_sift[None] finite_candidates, others = [], [] for candidate in candidates: length = None try: length = len(candidate) except TypeError: others.append(candidate) if length is not None: finite_candidates.append(candidate) finite_candidates.sort(key=len) for s in finite_candidates + others: other_sets = set(self.args) - set((s,)) other = Intersection(*other_sets, evaluate=False) completed = True for x in s: try: if x in other: yield x except TypeError: completed = False if completed: return if not completed: if not candidates: raise TypeError("None of the constituent sets are iterable") raise TypeError( "The computation had not completed because of the " "undecidable set membership is found in every candidates.") @staticmethod def _handle_finite_sets(args): '''Simplify intersection of one or more FiniteSets and other sets''' # First separate the FiniteSets from the others fs_args, others = sift(args, lambda x: x.is_FiniteSet, binary=True) # Let the caller handle intersection of non-FiniteSets if not fs_args: return # Convert to Python sets and build the set of all elements fs_sets = [set(fs) for fs in fs_args] all_elements = reduce(lambda a, b: a | b, fs_sets, set()) # Extract elements that are definitely in or definitely not in the # 
intersection. Here we check contains for all of args. definite = set() for e in all_elements: inall = fuzzy_and(s.contains(e) for s in args) if inall is True: definite.add(e) if inall is not None: for s in fs_sets: s.discard(e) # At this point all elements in all of fs_sets are possibly in the # intersection. In some cases this is because they are definitely in # the intersection of the finite sets but it's not clear if they are # members of others. We might have {m, n}, {m}, and Reals where we # don't know if m or n is real. We want to remove n here but it is # possibly in because it might be equal to m. So what we do now is # extract the elements that are definitely in the remaining finite # sets iteratively until we end up with {n}, {}. At that point if we # get any empty set all remaining elements are discarded. fs_elements = reduce(lambda a, b: a | b, fs_sets, set()) # Need fuzzy containment testing fs_symsets = [FiniteSet(*s) for s in fs_sets] while fs_elements: for e in fs_elements: infs = fuzzy_and(s.contains(e) for s in fs_symsets) if infs is True: definite.add(e) if infs is not None: for n, s in enumerate(fs_sets): # Update Python set and FiniteSet if e in s: s.remove(e) fs_symsets[n] = FiniteSet(*s) fs_elements.remove(e) break # If we completed the for loop without removing anything we are # done so quit the outer while loop else: break # If any of the sets of remainder elements is empty then we discard # all of them for the intersection. if not all(fs_sets): fs_sets = [set()] # Here we fold back the definitely included elements into each fs. # Since they are definitely included they must have been members of # each FiniteSet to begin with. We could instead fold these in with a # Union at the end to get e.g. {3}|({x}&{y}) rather than {3,x}&{3,y}. if definite: fs_sets = [fs | definite for fs in fs_sets] if fs_sets == [set()]: return S.EmptySet sets = [FiniteSet(*s) for s in fs_sets] # Any set in others is redundant if it contains all the elements that # are in the finite sets so we don't need it in the Intersection all_elements = reduce(lambda a, b: a | b, fs_sets, set()) is_redundant = lambda o: all(fuzzy_bool(o.contains(e)) for e in all_elements) others = [o for o in others if not is_redundant(o)] if others: rest = Intersection(*others) # XXX: Maybe this shortcut should be at the beginning. For large # FiniteSets it could much more efficient to process the other # sets first... if rest is S.EmptySet: return S.EmptySet # Flatten the Intersection if rest.is_Intersection: sets.extend(rest.args) else: sets.append(rest) if len(sets) == 1: return sets[0] else: return Intersection(*sets, evaluate=False) def as_relational(self, symbol): """Rewrite an Intersection in terms of equalities and logic operators""" return And(*[set.as_relational(symbol) for set in self.args]) class Complement(Set, EvalfMixin): r"""Represents the set difference or relative complement of a set with another set. `A - B = \{x \in A \mid x \notin B\}` Examples ======== >>> from sympy import Complement, FiniteSet >>> Complement(FiniteSet(0, 1, 2), FiniteSet(1)) FiniteSet(0, 2) See Also ========= Intersection, Union References ========== .. [1] http://mathworld.wolfram.com/ComplementSet.html """ is_Complement = True def __new__(cls, a, b, evaluate=True): if evaluate: return Complement.reduce(a, b) return Basic.__new__(cls, a, b) @staticmethod def reduce(A, B): """ Simplify a :class:`Complement`. 
""" if B == S.UniversalSet or A.is_subset(B): return S.EmptySet if isinstance(B, Union): return Intersection(*(s.complement(A) for s in B.args)) result = B._complement(A) if result is not None: return result else: return Complement(A, B, evaluate=False) def _contains(self, other): A = self.args[0] B = self.args[1] return And(A.contains(other), Not(B.contains(other))) def as_relational(self, symbol): """Rewrite a complement in terms of equalities and logic operators""" A, B = self.args A_rel = A.as_relational(symbol) B_rel = Not(B.as_relational(symbol)) return And(A_rel, B_rel) @property def is_iterable(self): if self.args[0].is_iterable: return True @property def is_finite_set(self): A, B = self.args a_finite = A.is_finite_set if a_finite is True: return True elif a_finite is False and B.is_finite_set: return False def __iter__(self): A, B = self.args for a in A: if a not in B: yield a else: continue class EmptySet(with_metaclass(Singleton, Set)): """ Represents the empty set. The empty set is available as a singleton as S.EmptySet. Examples ======== >>> from sympy import S, Interval >>> S.EmptySet EmptySet >>> Interval(1, 2).intersect(S.EmptySet) EmptySet See Also ======== UniversalSet References ========== .. [1] https://en.wikipedia.org/wiki/Empty_set """ is_empty = True is_finite_set = True is_FiniteSet = True @property @deprecated(useinstead="is S.EmptySet or is_empty", issue=16946, deprecated_since_version="1.5") def is_EmptySet(self): return True @property def _measure(self): return 0 def _contains(self, other): return false def as_relational(self, symbol): return false def __len__(self): return 0 def __iter__(self): return iter([]) def _eval_powerset(self): return FiniteSet(self) @property def _boundary(self): return self def _complement(self, other): return other def _symmetric_difference(self, other): return other class UniversalSet(with_metaclass(Singleton, Set)): """ Represents the set of all things. The universal set is available as a singleton as S.UniversalSet Examples ======== >>> from sympy import S, Interval >>> S.UniversalSet UniversalSet >>> Interval(1, 2).intersect(S.UniversalSet) Interval(1, 2) See Also ======== EmptySet References ========== .. [1] https://en.wikipedia.org/wiki/Universal_set """ is_UniversalSet = True is_empty = False is_finite_set = False def _complement(self, other): return S.EmptySet def _symmetric_difference(self, other): return other @property def _measure(self): return S.Infinity def _contains(self, other): return true def as_relational(self, symbol): return true @property def _boundary(self): return S.EmptySet class FiniteSet(Set, EvalfMixin): """ Represents a finite set of discrete numbers Examples ======== >>> from sympy import FiniteSet >>> FiniteSet(1, 2, 3, 4) FiniteSet(1, 2, 3, 4) >>> 3 in FiniteSet(1, 2, 3, 4) True >>> members = [1, 2, 3, 4] >>> f = FiniteSet(*members) >>> f FiniteSet(1, 2, 3, 4) >>> f - FiniteSet(2) FiniteSet(1, 3, 4) >>> f + FiniteSet(2, 5) FiniteSet(1, 2, 3, 4, 5) References ========== .. 
[1] https://en.wikipedia.org/wiki/Finite_set """ is_FiniteSet = True is_iterable = True is_empty = False is_finite_set = True def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) if evaluate: args = list(map(sympify, args)) if len(args) == 0: return S.EmptySet else: args = list(map(sympify, args)) _args_set = set(args) args = list(ordered(_args_set, Set._infimum_key)) obj = Basic.__new__(cls, *args) obj._args_set = _args_set return obj def _eval_Eq(self, other): if not isinstance(other, FiniteSet): # XXX: If Interval(x, x, evaluate=False) worked then the line # below would mean that # FiniteSet(x) & Interval(x, x, evaluate=False) -> false if isinstance(other, Interval): return false elif isinstance(other, Set): return None return false def all_in_both(): s_set = set(self.args) o_set = set(other.args) yield fuzzy_and(self._contains(e) for e in o_set - s_set) yield fuzzy_and(other._contains(e) for e in s_set - o_set) return tfn[fuzzy_and(all_in_both())] def __iter__(self): return iter(self.args) def _complement(self, other): if isinstance(other, Interval): nums = sorted(m for m in self.args if m.is_number) if other == S.Reals and nums != []: syms = [m for m in self.args if m.is_Symbol] # Reals cannot contain elements other than numbers and symbols. intervals = [] # Build up a list of intervals between the elements intervals += [Interval(S.NegativeInfinity, nums[0], True, True)] for a, b in zip(nums[:-1], nums[1:]): intervals.append(Interval(a, b, True, True)) # both open intervals.append(Interval(nums[-1], S.Infinity, True, True)) if syms != []: return Complement(Union(*intervals, evaluate=False), FiniteSet(*syms), evaluate=False) else: return Union(*intervals, evaluate=False) elif nums == []: return None elif isinstance(other, FiniteSet): unk = [] for i in self: c = sympify(other.contains(i)) if c is not S.true and c is not S.false: unk.append(i) unk = FiniteSet(*unk) if unk == self: return not_true = [] for i in other: c = sympify(self.contains(i)) if c is not S.true: not_true.append(i) return Complement(FiniteSet(*not_true), unk) return Set._complement(self, other) def _contains(self, other): """ Tests whether an element, other, is in the set. The actual test is for mathematical equality (as opposed to syntactical equality). In the worst case all elements of the set must be checked. Examples ======== >>> from sympy import FiniteSet >>> 1 in FiniteSet(1, 2) True >>> 5 in FiniteSet(1, 2) False """ if other in self._args_set: return True else: # evaluate=True is needed to override evaluate=False context; # we need Eq to do the evaluation return fuzzy_or(fuzzy_bool(Eq(e, other, evaluate=True)) for e in self.args) def _eval_is_subset(self, other): return fuzzy_and(other._contains(e) for e in self.args) @property def _boundary(self): return self @property def _inf(self): from sympy.functions.elementary.miscellaneous import Min return Min(*self) @property def _sup(self): from sympy.functions.elementary.miscellaneous import Max return Max(*self) @property def measure(self): return 0 def __len__(self): return len(self.args) def as_relational(self, symbol): """Rewrite a FiniteSet in terms of equalities and logic operators. 
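        Examples
        ========

        An illustrative call (the output is shown as the default printer
        would render it):

        >>> from sympy import FiniteSet, Symbol
        >>> FiniteSet(1, 2).as_relational(Symbol('x'))
        Eq(x, 1) | Eq(x, 2)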
""" from sympy.core.relational import Eq return Or(*[Eq(symbol, elem) for elem in self]) def compare(self, other): return (hash(self) - hash(other)) def _eval_evalf(self, prec): return FiniteSet(*[elem._eval_evalf(prec) for elem in self]) @property def _sorted_args(self): return self.args def _eval_powerset(self): return self.func(*[self.func(*s) for s in subsets(self.args)]) def _eval_rewrite_as_PowerSet(self, *args, **kwargs): """Rewriting method for a finite set to a power set.""" from .powerset import PowerSet is2pow = lambda n: bool(n and not n & (n - 1)) if not is2pow(len(self)): return None fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet if not all((fs_test(arg) for arg in args)): return None biggest = max(args, key=len) for arg in subsets(biggest.args): arg_set = FiniteSet(*arg) if arg_set not in args: return None return PowerSet(biggest) def __ge__(self, other): if not isinstance(other, Set): raise TypeError("Invalid comparison of set with %s" % func_name(other)) return other.is_subset(self) def __gt__(self, other): if not isinstance(other, Set): raise TypeError("Invalid comparison of set with %s" % func_name(other)) return self.is_proper_superset(other) def __le__(self, other): if not isinstance(other, Set): raise TypeError("Invalid comparison of set with %s" % func_name(other)) return self.is_subset(other) def __lt__(self, other): if not isinstance(other, Set): raise TypeError("Invalid comparison of set with %s" % func_name(other)) return self.is_proper_subset(other) converter[set] = lambda x: FiniteSet(*x) converter[frozenset] = lambda x: FiniteSet(*x) class SymmetricDifference(Set): """Represents the set of elements which are in either of the sets and not in their intersection. Examples ======== >>> from sympy import SymmetricDifference, FiniteSet >>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5)) FiniteSet(1, 2, 4, 5) See Also ======== Complement, Union References ========== .. [1] https://en.wikipedia.org/wiki/Symmetric_difference """ is_SymmetricDifference = True def __new__(cls, a, b, evaluate=True): if evaluate: return SymmetricDifference.reduce(a, b) return Basic.__new__(cls, a, b) @staticmethod def reduce(A, B): result = B._symmetric_difference(A) if result is not None: return result else: return SymmetricDifference(A, B, evaluate=False) def as_relational(self, symbol): """Rewrite a symmetric_difference in terms of equalities and logic operators""" A, B = self.args A_rel = A.as_relational(symbol) B_rel = B.as_relational(symbol) return Xor(A_rel, B_rel) @property def is_iterable(self): if all(arg.is_iterable for arg in self.args): return True def __iter__(self): args = self.args union = roundrobin(*(iter(arg) for arg in args)) for item in union: count = 0 for s in args: if item in s: count += 1 if count % 2 == 1: yield item def imageset(*args): r""" Return an image of the set under transformation ``f``. If this function can't compute the image, it returns an unevaluated ImageSet object. .. 
math:: \{ f(x) \mid x \in \mathrm{self} \} Examples ======== >>> from sympy import S, Interval, Symbol, imageset, sin, Lambda >>> from sympy.abc import x, y >>> imageset(x, 2*x, Interval(0, 2)) Interval(0, 4) >>> imageset(lambda x: 2*x, Interval(0, 2)) Interval(0, 4) >>> imageset(Lambda(x, sin(x)), Interval(-2, 1)) ImageSet(Lambda(x, sin(x)), Interval(-2, 1)) >>> imageset(sin, Interval(-2, 1)) ImageSet(Lambda(x, sin(x)), Interval(-2, 1)) >>> imageset(lambda y: x + y, Interval(-2, 1)) ImageSet(Lambda(y, x + y), Interval(-2, 1)) Expressions applied to the set of Integers are simplified to show as few negatives as possible and linear expressions are converted to a canonical form. If this is not desirable then the unevaluated ImageSet should be used. >>> imageset(x, -2*x + 5, S.Integers) ImageSet(Lambda(x, 2*x + 1), Integers) See Also ======== sympy.sets.fancysets.ImageSet """ from sympy.core import Lambda from sympy.sets.fancysets import ImageSet from sympy.sets.setexpr import set_function if len(args) < 2: raise ValueError('imageset expects at least 2 args, got: %s' % len(args)) if isinstance(args[0], (Symbol, tuple)) and len(args) > 2: f = Lambda(args[0], args[1]) set_list = args[2:] else: f = args[0] set_list = args[1:] if isinstance(f, Lambda): pass elif callable(f): nargs = getattr(f, 'nargs', {}) if nargs: if len(nargs) != 1: raise NotImplementedError(filldedent(''' This function can take more than 1 arg but the potentially complicated set input has not been analyzed at this point to know its dimensions. TODO ''')) N = nargs.args[0] if N == 1: s = 'x' else: s = [Symbol('x%i' % i) for i in range(1, N + 1)] else: if PY3: s = inspect.signature(f).parameters else: s = inspect.getargspec(f).args dexpr = _sympify(f(*[Dummy() for i in s])) var = tuple(_uniquely_named_symbol(Symbol(i), dexpr) for i in s) f = Lambda(var, f(*var)) else: raise TypeError(filldedent(''' expecting lambda, Lambda, or FunctionClass, not \'%s\'.''' % func_name(f))) if any(not isinstance(s, Set) for s in set_list): name = [func_name(s) for s in set_list] raise ValueError( 'arguments after mapping should be sets, not %s' % name) if len(set_list) == 1: set = set_list[0] try: # TypeError if arg count != set dimensions r = set_function(f, set) if r is None: raise TypeError if not r: return r except TypeError: r = ImageSet(f, set) if isinstance(r, ImageSet): f, set = r.args if f.variables[0] == f.expr: return set if isinstance(set, ImageSet): # XXX: Maybe this should just be: # f2 = set.lambda # fun = Lambda(f2.signature, f(*f2.expr)) # return imageset(fun, *set.base_sets) if len(set.lamda.variables) == 1 and len(f.variables) == 1: x = set.lamda.variables[0] y = f.variables[0] return imageset( Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets) if r is not None: return r return ImageSet(f, *set_list) def is_function_invertible_in_set(func, setv): """ Checks whether function ``func`` is invertible when the domain is restricted to set ``setv``. """ from sympy import exp, log # Functions known to always be invertible: if func in (exp, log): return True u = Dummy("u") fdiff = func(u).diff(u) # monotonous functions: # TODO: check subsets (`func` in `setv`) if (fdiff > 0) == True or (fdiff < 0) == True: return True # TODO: support more return None def simplify_union(args): """ Simplify a :class:`Union` using known rules We first start with global rules like 'Merge all FiniteSets' Then we iterate through all pairs and ask the constituent sets if they can simplify themselves with any other constituent. 
This process depends on ``union_sets(a, b)`` functions. """ from sympy.sets.handlers.union import union_sets # ===== Global Rules ===== if not args: return S.EmptySet for arg in args: if not isinstance(arg, Set): raise TypeError("Input args to Union must be Sets") # Merge all finite sets finite_sets = [x for x in args if x.is_FiniteSet] if len(finite_sets) > 1: a = (x for set in finite_sets for x in set) finite_set = FiniteSet(*a) args = [finite_set] + [x for x in args if not x.is_FiniteSet] # ===== Pair-wise Rules ===== # Here we depend on rules built into the constituent sets args = set(args) new_args = True while new_args: for s in args: new_args = False for t in args - set((s,)): new_set = union_sets(s, t) # This returns None if s does not know how to intersect # with t. Returns the newly intersected set otherwise if new_set is not None: if not isinstance(new_set, set): new_set = set((new_set, )) new_args = (args - set((s, t))).union(new_set) break if new_args: args = new_args break if len(args) == 1: return args.pop() else: return Union(*args, evaluate=False) def simplify_intersection(args): """ Simplify an intersection using known rules We first start with global rules like 'if any empty sets return empty set' and 'distribute any unions' Then we iterate through all pairs and ask the constituent sets if they can simplify themselves with any other constituent """ # ===== Global Rules ===== if not args: return S.UniversalSet for arg in args: if not isinstance(arg, Set): raise TypeError("Input args to Union must be Sets") # If any EmptySets return EmptySet if S.EmptySet in args: return S.EmptySet # Handle Finite sets rv = Intersection._handle_finite_sets(args) if rv is not None: return rv # If any of the sets are unions, return a Union of Intersections for s in args: if s.is_Union: other_sets = set(args) - set((s,)) if len(other_sets) > 0: other = Intersection(*other_sets) return Union(*(Intersection(arg, other) for arg in s.args)) else: return Union(*[arg for arg in s.args]) for s in args: if s.is_Complement: args.remove(s) other_sets = args + [s.args[0]] return Complement(Intersection(*other_sets), s.args[1]) from sympy.sets.handlers.intersection import intersection_sets # At this stage we are guaranteed not to have any # EmptySets, FiniteSets, or Unions in the intersection # ===== Pair-wise Rules ===== # Here we depend on rules built into the constituent sets args = set(args) new_args = True while new_args: for s in args: new_args = False for t in args - set((s,)): new_set = intersection_sets(s, t) # This returns None if s does not know how to intersect # with t. 
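                # For instance, intersection_sets(Interval(0, 2), Interval(1, 3))
                # is expected to give Interval(1, 2), whereas a pair the handlers
                # do not cover yields None and is simply skipped on this pass.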
Returns the newly intersected set otherwise if new_set is not None: new_args = (args - set((s, t))).union(set((new_set, ))) break if new_args: args = new_args break if len(args) == 1: return args.pop() else: return Intersection(*args, evaluate=False) def _handle_finite_sets(op, x, y, commutative): # Handle finite sets: fs_args, other = sift([x, y], lambda x: isinstance(x, FiniteSet), binary=True) if len(fs_args) == 2: return FiniteSet(*[op(i, j) for i in fs_args[0] for j in fs_args[1]]) elif len(fs_args) == 1: sets = [_apply_operation(op, other[0], i, commutative) for i in fs_args[0]] return Union(*sets) else: return None def _apply_operation(op, x, y, commutative): from sympy.sets import ImageSet from sympy import symbols,Lambda d = Dummy('d') out = _handle_finite_sets(op, x, y, commutative) if out is None: out = op(x, y) if out is None and commutative: out = op(y, x) if out is None: _x, _y = symbols("x y") if isinstance(x, Set) and not isinstance(y, Set): out = ImageSet(Lambda(d, op(d, y)), x).doit() elif not isinstance(x, Set) and isinstance(y, Set): out = ImageSet(Lambda(d, op(x, d)), y).doit() else: out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y) return out def set_add(x, y): from sympy.sets.handlers.add import _set_add return _apply_operation(_set_add, x, y, commutative=True) def set_sub(x, y): from sympy.sets.handlers.add import _set_sub return _apply_operation(_set_sub, x, y, commutative=False) def set_mul(x, y): from sympy.sets.handlers.mul import _set_mul return _apply_operation(_set_mul, x, y, commutative=True) def set_div(x, y): from sympy.sets.handlers.mul import _set_div return _apply_operation(_set_div, x, y, commutative=False) def set_pow(x, y): from sympy.sets.handlers.power import _set_pow return _apply_operation(_set_pow, x, y, commutative=False) def set_function(f, x): from sympy.sets.handlers.functions import _set_function return _set_function(f, x)
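if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch of the arithmetic helpers defined above; the values
    # quoted in the comments are what each call is expected to simplify to,
    # so treat this as an informal walk-through rather than a doctest.
    # FiniteSet and Interval are defined earlier in this module; the import
    # below is only for copy-paste convenience outside of it.
    from sympy import FiniteSet, Interval

    # Both operands finite: _handle_finite_sets forms every pairwise sum.
    print(set_add(FiniteSet(1, 2), FiniteSet(10, 20)))  # expected: FiniteSet(11, 12, 21, 22)

    # One finite operand: the operation is distributed over its elements and
    # the per-element results are joined with a Union.
    print(set_add(Interval(0, 1), FiniteSet(5)))        # expected: Interval(5, 6)

    # No finite operands: the pairwise _set_add handler (or an ImageSet
    # fallback) is applied directly.
    print(set_add(Interval(0, 1), Interval(2, 3)))      # expected: Interval(2, 4)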
87206dded807279c03307e40653d13c890213ba61edcf3d923121ffe0ac11b41
# -*- coding: utf-8 -*- from sympy import ( Add, And, Basic, Derivative, Dict, Eq, Equivalent, FF, FiniteSet, Function, Ge, Gt, I, Implies, Integral, SingularityFunction, Lambda, Le, Limit, Lt, Matrix, Mul, Nand, Ne, Nor, Not, O, Or, Pow, Product, QQ, RR, Rational, Ray, rootof, RootSum, S, Segment, Subs, Sum, Symbol, Tuple, Trace, Xor, ZZ, conjugate, groebner, oo, pi, symbols, ilex, grlex, Range, Contains, SeqPer, SeqFormula, SeqAdd, SeqMul, fourier_series, fps, ITE, Complement, Interval, Intersection, Union, EulerGamma, GoldenRatio, LambertW, airyai, airybi, airyaiprime, airybiprime, fresnelc, fresnels, Heaviside, dirichlet_eta, diag) from sympy.codegen.ast import (Assignment, AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment, DivAugmentedAssignment, ModAugmentedAssignment) from sympy.core.compatibility import range, u_decode as u, unicode, PY3 from sympy.core.expr import UnevaluatedExpr from sympy.core.trace import Tr from sympy.functions import (Abs, Chi, Ci, Ei, KroneckerDelta, Piecewise, Shi, Si, atan2, beta, binomial, catalan, ceiling, cos, euler, exp, expint, factorial, factorial2, floor, gamma, hyper, log, meijerg, sin, sqrt, subfactorial, tan, uppergamma, lerchphi, elliptic_k, elliptic_f, elliptic_e, elliptic_pi, DiracDelta, bell, bernoulli, fibonacci, tribonacci, lucas, stieltjes, mathieuc, mathieus, mathieusprime, mathieucprime) from sympy.matrices import Adjoint, Inverse, MatrixSymbol, Transpose, KroneckerProduct from sympy.matrices.expressions import hadamard_power from sympy.physics import mechanics from sympy.physics.units import joule, degree from sympy.printing.pretty import pprint, pretty as xpretty from sympy.printing.pretty.pretty_symbology import center_accent, is_combining from sympy.sets import ImageSet, ProductSet from sympy.sets.setexpr import SetExpr from sympy.tensor.array import (ImmutableDenseNDimArray, ImmutableSparseNDimArray, MutableDenseNDimArray, MutableSparseNDimArray, tensorproduct) from sympy.tensor.functions import TensorProduct from sympy.tensor.tensor import (TensorIndexType, tensor_indices, TensorHead, TensorElement, tensor_heads) from sympy.utilities.pytest import raises from sympy.vector import CoordSys3D, Gradient, Curl, Divergence, Dot, Cross, Laplacian import sympy as sym class lowergamma(sym.lowergamma): pass # testing notation inheritance by a subclass with same name a, b, c, d, x, y, z, k, n = symbols('a,b,c,d,x,y,z,k,n') f = Function("f") th = Symbol('theta') ph = Symbol('phi') """ Expressions whose pretty-printing is tested here: (A '#' to the right of an expression indicates that its various acceptable orderings are accounted for by the tests.) 
BASIC EXPRESSIONS: oo (x**2) 1/x y*x**-2 x**Rational(-5,2) (-2)**x Pow(3, 1, evaluate=False) (x**2 + x + 1) # 1-x # 1-2*x # x/y -x/y (x+2)/y # (1+x)*y #3 -5*x/(x+10) # correct placement of negative sign 1 - Rational(3,2)*(x+1) -(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5) # issue 5524 ORDERING: x**2 + x + 1 1 - x 1 - 2*x 2*x**4 + y**2 - x**2 + y**3 RELATIONAL: Eq(x, y) Lt(x, y) Gt(x, y) Le(x, y) Ge(x, y) Ne(x/(y+1), y**2) # RATIONAL NUMBERS: y*x**-2 y**Rational(3,2) * x**Rational(-5,2) sin(x)**3/tan(x)**2 FUNCTIONS (ABS, CONJ, EXP, FUNCTION BRACES, FACTORIAL, FLOOR, CEILING): (2*x + exp(x)) # Abs(x) Abs(x/(x**2+1)) # Abs(1 / (y - Abs(x))) factorial(n) factorial(2*n) subfactorial(n) subfactorial(2*n) factorial(factorial(factorial(n))) factorial(n+1) # conjugate(x) conjugate(f(x+1)) # f(x) f(x, y) f(x/(y+1), y) # f(x**x**x**x**x**x) sin(x)**2 conjugate(a+b*I) conjugate(exp(a+b*I)) conjugate( f(1 + conjugate(f(x))) ) # f(x/(y+1), y) # denom of first arg floor(1 / (y - floor(x))) ceiling(1 / (y - ceiling(x))) SQRT: sqrt(2) 2**Rational(1,3) 2**Rational(1,1000) sqrt(x**2 + 1) (1 + sqrt(5))**Rational(1,3) 2**(1/x) sqrt(2+pi) (2+(1+x**2)/(2+x))**Rational(1,4)+(1+x**Rational(1,1000))/sqrt(3+x**2) DERIVATIVES: Derivative(log(x), x, evaluate=False) Derivative(log(x), x, evaluate=False) + x # Derivative(log(x) + x**2, x, y, evaluate=False) Derivative(2*x*y, y, x, evaluate=False) + x**2 # beta(alpha).diff(alpha) INTEGRALS: Integral(log(x), x) Integral(x**2, x) Integral((sin(x))**2 / (tan(x))**2) Integral(x**(2**x), x) Integral(x**2, (x,1,2)) Integral(x**2, (x,Rational(1,2),10)) Integral(x**2*y**2, x,y) Integral(x**2, (x, None, 1)) Integral(x**2, (x, 1, None)) Integral(sin(th)/cos(ph), (th,0,pi), (ph, 0, 2*pi)) MATRICES: Matrix([[x**2+1, 1], [y, x+y]]) # Matrix([[x/y, y, th], [0, exp(I*k*ph), 1]]) PIECEWISE: Piecewise((x,x<1),(x**2,True)) ITE: ITE(x, y, z) SEQUENCES (TUPLES, LISTS, DICTIONARIES): () [] {} (1/x,) [x**2, 1/x, x, y, sin(th)**2/cos(ph)**2] (x**2, 1/x, x, y, sin(th)**2/cos(ph)**2) {x: sin(x)} {1/x: 1/y, x: sin(x)**2} # [x**2] (x**2,) {x**2: 1} LIMITS: Limit(x, x, oo) Limit(x**2, x, 0) Limit(1/x, x, 0) Limit(sin(x)/x, x, 0) UNITS: joule => kg*m**2/s SUBS: Subs(f(x), x, ph**2) Subs(f(x).diff(x), x, 0) Subs(f(x).diff(x)/y, (x, y), (0, Rational(1, 2))) ORDER: O(1) O(1/x) O(x**2 + y**2) """ def pretty(expr, order=None): """ASCII pretty-printing""" return xpretty(expr, order=order, use_unicode=False, wrap_line=False) def upretty(expr, order=None): """Unicode pretty-printing""" return xpretty(expr, order=order, use_unicode=True, wrap_line=False) def test_pretty_ascii_str(): assert pretty( 'xxx' ) == 'xxx' assert pretty( "xxx" ) == 'xxx' assert pretty( 'xxx\'xxx' ) == 'xxx\'xxx' assert pretty( 'xxx"xxx' ) == 'xxx\"xxx' assert pretty( 'xxx\"xxx' ) == 'xxx\"xxx' assert pretty( "xxx'xxx" ) == 'xxx\'xxx' assert pretty( "xxx\'xxx" ) == 'xxx\'xxx' assert pretty( "xxx\"xxx" ) == 'xxx\"xxx' assert pretty( "xxx\"xxx\'xxx" ) == 'xxx"xxx\'xxx' assert pretty( "xxx\nxxx" ) == 'xxx\nxxx' def test_pretty_unicode_str(): assert pretty( u'xxx' ) == u'xxx' assert pretty( u'xxx' ) == u'xxx' assert pretty( u'xxx\'xxx' ) == u'xxx\'xxx' assert pretty( u'xxx"xxx' ) == u'xxx\"xxx' assert pretty( u'xxx\"xxx' ) == u'xxx\"xxx' assert pretty( u"xxx'xxx" ) == u'xxx\'xxx' assert pretty( u"xxx\'xxx" ) == u'xxx\'xxx' assert pretty( u"xxx\"xxx" ) == u'xxx\"xxx' assert pretty( u"xxx\"xxx\'xxx" ) == u'xxx"xxx\'xxx' assert pretty( u"xxx\nxxx" ) == u'xxx\nxxx' def test_upretty_greek(): assert upretty( oo ) == u'∞' assert upretty( 
Symbol('alpha^+_1') ) == u'α⁺₁' assert upretty( Symbol('beta') ) == u'β' assert upretty(Symbol('lambda')) == u'λ' def test_upretty_multiindex(): assert upretty( Symbol('beta12') ) == u'β₁₂' assert upretty( Symbol('Y00') ) == u'Y₀₀' assert upretty( Symbol('Y_00') ) == u'Y₀₀' assert upretty( Symbol('F^+-') ) == u'F⁺⁻' def test_upretty_sub_super(): assert upretty( Symbol('beta_1_2') ) == u'β₁ ₂' assert upretty( Symbol('beta^1^2') ) == u'β¹ ²' assert upretty( Symbol('beta_1^2') ) == u'β²₁' assert upretty( Symbol('beta_10_20') ) == u'β₁₀ ₂₀' assert upretty( Symbol('beta_ax_gamma^i') ) == u'βⁱₐₓ ᵧ' assert upretty( Symbol("F^1^2_3_4") ) == u'F¹ ²₃ ₄' assert upretty( Symbol("F_1_2^3^4") ) == u'F³ ⁴₁ ₂' assert upretty( Symbol("F_1_2_3_4") ) == u'F₁ ₂ ₃ ₄' assert upretty( Symbol("F^1^2^3^4") ) == u'F¹ ² ³ ⁴' def test_upretty_subs_missing_in_24(): assert upretty( Symbol('F_beta') ) == u'Fᵦ' assert upretty( Symbol('F_gamma') ) == u'Fᵧ' assert upretty( Symbol('F_rho') ) == u'Fᵨ' assert upretty( Symbol('F_phi') ) == u'Fᵩ' assert upretty( Symbol('F_chi') ) == u'Fᵪ' assert upretty( Symbol('F_a') ) == u'Fₐ' assert upretty( Symbol('F_e') ) == u'Fₑ' assert upretty( Symbol('F_i') ) == u'Fᵢ' assert upretty( Symbol('F_o') ) == u'Fₒ' assert upretty( Symbol('F_u') ) == u'Fᵤ' assert upretty( Symbol('F_r') ) == u'Fᵣ' assert upretty( Symbol('F_v') ) == u'Fᵥ' assert upretty( Symbol('F_x') ) == u'Fₓ' def test_missing_in_2X_issue_9047(): if PY3: assert upretty( Symbol('F_h') ) == u'Fₕ' assert upretty( Symbol('F_k') ) == u'Fₖ' assert upretty( Symbol('F_l') ) == u'Fₗ' assert upretty( Symbol('F_m') ) == u'Fₘ' assert upretty( Symbol('F_n') ) == u'Fₙ' assert upretty( Symbol('F_p') ) == u'Fₚ' assert upretty( Symbol('F_s') ) == u'Fₛ' assert upretty( Symbol('F_t') ) == u'Fₜ' def test_upretty_modifiers(): # Accents assert upretty( Symbol('Fmathring') ) == u'F̊' assert upretty( Symbol('Fddddot') ) == u'F⃜' assert upretty( Symbol('Fdddot') ) == u'F⃛' assert upretty( Symbol('Fddot') ) == u'F̈' assert upretty( Symbol('Fdot') ) == u'Ḟ' assert upretty( Symbol('Fcheck') ) == u'F̌' assert upretty( Symbol('Fbreve') ) == u'F̆' assert upretty( Symbol('Facute') ) == u'F́' assert upretty( Symbol('Fgrave') ) == u'F̀' assert upretty( Symbol('Ftilde') ) == u'F̃' assert upretty( Symbol('Fhat') ) == u'F̂' assert upretty( Symbol('Fbar') ) == u'F̅' assert upretty( Symbol('Fvec') ) == u'F⃗' assert upretty( Symbol('Fprime') ) == u'F′' assert upretty( Symbol('Fprm') ) == u'F′' # No faces are actually implemented, but test to make sure the modifiers are stripped assert upretty( Symbol('Fbold') ) == u'Fbold' assert upretty( Symbol('Fbm') ) == u'Fbm' assert upretty( Symbol('Fcal') ) == u'Fcal' assert upretty( Symbol('Fscr') ) == u'Fscr' assert upretty( Symbol('Ffrak') ) == u'Ffrak' # Brackets assert upretty( Symbol('Fnorm') ) == u'‖F‖' assert upretty( Symbol('Favg') ) == u'⟨F⟩' assert upretty( Symbol('Fabs') ) == u'|F|' assert upretty( Symbol('Fmag') ) == u'|F|' # Combinations assert upretty( Symbol('xvecdot') ) == u'x⃗̇' assert upretty( Symbol('xDotVec') ) == u'ẋ⃗' assert upretty( Symbol('xHATNorm') ) == u'‖x̂‖' assert upretty( Symbol('xMathring_yCheckPRM__zbreveAbs') ) == u'x̊_y̌′__|z̆|' assert upretty( Symbol('alphadothat_nVECDOT__tTildePrime') ) == u'α̇̂_n⃗̇__t̃′' assert upretty( Symbol('x_dot') ) == u'x_dot' assert upretty( Symbol('x__dot') ) == u'x__dot' def test_pretty_Cycle(): from sympy.combinatorics.permutations import Cycle assert pretty(Cycle(1, 2)) == '(1 2)' assert pretty(Cycle(2)) == '(2)' assert pretty(Cycle(1, 3)(4, 5)) == '(1 3)(4 5)' 
assert pretty(Cycle()) == '()' def test_pretty_Permutation(): from sympy.combinatorics.permutations import Permutation p1 = Permutation(1, 2)(3, 4) assert xpretty(p1, perm_cyclic=True, use_unicode=True) == "(1 2)(3 4)" assert xpretty(p1, perm_cyclic=True, use_unicode=False) == "(1 2)(3 4)" assert xpretty(p1, perm_cyclic=False, use_unicode=True) == \ u'⎛0 1 2 3 4⎞\n'\ u'⎝0 2 1 4 3⎠' assert xpretty(p1, perm_cyclic=False, use_unicode=False) == \ "/0 1 2 3 4\\\n"\ "\\0 2 1 4 3/" def test_pretty_basic(): assert pretty( -Rational(1)/2 ) == '-1/2' assert pretty( -Rational(13)/22 ) == \ """\ -13 \n\ ----\n\ 22 \ """ expr = oo ascii_str = \ """\ oo\ """ ucode_str = \ u("""\ ∞\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (x**2) ascii_str = \ """\ 2\n\ x \ """ ucode_str = \ u("""\ 2\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 1/x ascii_str = \ """\ 1\n\ -\n\ x\ """ ucode_str = \ u("""\ 1\n\ ─\n\ x\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str # not the same as 1/x expr = x**-1.0 ascii_str = \ """\ -1.0\n\ x \ """ ucode_str = \ ("""\ -1.0\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str # see issue #2860 expr = Pow(S(2), -1.0, evaluate=False) ascii_str = \ """\ -1.0\n\ 2 \ """ ucode_str = \ ("""\ -1.0\n\ 2 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = y*x**-2 ascii_str = \ """\ y \n\ --\n\ 2\n\ x \ """ ucode_str = \ u("""\ y \n\ ──\n\ 2\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str #see issue #14033 expr = x**Rational(1, 3) ascii_str = \ """\ 1/3\n\ x \ """ ucode_str = \ u("""\ 1/3\n\ x \ """) assert xpretty(expr, use_unicode=False, wrap_line=False,\ root_notation = False) == ascii_str assert xpretty(expr, use_unicode=True, wrap_line=False,\ root_notation = False) == ucode_str expr = x**Rational(-5, 2) ascii_str = \ """\ 1 \n\ ----\n\ 5/2\n\ x \ """ ucode_str = \ u("""\ 1 \n\ ────\n\ 5/2\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (-2)**x ascii_str = \ """\ x\n\ (-2) \ """ ucode_str = \ u("""\ x\n\ (-2) \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str # See issue 4923 expr = Pow(3, 1, evaluate=False) ascii_str = \ """\ 1\n\ 3 \ """ ucode_str = \ u("""\ 1\n\ 3 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (x**2 + x + 1) ascii_str_1 = \ """\ 2\n\ 1 + x + x \ """ ascii_str_2 = \ """\ 2 \n\ x + x + 1\ """ ascii_str_3 = \ """\ 2 \n\ x + 1 + x\ """ ucode_str_1 = \ u("""\ 2\n\ 1 + x + x \ """) ucode_str_2 = \ u("""\ 2 \n\ x + x + 1\ """) ucode_str_3 = \ u("""\ 2 \n\ x + 1 + x\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3] assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3] expr = 1 - x ascii_str_1 = \ """\ 1 - x\ """ ascii_str_2 = \ """\ -x + 1\ """ ucode_str_1 = \ u("""\ 1 - x\ """) ucode_str_2 = \ u("""\ -x + 1\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = 1 - 2*x ascii_str_1 = \ """\ 1 - 2*x\ """ ascii_str_2 = \ """\ -2*x + 1\ """ ucode_str_1 = \ u("""\ 1 - 2⋅x\ """) ucode_str_2 = \ u("""\ -2⋅x + 1\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = x/y ascii_str = \ """\ x\n\ -\n\ y\ """ ucode_str = \ u("""\ x\n\ ─\n\ y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -x/y ascii_str = \ """\ -x \n\ ---\n\ y \ """ 
ucode_str = \ u("""\ -x \n\ ───\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (x + 2)/y ascii_str_1 = \ """\ 2 + x\n\ -----\n\ y \ """ ascii_str_2 = \ """\ x + 2\n\ -----\n\ y \ """ ucode_str_1 = \ u("""\ 2 + x\n\ ─────\n\ y \ """) ucode_str_2 = \ u("""\ x + 2\n\ ─────\n\ y \ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = (1 + x)*y ascii_str_1 = \ """\ y*(1 + x)\ """ ascii_str_2 = \ """\ (1 + x)*y\ """ ascii_str_3 = \ """\ y*(x + 1)\ """ ucode_str_1 = \ u("""\ y⋅(1 + x)\ """) ucode_str_2 = \ u("""\ (1 + x)⋅y\ """) ucode_str_3 = \ u("""\ y⋅(x + 1)\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3] assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3] # Test for correct placement of the negative sign expr = -5*x/(x + 10) ascii_str_1 = \ """\ -5*x \n\ ------\n\ 10 + x\ """ ascii_str_2 = \ """\ -5*x \n\ ------\n\ x + 10\ """ ucode_str_1 = \ u("""\ -5⋅x \n\ ──────\n\ 10 + x\ """) ucode_str_2 = \ u("""\ -5⋅x \n\ ──────\n\ x + 10\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = -S.Half - 3*x ascii_str = \ """\ -3*x - 1/2\ """ ucode_str = \ u("""\ -3⋅x - 1/2\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = S.Half - 3*x ascii_str = \ """\ 1/2 - 3*x\ """ ucode_str = \ u("""\ 1/2 - 3⋅x\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -S.Half - 3*x/2 ascii_str = \ """\ 3*x 1\n\ - --- - -\n\ 2 2\ """ ucode_str = \ u("""\ 3⋅x 1\n\ - ─── - ─\n\ 2 2\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = S.Half - 3*x/2 ascii_str = \ """\ 1 3*x\n\ - - ---\n\ 2 2 \ """ ucode_str = \ u("""\ 1 3⋅x\n\ ─ - ───\n\ 2 2 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_negative_fractions(): expr = -x/y ascii_str =\ """\ -x \n\ ---\n\ y \ """ ucode_str =\ u("""\ -x \n\ ───\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -x*z/y ascii_str =\ """\ -x*z \n\ -----\n\ y \ """ ucode_str =\ u("""\ -x⋅z \n\ ─────\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = x**2/y ascii_str =\ """\ 2\n\ x \n\ --\n\ y \ """ ucode_str =\ u("""\ 2\n\ x \n\ ──\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -x**2/y ascii_str =\ """\ 2 \n\ -x \n\ ----\n\ y \ """ ucode_str =\ u("""\ 2 \n\ -x \n\ ────\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -x/(y*z) ascii_str =\ """\ -x \n\ ---\n\ y*z\ """ ucode_str =\ u("""\ -x \n\ ───\n\ y⋅z\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -a/y**2 ascii_str =\ """\ -a \n\ ---\n\ 2\n\ y \ """ ucode_str =\ u("""\ -a \n\ ───\n\ 2\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = y**(-a/b) ascii_str =\ """\ -a \n\ ---\n\ b \n\ y \ """ ucode_str =\ u("""\ -a \n\ ───\n\ b \n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -1/y**2 ascii_str =\ """\ -1 \n\ ---\n\ 2\n\ y \ """ ucode_str =\ u("""\ -1 \n\ ───\n\ 2\n\ y \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -10/b**2 ascii_str =\ """\ -10 \n\ ----\n\ 2 \n\ b \ """ ucode_str =\ u("""\ -10 \n\ ────\n\ 2 \n\ b \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Rational(-200, 37) ascii_str =\ """\ -200 \n\ -----\n\ 37 \ """ ucode_str 
=\ u("""\ -200 \n\ ─────\n\ 37 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_issue_5524(): assert pretty(-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5)) == \ """\ 2 / ___ \\\n\ - (5 - y) + (x - 5)*\\-x - 2*\\/ 2 + 5/\ """ assert upretty(-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5)) == \ u("""\ 2 \n\ - (5 - y) + (x - 5)⋅(-x - 2⋅√2 + 5)\ """) def test_pretty_ordering(): assert pretty(x**2 + x + 1, order='lex') == \ """\ 2 \n\ x + x + 1\ """ assert pretty(x**2 + x + 1, order='rev-lex') == \ """\ 2\n\ 1 + x + x \ """ assert pretty(1 - x, order='lex') == '-x + 1' assert pretty(1 - x, order='rev-lex') == '1 - x' assert pretty(1 - 2*x, order='lex') == '-2*x + 1' assert pretty(1 - 2*x, order='rev-lex') == '1 - 2*x' f = 2*x**4 + y**2 - x**2 + y**3 assert pretty(f, order=None) == \ """\ 4 2 3 2\n\ 2*x - x + y + y \ """ assert pretty(f, order='lex') == \ """\ 4 2 3 2\n\ 2*x - x + y + y \ """ assert pretty(f, order='rev-lex') == \ """\ 2 3 2 4\n\ y + y - x + 2*x \ """ expr = x - x**3/6 + x**5/120 + O(x**6) ascii_str = \ """\ 3 5 \n\ x x / 6\\\n\ x - -- + --- + O\\x /\n\ 6 120 \ """ ucode_str = \ u("""\ 3 5 \n\ x x ⎛ 6⎞\n\ x - ── + ─── + O⎝x ⎠\n\ 6 120 \ """) assert pretty(expr, order=None) == ascii_str assert upretty(expr, order=None) == ucode_str assert pretty(expr, order='lex') == ascii_str assert upretty(expr, order='lex') == ucode_str assert pretty(expr, order='rev-lex') == ascii_str assert upretty(expr, order='rev-lex') == ucode_str def test_EulerGamma(): assert pretty(EulerGamma) == str(EulerGamma) == "EulerGamma" assert upretty(EulerGamma) == u"γ" def test_GoldenRatio(): assert pretty(GoldenRatio) == str(GoldenRatio) == "GoldenRatio" assert upretty(GoldenRatio) == u"φ" def test_pretty_relational(): expr = Eq(x, y) ascii_str = \ """\ x = y\ """ ucode_str = \ u("""\ x = y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Lt(x, y) ascii_str = \ """\ x < y\ """ ucode_str = \ u("""\ x < y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Gt(x, y) ascii_str = \ """\ x > y\ """ ucode_str = \ u("""\ x > y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Le(x, y) ascii_str = \ """\ x <= y\ """ ucode_str = \ u("""\ x ≤ y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Ge(x, y) ascii_str = \ """\ x >= y\ """ ucode_str = \ u("""\ x ≥ y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Ne(x/(y + 1), y**2) ascii_str_1 = \ """\ x 2\n\ ----- != y \n\ 1 + y \ """ ascii_str_2 = \ """\ x 2\n\ ----- != y \n\ y + 1 \ """ ucode_str_1 = \ u("""\ x 2\n\ ───── ≠ y \n\ 1 + y \ """) ucode_str_2 = \ u("""\ x 2\n\ ───── ≠ y \n\ y + 1 \ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] def test_Assignment(): expr = Assignment(x, y) ascii_str = \ """\ x := y\ """ ucode_str = \ u("""\ x := y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_AugmentedAssignment(): expr = AddAugmentedAssignment(x, y) ascii_str = \ """\ x += y\ """ ucode_str = \ u("""\ x += y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = SubAugmentedAssignment(x, y) ascii_str = \ """\ x -= y\ """ ucode_str = \ u("""\ x -= y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = MulAugmentedAssignment(x, y) ascii_str = \ """\ x *= y\ """ ucode_str = \ u("""\ x *= y\ """) assert pretty(expr) == ascii_str assert 
upretty(expr) == ucode_str expr = DivAugmentedAssignment(x, y) ascii_str = \ """\ x /= y\ """ ucode_str = \ u("""\ x /= y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = ModAugmentedAssignment(x, y) ascii_str = \ """\ x %= y\ """ ucode_str = \ u("""\ x %= y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_rational(): expr = y*x**-2 ascii_str = \ """\ y \n\ --\n\ 2\n\ x \ """ ucode_str = \ u("""\ y \n\ ──\n\ 2\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = y**Rational(3, 2) * x**Rational(-5, 2) ascii_str = \ """\ 3/2\n\ y \n\ ----\n\ 5/2\n\ x \ """ ucode_str = \ u("""\ 3/2\n\ y \n\ ────\n\ 5/2\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = sin(x)**3/tan(x)**2 ascii_str = \ """\ 3 \n\ sin (x)\n\ -------\n\ 2 \n\ tan (x)\ """ ucode_str = \ u("""\ 3 \n\ sin (x)\n\ ───────\n\ 2 \n\ tan (x)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_functions(): """Tests for Abs, conjugate, exp, function braces, and factorial.""" expr = (2*x + exp(x)) ascii_str_1 = \ """\ x\n\ 2*x + e \ """ ascii_str_2 = \ """\ x \n\ e + 2*x\ """ ucode_str_1 = \ u("""\ x\n\ 2⋅x + ℯ \ """) ucode_str_2 = \ u("""\ x \n\ ℯ + 2⋅x\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = Abs(x) ascii_str = \ """\ |x|\ """ ucode_str = \ u("""\ │x│\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Abs(x/(x**2 + 1)) ascii_str_1 = \ """\ | x |\n\ |------|\n\ | 2|\n\ |1 + x |\ """ ascii_str_2 = \ """\ | x |\n\ |------|\n\ | 2 |\n\ |x + 1|\ """ ucode_str_1 = \ u("""\ │ x │\n\ │──────│\n\ │ 2│\n\ │1 + x │\ """) ucode_str_2 = \ u("""\ │ x │\n\ │──────│\n\ │ 2 │\n\ │x + 1│\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = Abs(1 / (y - Abs(x))) ascii_str = \ """\ 1 \n\ ---------\n\ |y - |x||\ """ ucode_str = \ u("""\ 1 \n\ ─────────\n\ │y - │x││\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str n = Symbol('n', integer=True) expr = factorial(n) ascii_str = \ """\ n!\ """ ucode_str = \ u("""\ n!\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = factorial(2*n) ascii_str = \ """\ (2*n)!\ """ ucode_str = \ u("""\ (2⋅n)!\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = factorial(factorial(factorial(n))) ascii_str = \ """\ ((n!)!)!\ """ ucode_str = \ u("""\ ((n!)!)!\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = factorial(n + 1) ascii_str_1 = \ """\ (1 + n)!\ """ ascii_str_2 = \ """\ (n + 1)!\ """ ucode_str_1 = \ u("""\ (1 + n)!\ """) ucode_str_2 = \ u("""\ (n + 1)!\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = subfactorial(n) ascii_str = \ """\ !n\ """ ucode_str = \ u("""\ !n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = subfactorial(2*n) ascii_str = \ """\ !(2*n)\ """ ucode_str = \ u("""\ !(2⋅n)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str n = Symbol('n', integer=True) expr = factorial2(n) ascii_str = \ """\ n!!\ """ ucode_str = \ u("""\ n!!\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = factorial2(2*n) ascii_str = \ """\ (2*n)!!\ """ ucode_str = \ u("""\ (2⋅n)!!\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str 
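    # Nested double factorials below: each inner application must keep its own
    # parentheses, i.e. factorial2(factorial2(factorial2(n))) should render as
    # ((n!!)!!)!! in both the ASCII and the unicode printer.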
expr = factorial2(factorial2(factorial2(n))) ascii_str = \ """\ ((n!!)!!)!!\ """ ucode_str = \ u("""\ ((n!!)!!)!!\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = factorial2(n + 1) ascii_str_1 = \ """\ (1 + n)!!\ """ ascii_str_2 = \ """\ (n + 1)!!\ """ ucode_str_1 = \ u("""\ (1 + n)!!\ """) ucode_str_2 = \ u("""\ (n + 1)!!\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = 2*binomial(n, k) ascii_str = \ """\ /n\\\n\ 2*| |\n\ \\k/\ """ ucode_str = \ u("""\ ⎛n⎞\n\ 2⋅⎜ ⎟\n\ ⎝k⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 2*binomial(2*n, k) ascii_str = \ """\ /2*n\\\n\ 2*| |\n\ \\ k /\ """ ucode_str = \ u("""\ ⎛2⋅n⎞\n\ 2⋅⎜ ⎟\n\ ⎝ k ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 2*binomial(n**2, k) ascii_str = \ """\ / 2\\\n\ |n |\n\ 2*| |\n\ \\k /\ """ ucode_str = \ u("""\ ⎛ 2⎞\n\ ⎜n ⎟\n\ 2⋅⎜ ⎟\n\ ⎝k ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = catalan(n) ascii_str = \ """\ C \n\ n\ """ ucode_str = \ u("""\ C \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = catalan(n) ascii_str = \ """\ C \n\ n\ """ ucode_str = \ u("""\ C \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = bell(n) ascii_str = \ """\ B \n\ n\ """ ucode_str = \ u("""\ B \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = bernoulli(n) ascii_str = \ """\ B \n\ n\ """ ucode_str = \ u("""\ B \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = bernoulli(n, x) ascii_str = \ """\ B (x)\n\ n \ """ ucode_str = \ u("""\ B (x)\n\ n \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = fibonacci(n) ascii_str = \ """\ F \n\ n\ """ ucode_str = \ u("""\ F \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = lucas(n) ascii_str = \ """\ L \n\ n\ """ ucode_str = \ u("""\ L \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = tribonacci(n) ascii_str = \ """\ T \n\ n\ """ ucode_str = \ u("""\ T \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = stieltjes(n) ascii_str = \ """\ stieltjes \n\ n\ """ ucode_str = \ u("""\ γ \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = stieltjes(n, x) ascii_str = \ """\ stieltjes (x)\n\ n \ """ ucode_str = \ u("""\ γ (x)\n\ n \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = mathieuc(x, y, z) ascii_str = 'C(x, y, z)' ucode_str = u('C(x, y, z)') assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = mathieus(x, y, z) ascii_str = 'S(x, y, z)' ucode_str = u('S(x, y, z)') assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = mathieucprime(x, y, z) ascii_str = "C'(x, y, z)" ucode_str = u("C'(x, y, z)") assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = mathieusprime(x, y, z) ascii_str = "S'(x, y, z)" ucode_str = u("S'(x, y, z)") assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = conjugate(x) ascii_str = \ """\ _\n\ x\ """ ucode_str = \ u("""\ _\n\ x\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str f = Function('f') expr = conjugate(f(x + 1)) ascii_str_1 = \ """\ ________\n\ f(1 + x)\ """ ascii_str_2 = \ """\ ________\n\ f(x + 1)\ """ ucode_str_1 = \ u("""\ ________\n\ f(1 + x)\ """) 
ucode_str_2 = \ u("""\ ________\n\ f(x + 1)\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = f(x) ascii_str = \ """\ f(x)\ """ ucode_str = \ u("""\ f(x)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = f(x, y) ascii_str = \ """\ f(x, y)\ """ ucode_str = \ u("""\ f(x, y)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = f(x/(y + 1), y) ascii_str_1 = \ """\ / x \\\n\ f|-----, y|\n\ \\1 + y /\ """ ascii_str_2 = \ """\ / x \\\n\ f|-----, y|\n\ \\y + 1 /\ """ ucode_str_1 = \ u("""\ ⎛ x ⎞\n\ f⎜─────, y⎟\n\ ⎝1 + y ⎠\ """) ucode_str_2 = \ u("""\ ⎛ x ⎞\n\ f⎜─────, y⎟\n\ ⎝y + 1 ⎠\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = f(x**x**x**x**x**x) ascii_str = \ """\ / / / / / x\\\\\\\\\\ | | | | \\x /|||| | | | \\x /||| | | \\x /|| | \\x /| f\\x /\ """ ucode_str = \ u("""\ ⎛ ⎛ ⎛ ⎛ ⎛ x⎞⎞⎞⎞⎞ ⎜ ⎜ ⎜ ⎜ ⎝x ⎠⎟⎟⎟⎟ ⎜ ⎜ ⎜ ⎝x ⎠⎟⎟⎟ ⎜ ⎜ ⎝x ⎠⎟⎟ ⎜ ⎝x ⎠⎟ f⎝x ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = sin(x)**2 ascii_str = \ """\ 2 \n\ sin (x)\ """ ucode_str = \ u("""\ 2 \n\ sin (x)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = conjugate(a + b*I) ascii_str = \ """\ _ _\n\ a - I*b\ """ ucode_str = \ u("""\ _ _\n\ a - ⅈ⋅b\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = conjugate(exp(a + b*I)) ascii_str = \ """\ _ _\n\ a - I*b\n\ e \ """ ucode_str = \ u("""\ _ _\n\ a - ⅈ⋅b\n\ ℯ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = conjugate( f(1 + conjugate(f(x))) ) ascii_str_1 = \ """\ ___________\n\ / ____\\\n\ f\\1 + f(x)/\ """ ascii_str_2 = \ """\ ___________\n\ /____ \\\n\ f\\f(x) + 1/\ """ ucode_str_1 = \ u("""\ ___________\n\ ⎛ ____⎞\n\ f⎝1 + f(x)⎠\ """) ucode_str_2 = \ u("""\ ___________\n\ ⎛____ ⎞\n\ f⎝f(x) + 1⎠\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = f(x/(y + 1), y) ascii_str_1 = \ """\ / x \\\n\ f|-----, y|\n\ \\1 + y /\ """ ascii_str_2 = \ """\ / x \\\n\ f|-----, y|\n\ \\y + 1 /\ """ ucode_str_1 = \ u("""\ ⎛ x ⎞\n\ f⎜─────, y⎟\n\ ⎝1 + y ⎠\ """) ucode_str_2 = \ u("""\ ⎛ x ⎞\n\ f⎜─────, y⎟\n\ ⎝y + 1 ⎠\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = floor(1 / (y - floor(x))) ascii_str = \ """\ / 1 \\\n\ floor|------------|\n\ \\y - floor(x)/\ """ ucode_str = \ u("""\ ⎢ 1 ⎥\n\ ⎢───────⎥\n\ ⎣y - ⌊x⌋⎦\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = ceiling(1 / (y - ceiling(x))) ascii_str = \ """\ / 1 \\\n\ ceiling|--------------|\n\ \\y - ceiling(x)/\ """ ucode_str = \ u("""\ ⎡ 1 ⎤\n\ ⎢───────⎥\n\ ⎢y - ⌈x⌉⎥\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = euler(n) ascii_str = \ """\ E \n\ n\ """ ucode_str = \ u("""\ E \n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = euler(1/(1 + 1/(1 + 1/n))) ascii_str = \ """\ E \n\ 1 \n\ ---------\n\ 1 \n\ 1 + -----\n\ 1\n\ 1 + -\n\ n\ """ ucode_str = \ u("""\ E \n\ 1 \n\ ─────────\n\ 1 \n\ 1 + ─────\n\ 1\n\ 1 + ─\n\ n\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = euler(n, x) ascii_str = \ """\ E (x)\n\ n \ """ ucode_str = \ u("""\ E (x)\n\ n \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = euler(n, x/2) ascii_str = \ """\ /x\\\n\ E |-|\n\ n\\2/\ """ ucode_str 
= \ u("""\ ⎛x⎞\n\ E ⎜─⎟\n\ n⎝2⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_sqrt(): expr = sqrt(2) ascii_str = \ """\ ___\n\ \\/ 2 \ """ ucode_str = \ u"√2" assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 2**Rational(1, 3) ascii_str = \ """\ 3 ___\n\ \\/ 2 \ """ ucode_str = \ u("""\ 3 ___\n\ ╲╱ 2 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 2**Rational(1, 1000) ascii_str = \ """\ 1000___\n\ \\/ 2 \ """ ucode_str = \ u("""\ 1000___\n\ ╲╱ 2 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = sqrt(x**2 + 1) ascii_str = \ """\ ________\n\ / 2 \n\ \\/ x + 1 \ """ ucode_str = \ u("""\ ________\n\ ╱ 2 \n\ ╲╱ x + 1 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (1 + sqrt(5))**Rational(1, 3) ascii_str = \ """\ ___________\n\ 3 / ___ \n\ \\/ 1 + \\/ 5 \ """ ucode_str = \ u("""\ 3 ________\n\ ╲╱ 1 + √5 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 2**(1/x) ascii_str = \ """\ x ___\n\ \\/ 2 \ """ ucode_str = \ u("""\ x ___\n\ ╲╱ 2 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = sqrt(2 + pi) ascii_str = \ """\ ________\n\ \\/ 2 + pi \ """ ucode_str = \ u("""\ _______\n\ ╲╱ 2 + π \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (2 + ( 1 + x**2)/(2 + x))**Rational(1, 4) + (1 + x**Rational(1, 1000))/sqrt(3 + x**2) ascii_str = \ """\ ____________ \n\ / 2 1000___ \n\ / x + 1 \\/ x + 1\n\ 4 / 2 + ------ + -----------\n\ \\/ x + 2 ________\n\ / 2 \n\ \\/ x + 3 \ """ ucode_str = \ u("""\ ____________ \n\ ╱ 2 1000___ \n\ ╱ x + 1 ╲╱ x + 1\n\ 4 ╱ 2 + ────── + ───────────\n\ ╲╱ x + 2 ________\n\ ╱ 2 \n\ ╲╱ x + 3 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_sqrt_char_knob(): # See PR #9234. expr = sqrt(2) ucode_str1 = \ u("""\ ___\n\ ╲╱ 2 \ """) ucode_str2 = \ u"√2" assert xpretty(expr, use_unicode=True, use_unicode_sqrt_char=False) == ucode_str1 assert xpretty(expr, use_unicode=True, use_unicode_sqrt_char=True) == ucode_str2 def test_pretty_sqrt_longsymbol_no_sqrt_char(): # Do not use unicode sqrt char for long symbols (see PR #9234). 
expr = sqrt(Symbol('C1')) ucode_str = \ u("""\ ____\n\ ╲╱ C₁ \ """) assert upretty(expr) == ucode_str def test_pretty_KroneckerDelta(): x, y = symbols("x, y") expr = KroneckerDelta(x, y) ascii_str = \ """\ d \n\ x,y\ """ ucode_str = \ u("""\ δ \n\ x,y\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_product(): n, m, k, l = symbols('n m k l') f = symbols('f', cls=Function) expr = Product(f((n/3)**2), (n, k**2, l)) unicode_str = \ u("""\ l \n\ ─┬──────┬─ \n\ │ │ ⎛ 2⎞\n\ │ │ ⎜n ⎟\n\ │ │ f⎜──⎟\n\ │ │ ⎝9 ⎠\n\ │ │ \n\ 2 \n\ n = k """) ascii_str = \ """\ l \n\ __________ \n\ | | / 2\\\n\ | | |n |\n\ | | f|--|\n\ | | \\9 /\n\ | | \n\ 2 \n\ n = k """ expr = Product(f((n/3)**2), (n, k**2, l), (l, 1, m)) unicode_str = \ u("""\ m l \n\ ─┬──────┬─ ─┬──────┬─ \n\ │ │ │ │ ⎛ 2⎞\n\ │ │ │ │ ⎜n ⎟\n\ │ │ │ │ f⎜──⎟\n\ │ │ │ │ ⎝9 ⎠\n\ │ │ │ │ \n\ l = 1 2 \n\ n = k """) ascii_str = \ """\ m l \n\ __________ __________ \n\ | | | | / 2\\\n\ | | | | |n |\n\ | | | | f|--|\n\ | | | | \\9 /\n\ | | | | \n\ l = 1 2 \n\ n = k """ assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str def test_pretty_Lambda(): # S.IdentityFunction is a special case expr = Lambda(y, y) assert pretty(expr) == "x -> x" assert upretty(expr) == u"x ↦ x" expr = Lambda(x, x+1) assert pretty(expr) == "x -> x + 1" assert upretty(expr) == u"x ↦ x + 1" expr = Lambda(x, x**2) ascii_str = \ """\ 2\n\ x -> x \ """ ucode_str = \ u("""\ 2\n\ x ↦ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Lambda(x, x**2)**2 ascii_str = \ """\ 2 / 2\\ \n\ \\x -> x / \ """ ucode_str = \ u("""\ 2 ⎛ 2⎞ \n\ ⎝x ↦ x ⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Lambda((x, y), x) ascii_str = "(x, y) -> x" ucode_str = u"(x, y) ↦ x" assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Lambda((x, y), x**2) ascii_str = \ """\ 2\n\ (x, y) -> x \ """ ucode_str = \ u("""\ 2\n\ (x, y) ↦ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Lambda(((x, y),), x**2) ascii_str = \ """\ 2\n\ ((x, y),) -> x \ """ ucode_str = \ u("""\ 2\n\ ((x, y),) ↦ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_order(): expr = O(1) ascii_str = \ """\ O(1)\ """ ucode_str = \ u("""\ O(1)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = O(1/x) ascii_str = \ """\ /1\\\n\ O|-|\n\ \\x/\ """ ucode_str = \ u("""\ ⎛1⎞\n\ O⎜─⎟\n\ ⎝x⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = O(x**2 + y**2) ascii_str = \ """\ / 2 2 \\\n\ O\\x + y ; (x, y) -> (0, 0)/\ """ ucode_str = \ u("""\ ⎛ 2 2 ⎞\n\ O⎝x + y ; (x, y) → (0, 0)⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = O(1, (x, oo)) ascii_str = \ """\ O(1; x -> oo)\ """ ucode_str = \ u("""\ O(1; x → ∞)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = O(1/x, (x, oo)) ascii_str = \ """\ /1 \\\n\ O|-; x -> oo|\n\ \\x /\ """ ucode_str = \ u("""\ ⎛1 ⎞\n\ O⎜─; x → ∞⎟\n\ ⎝x ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = O(x**2 + y**2, (x, oo), (y, oo)) ascii_str = \ """\ / 2 2 \\\n\ O\\x + y ; (x, y) -> (oo, oo)/\ """ ucode_str = \ u("""\ ⎛ 2 2 ⎞\n\ O⎝x + y ; (x, y) → (∞, ∞)⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_derivatives(): # Simple expr = Derivative(log(x), x, evaluate=False) ascii_str = \ """\ d \n\ --(log(x))\n\ dx \ """ ucode_str = \ u("""\ d 
\n\ ──(log(x))\n\ dx \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Derivative(log(x), x, evaluate=False) + x ascii_str_1 = \ """\ d \n\ x + --(log(x))\n\ dx \ """ ascii_str_2 = \ """\ d \n\ --(log(x)) + x\n\ dx \ """ ucode_str_1 = \ u("""\ d \n\ x + ──(log(x))\n\ dx \ """) ucode_str_2 = \ u("""\ d \n\ ──(log(x)) + x\n\ dx \ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] # basic partial derivatives expr = Derivative(log(x + y) + x, x) ascii_str_1 = \ """\ d \n\ --(log(x + y) + x)\n\ dx \ """ ascii_str_2 = \ """\ d \n\ --(x + log(x + y))\n\ dx \ """ ucode_str_1 = \ u("""\ ∂ \n\ ──(log(x + y) + x)\n\ ∂x \ """) ucode_str_2 = \ u("""\ ∂ \n\ ──(x + log(x + y))\n\ ∂x \ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2], upretty(expr) # Multiple symbols expr = Derivative(log(x) + x**2, x, y) ascii_str_1 = \ """\ 2 \n\ d / 2\\\n\ -----\\log(x) + x /\n\ dy dx \ """ ascii_str_2 = \ """\ 2 \n\ d / 2 \\\n\ -----\\x + log(x)/\n\ dy dx \ """ ucode_str_1 = \ u("""\ 2 \n\ d ⎛ 2⎞\n\ ─────⎝log(x) + x ⎠\n\ dy dx \ """) ucode_str_2 = \ u("""\ 2 \n\ d ⎛ 2 ⎞\n\ ─────⎝x + log(x)⎠\n\ dy dx \ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = Derivative(2*x*y, y, x) + x**2 ascii_str_1 = \ """\ 2 \n\ d 2\n\ -----(2*x*y) + x \n\ dx dy \ """ ascii_str_2 = \ """\ 2 \n\ 2 d \n\ x + -----(2*x*y)\n\ dx dy \ """ ucode_str_1 = \ u("""\ 2 \n\ ∂ 2\n\ ─────(2⋅x⋅y) + x \n\ ∂x ∂y \ """) ucode_str_2 = \ u("""\ 2 \n\ 2 ∂ \n\ x + ─────(2⋅x⋅y)\n\ ∂x ∂y \ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = Derivative(2*x*y, x, x) ascii_str = \ """\ 2 \n\ d \n\ ---(2*x*y)\n\ 2 \n\ dx \ """ ucode_str = \ u("""\ 2 \n\ ∂ \n\ ───(2⋅x⋅y)\n\ 2 \n\ ∂x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Derivative(2*x*y, x, 17) ascii_str = \ """\ 17 \n\ d \n\ ----(2*x*y)\n\ 17 \n\ dx \ """ ucode_str = \ u("""\ 17 \n\ ∂ \n\ ────(2⋅x⋅y)\n\ 17 \n\ ∂x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Derivative(2*x*y, x, x, y) ascii_str = \ """\ 3 \n\ d \n\ ------(2*x*y)\n\ 2 \n\ dy dx \ """ ucode_str = \ u("""\ 3 \n\ ∂ \n\ ──────(2⋅x⋅y)\n\ 2 \n\ ∂y ∂x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str # Greek letters alpha = Symbol('alpha') beta = Function('beta') expr = beta(alpha).diff(alpha) ascii_str = \ """\ d \n\ ------(beta(alpha))\n\ dalpha \ """ ucode_str = \ u("""\ d \n\ ──(β(α))\n\ dα \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Derivative(f(x), (x, n)) ascii_str = \ """\ n \n\ d \n\ ---(f(x))\n\ n \n\ dx \ """ ucode_str = \ u("""\ n \n\ d \n\ ───(f(x))\n\ n \n\ dx \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_integrals(): expr = Integral(log(x), x) ascii_str = \ """\ / \n\ | \n\ | log(x) dx\n\ | \n\ / \ """ ucode_str = \ u("""\ ⌠ \n\ ⎮ log(x) dx\n\ ⌡ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(x**2, x) ascii_str = \ """\ / \n\ | \n\ | 2 \n\ | x dx\n\ | \n\ / \ """ ucode_str = \ u("""\ ⌠ \n\ ⎮ 2 \n\ ⎮ x dx\n\ ⌡ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral((sin(x))**2 / (tan(x))**2) ascii_str = \ """\ / \n\ | \n\ | 2 \n\ | sin (x) \n\ | ------- dx\n\ | 2 \n\ | tan (x) \n\ | \n\ / \ """ ucode_str = \ u("""\ ⌠ \n\ ⎮ 2 \n\ 
⎮ sin (x) \n\ ⎮ ─────── dx\n\ ⎮ 2 \n\ ⎮ tan (x) \n\ ⌡ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(x**(2**x), x) ascii_str = \ """\ / \n\ | \n\ | / x\\ \n\ | \\2 / \n\ | x dx\n\ | \n\ / \ """ ucode_str = \ u("""\ ⌠ \n\ ⎮ ⎛ x⎞ \n\ ⎮ ⎝2 ⎠ \n\ ⎮ x dx\n\ ⌡ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(x**2, (x, 1, 2)) ascii_str = \ """\ 2 \n\ / \n\ | \n\ | 2 \n\ | x dx\n\ | \n\ / \n\ 1 \ """ ucode_str = \ u("""\ 2 \n\ ⌠ \n\ ⎮ 2 \n\ ⎮ x dx\n\ ⌡ \n\ 1 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(x**2, (x, Rational(1, 2), 10)) ascii_str = \ """\ 10 \n\ / \n\ | \n\ | 2 \n\ | x dx\n\ | \n\ / \n\ 1/2 \ """ ucode_str = \ u("""\ 10 \n\ ⌠ \n\ ⎮ 2 \n\ ⎮ x dx\n\ ⌡ \n\ 1/2 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(x**2*y**2, x, y) ascii_str = \ """\ / / \n\ | | \n\ | | 2 2 \n\ | | x *y dx dy\n\ | | \n\ / / \ """ ucode_str = \ u("""\ ⌠ ⌠ \n\ ⎮ ⎮ 2 2 \n\ ⎮ ⎮ x ⋅y dx dy\n\ ⌡ ⌡ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(sin(th)/cos(ph), (th, 0, pi), (ph, 0, 2*pi)) ascii_str = \ """\ 2*pi pi \n\ / / \n\ | | \n\ | | sin(theta) \n\ | | ---------- d(theta) d(phi)\n\ | | cos(phi) \n\ | | \n\ / / \n\ 0 0 \ """ ucode_str = \ u("""\ 2⋅π π \n\ ⌠ ⌠ \n\ ⎮ ⎮ sin(θ) \n\ ⎮ ⎮ ────── dθ dφ\n\ ⎮ ⎮ cos(φ) \n\ ⌡ ⌡ \n\ 0 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_matrix(): # Empty Matrix expr = Matrix() ascii_str = "[]" unicode_str = "[]" assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str expr = Matrix(2, 0, lambda i, j: 0) ascii_str = "[]" unicode_str = "[]" assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str expr = Matrix(0, 2, lambda i, j: 0) ascii_str = "[]" unicode_str = "[]" assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str expr = Matrix([[x**2 + 1, 1], [y, x + y]]) ascii_str_1 = \ """\ [ 2 ] [1 + x 1 ] [ ] [ y x + y]\ """ ascii_str_2 = \ """\ [ 2 ] [x + 1 1 ] [ ] [ y x + y]\ """ ucode_str_1 = \ u("""\ ⎡ 2 ⎤ ⎢1 + x 1 ⎥ ⎢ ⎥ ⎣ y x + y⎦\ """) ucode_str_2 = \ u("""\ ⎡ 2 ⎤ ⎢x + 1 1 ⎥ ⎢ ⎥ ⎣ y x + y⎦\ """) assert pretty(expr) in [ascii_str_1, ascii_str_2] assert upretty(expr) in [ucode_str_1, ucode_str_2] expr = Matrix([[x/y, y, th], [0, exp(I*k*ph), 1]]) ascii_str = \ """\ [x ] [- y theta] [y ] [ ] [ I*k*phi ] [0 e 1 ]\ """ ucode_str = \ u("""\ ⎡x ⎤ ⎢─ y θ⎥ ⎢y ⎥ ⎢ ⎥ ⎢ ⅈ⋅k⋅φ ⎥ ⎣0 ℯ 1⎦\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str unicode_str = \ u("""\ ⎡v̇_msc_00 0 0 ⎤ ⎢ ⎥ ⎢ 0 v̇_msc_01 0 ⎥ ⎢ ⎥ ⎣ 0 0 v̇_msc_02⎦\ """) expr = diag(*MatrixSymbol('vdot_msc',1,3)) assert upretty(expr) == unicode_str def test_pretty_ndim_arrays(): x, y, z, w = symbols("x y z w") for ArrayType in (ImmutableDenseNDimArray, ImmutableSparseNDimArray, MutableDenseNDimArray, MutableSparseNDimArray): # Basic: scalar array M = ArrayType(x) assert pretty(M) == "x" assert upretty(M) == "x" M = ArrayType([[1/x, y], [z, w]]) M1 = ArrayType([1/x, y, z]) M2 = tensorproduct(M1, M) M3 = tensorproduct(M, M) ascii_str = \ """\ [1 ]\n\ [- y]\n\ [x ]\n\ [ ]\n\ [z w]\ """ ucode_str = \ u("""\ ⎡1 ⎤\n\ ⎢─ y⎥\n\ ⎢x ⎥\n\ ⎢ ⎥\n\ ⎣z w⎦\ """) assert pretty(M) == ascii_str assert upretty(M) == ucode_str ascii_str = \ """\ [1 ]\n\ [- y z]\n\ [x ]\ """ ucode_str = \ u("""\ ⎡1 ⎤\n\ ⎢─ y z⎥\n\ ⎣x ⎦\ """) assert pretty(M1) == ascii_str assert upretty(M1) == ucode_str ascii_str = \ """\ [[1 y] ]\n\ [[-- -] [z ]]\n\ [[ 2 x] [ y 2 ] [- 
y*z]]\n\ [[x ] [ - y ] [x ]]\n\ [[ ] [ x ] [ ]]\n\ [[z w] [ ] [ 2 ]]\n\ [[- -] [y*z w*y] [z w*z]]\n\ [[x x] ]\ """ ucode_str = \ u("""\ ⎡⎡1 y⎤ ⎤\n\ ⎢⎢── ─⎥ ⎡z ⎤⎥\n\ ⎢⎢ 2 x⎥ ⎡ y 2 ⎤ ⎢─ y⋅z⎥⎥\n\ ⎢⎢x ⎥ ⎢ ─ y ⎥ ⎢x ⎥⎥\n\ ⎢⎢ ⎥ ⎢ x ⎥ ⎢ ⎥⎥\n\ ⎢⎢z w⎥ ⎢ ⎥ ⎢ 2 ⎥⎥\n\ ⎢⎢─ ─⎥ ⎣y⋅z w⋅y⎦ ⎣z w⋅z⎦⎥\n\ ⎣⎣x x⎦ ⎦\ """) assert pretty(M2) == ascii_str assert upretty(M2) == ucode_str ascii_str = \ """\ [ [1 y] ]\n\ [ [-- -] ]\n\ [ [ 2 x] [ y 2 ]]\n\ [ [x ] [ - y ]]\n\ [ [ ] [ x ]]\n\ [ [z w] [ ]]\n\ [ [- -] [y*z w*y]]\n\ [ [x x] ]\n\ [ ]\n\ [[z ] [ w ]]\n\ [[- y*z] [ - w*y]]\n\ [[x ] [ x ]]\n\ [[ ] [ ]]\n\ [[ 2 ] [ 2 ]]\n\ [[z w*z] [w*z w ]]\ """ ucode_str = \ u("""\ ⎡ ⎡1 y⎤ ⎤\n\ ⎢ ⎢── ─⎥ ⎥\n\ ⎢ ⎢ 2 x⎥ ⎡ y 2 ⎤⎥\n\ ⎢ ⎢x ⎥ ⎢ ─ y ⎥⎥\n\ ⎢ ⎢ ⎥ ⎢ x ⎥⎥\n\ ⎢ ⎢z w⎥ ⎢ ⎥⎥\n\ ⎢ ⎢─ ─⎥ ⎣y⋅z w⋅y⎦⎥\n\ ⎢ ⎣x x⎦ ⎥\n\ ⎢ ⎥\n\ ⎢⎡z ⎤ ⎡ w ⎤⎥\n\ ⎢⎢─ y⋅z⎥ ⎢ ─ w⋅y⎥⎥\n\ ⎢⎢x ⎥ ⎢ x ⎥⎥\n\ ⎢⎢ ⎥ ⎢ ⎥⎥\n\ ⎢⎢ 2 ⎥ ⎢ 2 ⎥⎥\n\ ⎣⎣z w⋅z⎦ ⎣w⋅z w ⎦⎦\ """) assert pretty(M3) == ascii_str assert upretty(M3) == ucode_str Mrow = ArrayType([[x, y, 1 / z]]) Mcolumn = ArrayType([[x], [y], [1 / z]]) Mcol2 = ArrayType([Mcolumn.tolist()]) ascii_str = \ """\ [[ 1]]\n\ [[x y -]]\n\ [[ z]]\ """ ucode_str = \ u("""\ ⎡⎡ 1⎤⎤\n\ ⎢⎢x y ─⎥⎥\n\ ⎣⎣ z⎦⎦\ """) assert pretty(Mrow) == ascii_str assert upretty(Mrow) == ucode_str ascii_str = \ """\ [x]\n\ [ ]\n\ [y]\n\ [ ]\n\ [1]\n\ [-]\n\ [z]\ """ ucode_str = \ u("""\ ⎡x⎤\n\ ⎢ ⎥\n\ ⎢y⎥\n\ ⎢ ⎥\n\ ⎢1⎥\n\ ⎢─⎥\n\ ⎣z⎦\ """) assert pretty(Mcolumn) == ascii_str assert upretty(Mcolumn) == ucode_str ascii_str = \ """\ [[x]]\n\ [[ ]]\n\ [[y]]\n\ [[ ]]\n\ [[1]]\n\ [[-]]\n\ [[z]]\ """ ucode_str = \ u("""\ ⎡⎡x⎤⎤\n\ ⎢⎢ ⎥⎥\n\ ⎢⎢y⎥⎥\n\ ⎢⎢ ⎥⎥\n\ ⎢⎢1⎥⎥\n\ ⎢⎢─⎥⎥\n\ ⎣⎣z⎦⎦\ """) assert pretty(Mcol2) == ascii_str assert upretty(Mcol2) == ucode_str def test_tensor_TensorProduct(): A = MatrixSymbol("A", 3, 3) B = MatrixSymbol("B", 3, 3) assert upretty(TensorProduct(A, B)) == "A\u2297B" assert upretty(TensorProduct(A, B, A)) == "A\u2297B\u2297A" def test_diffgeom_print_WedgeProduct(): from sympy.diffgeom.rn import R2 from sympy.diffgeom import WedgeProduct wp = WedgeProduct(R2.dx, R2.dy) assert upretty(wp) == u("ⅆ x∧ⅆ y") def test_Adjoint(): X = MatrixSymbol('X', 2, 2) Y = MatrixSymbol('Y', 2, 2) assert pretty(Adjoint(X)) == " +\nX " assert pretty(Adjoint(X + Y)) == " +\n(X + Y) " assert pretty(Adjoint(X) + Adjoint(Y)) == " + +\nX + Y " assert pretty(Adjoint(X*Y)) == " +\n(X*Y) " assert pretty(Adjoint(Y)*Adjoint(X)) == " + +\nY *X " assert pretty(Adjoint(X**2)) == " +\n/ 2\\ \n\\X / " assert pretty(Adjoint(X)**2) == " 2\n/ +\\ \n\\X / " assert pretty(Adjoint(Inverse(X))) == " +\n/ -1\\ \n\\X / " assert pretty(Inverse(Adjoint(X))) == " -1\n/ +\\ \n\\X / " assert pretty(Adjoint(Transpose(X))) == " +\n/ T\\ \n\\X / " assert pretty(Transpose(Adjoint(X))) == " T\n/ +\\ \n\\X / " assert upretty(Adjoint(X)) == u" †\nX " assert upretty(Adjoint(X + Y)) == u" †\n(X + Y) " assert upretty(Adjoint(X) + Adjoint(Y)) == u" † †\nX + Y " assert upretty(Adjoint(X*Y)) == u" †\n(X⋅Y) " assert upretty(Adjoint(Y)*Adjoint(X)) == u" † †\nY ⋅X " assert upretty(Adjoint(X**2)) == \ u" †\n⎛ 2⎞ \n⎝X ⎠ " assert upretty(Adjoint(X)**2) == \ u" 2\n⎛ †⎞ \n⎝X ⎠ " assert upretty(Adjoint(Inverse(X))) == \ u" †\n⎛ -1⎞ \n⎝X ⎠ " assert upretty(Inverse(Adjoint(X))) == \ u" -1\n⎛ †⎞ \n⎝X ⎠ " assert upretty(Adjoint(Transpose(X))) == \ u" †\n⎛ T⎞ \n⎝X ⎠ " assert upretty(Transpose(Adjoint(X))) == \ u" T\n⎛ †⎞ \n⎝X ⎠ " def test_pretty_Trace_issue_9044(): X = Matrix([[1, 2], [3, 4]]) Y = Matrix([[2, 4], [6, 8]]) ascii_str_1 = \ """\ /[1 2]\\ tr|[ ]| \\[3 4]/\ """ ucode_str_1 = \ u("""\ ⎛⎡1 2⎤⎞ tr⎜⎢ ⎥⎟ 
⎝⎣3 4⎦⎠\ """) ascii_str_2 = \ """\ /[1 2]\\ /[2 4]\\ tr|[ ]| + tr|[ ]| \\[3 4]/ \\[6 8]/\ """ ucode_str_2 = \ u("""\ ⎛⎡1 2⎤⎞ ⎛⎡2 4⎤⎞ tr⎜⎢ ⎥⎟ + tr⎜⎢ ⎥⎟ ⎝⎣3 4⎦⎠ ⎝⎣6 8⎦⎠\ """) assert pretty(Trace(X)) == ascii_str_1 assert upretty(Trace(X)) == ucode_str_1 assert pretty(Trace(X) + Trace(Y)) == ascii_str_2 assert upretty(Trace(X) + Trace(Y)) == ucode_str_2 def test_MatrixExpressions(): n = Symbol('n', integer=True) X = MatrixSymbol('X', n, n) assert pretty(X) == upretty(X) == "X" Y = X[1:2:3, 4:5:6] ascii_str = ucode_str = "X[1:3, 4:6]" assert pretty(Y) == ascii_str assert upretty(Y) == ucode_str Z = X[1:10:2] ascii_str = ucode_str = "X[1:10:2, :n]" assert pretty(Z) == ascii_str assert upretty(Z) == ucode_str # Apply function elementwise (`ElementwiseApplyFunc`): expr = (X.T*X).applyfunc(sin) ascii_str = """\ / T \\\n\ (d -> sin(d)).\\X *X/\ """ ucode_str = u("""\ ⎛ T ⎞\n\ (d ↦ sin(d))˳⎝X ⋅X⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str lamda = Lambda(x, 1/x) expr = (n*X).applyfunc(lamda) ascii_str = """\ / 1\\ \n\ |d -> -|.(n*X)\n\ \\ d/ \ """ ucode_str = u("""\ ⎛ 1⎞ \n\ ⎜d ↦ ─⎟˳(n⋅X)\n\ ⎝ d⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_dotproduct(): from sympy.matrices import Matrix, MatrixSymbol from sympy.matrices.expressions.dotproduct import DotProduct n = symbols("n", integer=True) A = MatrixSymbol('A', n, 1) B = MatrixSymbol('B', n, 1) C = Matrix(1, 3, [1, 2, 3]) D = Matrix(1, 3, [1, 3, 4]) assert pretty(DotProduct(A, B)) == u"A*B" assert pretty(DotProduct(C, D)) == u"[1 2 3]*[1 3 4]" assert upretty(DotProduct(A, B)) == u"A⋅B" assert upretty(DotProduct(C, D)) == u"[1 2 3]⋅[1 3 4]" def test_pretty_piecewise(): expr = Piecewise((x, x < 1), (x**2, True)) ascii_str = \ """\ /x for x < 1\n\ | \n\ < 2 \n\ |x otherwise\n\ \\ \ """ ucode_str = \ u("""\ ⎧x for x < 1\n\ ⎪ \n\ ⎨ 2 \n\ ⎪x otherwise\n\ ⎩ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -Piecewise((x, x < 1), (x**2, True)) ascii_str = \ """\ //x for x < 1\\\n\ || |\n\ -|< 2 |\n\ ||x otherwise|\n\ \\\\ /\ """ ucode_str = \ u("""\ ⎛⎧x for x < 1⎞\n\ ⎜⎪ ⎟\n\ -⎜⎨ 2 ⎟\n\ ⎜⎪x otherwise⎟\n\ ⎝⎩ ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = x + Piecewise((x, x > 0), (y, True)) + Piecewise((x/y, x < 2), (y**2, x > 2), (1, True)) + 1 ascii_str = \ """\ //x \\ \n\ ||- for x < 2| \n\ ||y | \n\ //x for x > 0\\ || | \n\ x + |< | + |< 2 | + 1\n\ \\\\y otherwise/ ||y for x > 2| \n\ || | \n\ ||1 otherwise| \n\ \\\\ / \ """ ucode_str = \ u("""\ ⎛⎧x ⎞ \n\ ⎜⎪─ for x < 2⎟ \n\ ⎜⎪y ⎟ \n\ ⎛⎧x for x > 0⎞ ⎜⎪ ⎟ \n\ x + ⎜⎨ ⎟ + ⎜⎨ 2 ⎟ + 1\n\ ⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟ \n\ ⎜⎪ ⎟ \n\ ⎜⎪1 otherwise⎟ \n\ ⎝⎩ ⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = x - Piecewise((x, x > 0), (y, True)) + Piecewise((x/y, x < 2), (y**2, x > 2), (1, True)) + 1 ascii_str = \ """\ //x \\ \n\ ||- for x < 2| \n\ ||y | \n\ //x for x > 0\\ || | \n\ x - |< | + |< 2 | + 1\n\ \\\\y otherwise/ ||y for x > 2| \n\ || | \n\ ||1 otherwise| \n\ \\\\ / \ """ ucode_str = \ u("""\ ⎛⎧x ⎞ \n\ ⎜⎪─ for x < 2⎟ \n\ ⎜⎪y ⎟ \n\ ⎛⎧x for x > 0⎞ ⎜⎪ ⎟ \n\ x - ⎜⎨ ⎟ + ⎜⎨ 2 ⎟ + 1\n\ ⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟ \n\ ⎜⎪ ⎟ \n\ ⎜⎪1 otherwise⎟ \n\ ⎝⎩ ⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = x*Piecewise((x, x > 0), (y, True)) ascii_str = \ """\ //x for x > 0\\\n\ x*|< |\n\ \\\\y otherwise/\ """ ucode_str = \ u("""\ ⎛⎧x for x > 0⎞\n\ x⋅⎜⎨ ⎟\n\ ⎝⎩y otherwise⎠\ """) assert pretty(expr) == ascii_str assert 
upretty(expr) == ucode_str expr = Piecewise((x, x > 0), (y, True))*Piecewise((x/y, x < 2), (y**2, x > 2), (1, True)) ascii_str = \ """\ //x \\\n\ ||- for x < 2|\n\ ||y |\n\ //x for x > 0\\ || |\n\ |< |*|< 2 |\n\ \\\\y otherwise/ ||y for x > 2|\n\ || |\n\ ||1 otherwise|\n\ \\\\ /\ """ ucode_str = \ u("""\ ⎛⎧x ⎞\n\ ⎜⎪─ for x < 2⎟\n\ ⎜⎪y ⎟\n\ ⎛⎧x for x > 0⎞ ⎜⎪ ⎟\n\ ⎜⎨ ⎟⋅⎜⎨ 2 ⎟\n\ ⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟\n\ ⎜⎪ ⎟\n\ ⎜⎪1 otherwise⎟\n\ ⎝⎩ ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -Piecewise((x, x > 0), (y, True))*Piecewise((x/y, x < 2), (y**2, x > 2), (1, True)) ascii_str = \ """\ //x \\\n\ ||- for x < 2|\n\ ||y |\n\ //x for x > 0\\ || |\n\ -|< |*|< 2 |\n\ \\\\y otherwise/ ||y for x > 2|\n\ || |\n\ ||1 otherwise|\n\ \\\\ /\ """ ucode_str = \ u("""\ ⎛⎧x ⎞\n\ ⎜⎪─ for x < 2⎟\n\ ⎜⎪y ⎟\n\ ⎛⎧x for x > 0⎞ ⎜⎪ ⎟\n\ -⎜⎨ ⎟⋅⎜⎨ 2 ⎟\n\ ⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟\n\ ⎜⎪ ⎟\n\ ⎜⎪1 otherwise⎟\n\ ⎝⎩ ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Piecewise((0, Abs(1/y) < 1), (1, Abs(y) < 1), (y*meijerg(((2, 1), ()), ((), (1, 0)), 1/y), True)) ascii_str = \ """\ / 1 \n\ | 0 for --- < 1\n\ | |y| \n\ | \n\ < 1 for |y| < 1\n\ | \n\ | __0, 2 /2, 1 | 1\\ \n\ |y*/__ | | -| otherwise \n\ \\ \\_|2, 2 \\ 1, 0 | y/ \ """ ucode_str = \ u("""\ ⎧ 1 \n\ ⎪ 0 for ─── < 1\n\ ⎪ │y│ \n\ ⎪ \n\ ⎨ 1 for │y│ < 1\n\ ⎪ \n\ ⎪ ╭─╮0, 2 ⎛2, 1 │ 1⎞ \n\ ⎪y⋅│╶┐ ⎜ │ ─⎟ otherwise \n\ ⎩ ╰─╯2, 2 ⎝ 1, 0 │ y⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str # XXX: We have to use evaluate=False here because Piecewise._eval_power # denests the power. expr = Pow(Piecewise((x, x > 0), (y, True)), 2, evaluate=False) ascii_str = \ """\ 2\n\ //x for x > 0\\ \n\ |< | \n\ \\\\y otherwise/ \ """ ucode_str = \ u("""\ 2\n\ ⎛⎧x for x > 0⎞ \n\ ⎜⎨ ⎟ \n\ ⎝⎩y otherwise⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_ITE(): expr = ITE(x, y, z) assert pretty(expr) == ( '/y for x \n' '< \n' '\\z otherwise' ) assert upretty(expr) == u("""\ ⎧y for x \n\ ⎨ \n\ ⎩z otherwise\ """) def test_pretty_seq(): expr = () ascii_str = \ """\ ()\ """ ucode_str = \ u("""\ ()\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = [] ascii_str = \ """\ []\ """ ucode_str = \ u("""\ []\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = {} expr_2 = {} ascii_str = \ """\ {}\ """ ucode_str = \ u("""\ {}\ """) assert pretty(expr) == ascii_str assert pretty(expr_2) == ascii_str assert upretty(expr) == ucode_str assert upretty(expr_2) == ucode_str expr = (1/x,) ascii_str = \ """\ 1 \n\ (-,)\n\ x \ """ ucode_str = \ u("""\ ⎛1 ⎞\n\ ⎜─,⎟\n\ ⎝x ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = [x**2, 1/x, x, y, sin(th)**2/cos(ph)**2] ascii_str = \ """\ 2 \n\ 2 1 sin (theta) \n\ [x , -, x, y, -----------]\n\ x 2 \n\ cos (phi) \ """ ucode_str = \ u("""\ ⎡ 2 ⎤\n\ ⎢ 2 1 sin (θ)⎥\n\ ⎢x , ─, x, y, ───────⎥\n\ ⎢ x 2 ⎥\n\ ⎣ cos (φ)⎦\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (x**2, 1/x, x, y, sin(th)**2/cos(ph)**2) ascii_str = \ """\ 2 \n\ 2 1 sin (theta) \n\ (x , -, x, y, -----------)\n\ x 2 \n\ cos (phi) \ """ ucode_str = \ u("""\ ⎛ 2 ⎞\n\ ⎜ 2 1 sin (θ)⎟\n\ ⎜x , ─, x, y, ───────⎟\n\ ⎜ x 2 ⎟\n\ ⎝ cos (φ)⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Tuple(x**2, 1/x, x, y, sin(th)**2/cos(ph)**2) ascii_str = \ """\ 2 \n\ 2 1 sin (theta) \n\ (x , -, x, y, -----------)\n\ x 2 \n\ cos (phi) \ """ ucode_str = \ 
u("""\ ⎛ 2 ⎞\n\ ⎜ 2 1 sin (θ)⎟\n\ ⎜x , ─, x, y, ───────⎟\n\ ⎜ x 2 ⎟\n\ ⎝ cos (φ)⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = {x: sin(x)} expr_2 = Dict({x: sin(x)}) ascii_str = \ """\ {x: sin(x)}\ """ ucode_str = \ u("""\ {x: sin(x)}\ """) assert pretty(expr) == ascii_str assert pretty(expr_2) == ascii_str assert upretty(expr) == ucode_str assert upretty(expr_2) == ucode_str expr = {1/x: 1/y, x: sin(x)**2} expr_2 = Dict({1/x: 1/y, x: sin(x)**2}) ascii_str = \ """\ 1 1 2 \n\ {-: -, x: sin (x)}\n\ x y \ """ ucode_str = \ u("""\ ⎧1 1 2 ⎫\n\ ⎨─: ─, x: sin (x)⎬\n\ ⎩x y ⎭\ """) assert pretty(expr) == ascii_str assert pretty(expr_2) == ascii_str assert upretty(expr) == ucode_str assert upretty(expr_2) == ucode_str # There used to be a bug with pretty-printing sequences of even height. expr = [x**2] ascii_str = \ """\ 2 \n\ [x ]\ """ ucode_str = \ u("""\ ⎡ 2⎤\n\ ⎣x ⎦\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (x**2,) ascii_str = \ """\ 2 \n\ (x ,)\ """ ucode_str = \ u("""\ ⎛ 2 ⎞\n\ ⎝x ,⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Tuple(x**2) ascii_str = \ """\ 2 \n\ (x ,)\ """ ucode_str = \ u("""\ ⎛ 2 ⎞\n\ ⎝x ,⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = {x**2: 1} expr_2 = Dict({x**2: 1}) ascii_str = \ """\ 2 \n\ {x : 1}\ """ ucode_str = \ u("""\ ⎧ 2 ⎫\n\ ⎨x : 1⎬\n\ ⎩ ⎭\ """) assert pretty(expr) == ascii_str assert pretty(expr_2) == ascii_str assert upretty(expr) == ucode_str assert upretty(expr_2) == ucode_str def test_any_object_in_sequence(): # Cf. issue 5306 b1 = Basic() b2 = Basic(Basic()) expr = [b2, b1] assert pretty(expr) == "[Basic(Basic()), Basic()]" assert upretty(expr) == u"[Basic(Basic()), Basic()]" expr = {b2, b1} assert pretty(expr) == "{Basic(), Basic(Basic())}" assert upretty(expr) == u"{Basic(), Basic(Basic())}" expr = {b2: b1, b1: b2} expr2 = Dict({b2: b1, b1: b2}) assert pretty(expr) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}" assert pretty( expr2) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}" assert upretty( expr) == u"{Basic(): Basic(Basic()), Basic(Basic()): Basic()}" assert upretty( expr2) == u"{Basic(): Basic(Basic()), Basic(Basic()): Basic()}" def test_print_builtin_set(): assert pretty(set()) == 'set()' assert upretty(set()) == u'set()' assert pretty(frozenset()) == 'frozenset()' assert upretty(frozenset()) == u'frozenset()' s1 = {1/x, x} s2 = frozenset(s1) assert pretty(s1) == \ """\ 1 \n\ {-, x} x \ """ assert upretty(s1) == \ u"""\ ⎧1 ⎫ ⎨─, x⎬ ⎩x ⎭\ """ assert pretty(s2) == \ """\ 1 \n\ frozenset({-, x}) x \ """ assert upretty(s2) == \ u"""\ ⎛⎧1 ⎫⎞ frozenset⎜⎨─, x⎬⎟ ⎝⎩x ⎭⎠\ """ def test_pretty_sets(): s = FiniteSet assert pretty(s(*[x*y, x**2])) == \ """\ 2 \n\ {x , x*y}\ """ assert pretty(s(*range(1, 6))) == "{1, 2, 3, 4, 5}" assert pretty(s(*range(1, 13))) == "{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}" assert pretty(set([x*y, x**2])) == \ """\ 2 \n\ {x , x*y}\ """ assert pretty(set(range(1, 6))) == "{1, 2, 3, 4, 5}" assert pretty(set(range(1, 13))) == \ "{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}" assert pretty(frozenset([x*y, x**2])) == \ """\ 2 \n\ frozenset({x , x*y})\ """ assert pretty(frozenset(range(1, 6))) == "frozenset({1, 2, 3, 4, 5})" assert pretty(frozenset(range(1, 13))) == \ "frozenset({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})" assert pretty(Range(0, 3, 1)) == '{0, 1, 2}' ascii_str = '{0, 1, ..., 29}' ucode_str = u'{0, 1, …, 29}' assert pretty(Range(0, 30, 1)) == ascii_str assert 
upretty(Range(0, 30, 1)) == ucode_str ascii_str = '{30, 29, ..., 2}' ucode_str = u('{30, 29, …, 2}') assert pretty(Range(30, 1, -1)) == ascii_str assert upretty(Range(30, 1, -1)) == ucode_str ascii_str = '{0, 2, ...}' ucode_str = u'{0, 2, …}' assert pretty(Range(0, oo, 2)) == ascii_str assert upretty(Range(0, oo, 2)) == ucode_str ascii_str = '{..., 2, 0}' ucode_str = u('{…, 2, 0}') assert pretty(Range(oo, -2, -2)) == ascii_str assert upretty(Range(oo, -2, -2)) == ucode_str ascii_str = '{-2, -3, ...}' ucode_str = u('{-2, -3, …}') assert pretty(Range(-2, -oo, -1)) == ascii_str assert upretty(Range(-2, -oo, -1)) == ucode_str def test_pretty_SetExpr(): iv = Interval(1, 3) se = SetExpr(iv) ascii_str = "SetExpr([1, 3])" ucode_str = u("SetExpr([1, 3])") assert pretty(se) == ascii_str assert upretty(se) == ucode_str def test_pretty_ImageSet(): imgset = ImageSet(Lambda((x, y), x + y), {1, 2, 3}, {3, 4}) ascii_str = '{x + y | x in {1, 2, 3} , y in {3, 4}}' ucode_str = u('{x + y | x ∊ {1, 2, 3} , y ∊ {3, 4}}') assert pretty(imgset) == ascii_str assert upretty(imgset) == ucode_str imgset = ImageSet(Lambda(((x, y),), x + y), ProductSet({1, 2, 3}, {3, 4})) ascii_str = '{x + y | (x, y) in {1, 2, 3} x {3, 4}}' ucode_str = u('{x + y | (x, y) ∊ {1, 2, 3} × {3, 4}}') assert pretty(imgset) == ascii_str assert upretty(imgset) == ucode_str imgset = ImageSet(Lambda(x, x**2), S.Naturals) ascii_str = \ ' 2 \n'\ '{x | x in Naturals}' ucode_str = u('''\ ⎧ 2 ⎫\n\ ⎨x | x ∊ ℕ⎬\n\ ⎩ ⎭''') assert pretty(imgset) == ascii_str assert upretty(imgset) == ucode_str def test_pretty_ConditionSet(): from sympy import ConditionSet ascii_str = '{x | x in (-oo, oo) and sin(x) = 0}' ucode_str = u'{x | x ∊ ℝ ∧ (sin(x) = 0)}' assert pretty(ConditionSet(x, Eq(sin(x), 0), S.Reals)) == ascii_str assert upretty(ConditionSet(x, Eq(sin(x), 0), S.Reals)) == ucode_str assert pretty(ConditionSet(x, Contains(x, S.Reals, evaluate=False), FiniteSet(1))) == '{1}' assert upretty(ConditionSet(x, Contains(x, S.Reals, evaluate=False), FiniteSet(1))) == u'{1}' assert pretty(ConditionSet(x, And(x > 1, x < -1), FiniteSet(1, 2, 3))) == "EmptySet" assert upretty(ConditionSet(x, And(x > 1, x < -1), FiniteSet(1, 2, 3))) == u"∅" assert pretty(ConditionSet(x, Or(x > 1, x < -1), FiniteSet(1, 2))) == '{2}' assert upretty(ConditionSet(x, Or(x > 1, x < -1), FiniteSet(1, 2))) == u'{2}' def test_pretty_ComplexRegion(): from sympy import ComplexRegion ucode_str = u'{x + y⋅ⅈ | x, y ∊ [3, 5] × [4, 6]}' assert upretty(ComplexRegion(Interval(3, 5)*Interval(4, 6))) == ucode_str ucode_str = u'{r⋅(ⅈ⋅sin(θ) + cos(θ)) | r, θ ∊ [0, 1] × [0, 2⋅π)}' assert upretty(ComplexRegion(Interval(0, 1)*Interval(0, 2*pi), polar=True)) == ucode_str def test_pretty_Union_issue_10414(): a, b = Interval(2, 3), Interval(4, 7) ucode_str = u'[2, 3] ∪ [4, 7]' ascii_str = '[2, 3] U [4, 7]' assert upretty(Union(a, b)) == ucode_str assert pretty(Union(a, b)) == ascii_str def test_pretty_Intersection_issue_10414(): x, y, z, w = symbols('x, y, z, w') a, b = Interval(x, y), Interval(z, w) ucode_str = u'[x, y] ∩ [z, w]' ascii_str = '[x, y] n [z, w]' assert upretty(Intersection(a, b)) == ucode_str assert pretty(Intersection(a, b)) == ascii_str def test_ProductSet_exponent(): ucode_str = ' 1\n[0, 1] ' assert upretty(Interval(0, 1)**1) == ucode_str ucode_str = ' 2\n[0, 1] ' assert upretty(Interval(0, 1)**2) == ucode_str def test_ProductSet_parenthesis(): ucode_str = u'([4, 7] × {1, 2}) ∪ ([2, 3] × [4, 7])' a, b = Interval(2, 3), Interval(4, 7) assert upretty(Union(a*b, b*FiniteSet(1, 2))) == ucode_str def 
test_ProductSet_prod_char_issue_10413(): ascii_str = '[2, 3] x [4, 7]' ucode_str = u'[2, 3] × [4, 7]' a, b = Interval(2, 3), Interval(4, 7) assert pretty(a*b) == ascii_str assert upretty(a*b) == ucode_str def test_pretty_sequences(): s1 = SeqFormula(a**2, (0, oo)) s2 = SeqPer((1, 2)) ascii_str = '[0, 1, 4, 9, ...]' ucode_str = u'[0, 1, 4, 9, …]' assert pretty(s1) == ascii_str assert upretty(s1) == ucode_str ascii_str = '[1, 2, 1, 2, ...]' ucode_str = u'[1, 2, 1, 2, …]' assert pretty(s2) == ascii_str assert upretty(s2) == ucode_str s3 = SeqFormula(a**2, (0, 2)) s4 = SeqPer((1, 2), (0, 2)) ascii_str = '[0, 1, 4]' ucode_str = u'[0, 1, 4]' assert pretty(s3) == ascii_str assert upretty(s3) == ucode_str ascii_str = '[1, 2, 1]' ucode_str = u'[1, 2, 1]' assert pretty(s4) == ascii_str assert upretty(s4) == ucode_str s5 = SeqFormula(a**2, (-oo, 0)) s6 = SeqPer((1, 2), (-oo, 0)) ascii_str = '[..., 9, 4, 1, 0]' ucode_str = u'[…, 9, 4, 1, 0]' assert pretty(s5) == ascii_str assert upretty(s5) == ucode_str ascii_str = '[..., 2, 1, 2, 1]' ucode_str = u'[…, 2, 1, 2, 1]' assert pretty(s6) == ascii_str assert upretty(s6) == ucode_str ascii_str = '[1, 3, 5, 11, ...]' ucode_str = u'[1, 3, 5, 11, …]' assert pretty(SeqAdd(s1, s2)) == ascii_str assert upretty(SeqAdd(s1, s2)) == ucode_str ascii_str = '[1, 3, 5]' ucode_str = u'[1, 3, 5]' assert pretty(SeqAdd(s3, s4)) == ascii_str assert upretty(SeqAdd(s3, s4)) == ucode_str ascii_str = '[..., 11, 5, 3, 1]' ucode_str = u'[…, 11, 5, 3, 1]' assert pretty(SeqAdd(s5, s6)) == ascii_str assert upretty(SeqAdd(s5, s6)) == ucode_str ascii_str = '[0, 2, 4, 18, ...]' ucode_str = u'[0, 2, 4, 18, …]' assert pretty(SeqMul(s1, s2)) == ascii_str assert upretty(SeqMul(s1, s2)) == ucode_str ascii_str = '[0, 2, 4]' ucode_str = u'[0, 2, 4]' assert pretty(SeqMul(s3, s4)) == ascii_str assert upretty(SeqMul(s3, s4)) == ucode_str ascii_str = '[..., 18, 4, 2, 0]' ucode_str = u'[…, 18, 4, 2, 0]' assert pretty(SeqMul(s5, s6)) == ascii_str assert upretty(SeqMul(s5, s6)) == ucode_str # Sequences with symbolic limits, issue 12629 s7 = SeqFormula(a**2, (a, 0, x)) raises(NotImplementedError, lambda: pretty(s7)) raises(NotImplementedError, lambda: upretty(s7)) b = Symbol('b') s8 = SeqFormula(b*a**2, (a, 0, 2)) ascii_str = u'[0, b, 4*b]' ucode_str = u'[0, b, 4⋅b]' assert pretty(s8) == ascii_str assert upretty(s8) == ucode_str def test_pretty_FourierSeries(): f = fourier_series(x, (x, -pi, pi)) ascii_str = \ """\ 2*sin(3*x) \n\ 2*sin(x) - sin(2*x) + ---------- + ...\n\ 3 \ """ ucode_str = \ u("""\ 2⋅sin(3⋅x) \n\ 2⋅sin(x) - sin(2⋅x) + ────────── + …\n\ 3 \ """) assert pretty(f) == ascii_str assert upretty(f) == ucode_str def test_pretty_FormalPowerSeries(): f = fps(log(1 + x)) ascii_str = \ """\ oo \n\ ____ \n\ \\ ` \n\ \\ -k k \n\ \\ -(-1) *x \n\ / -----------\n\ / k \n\ /___, \n\ k = 1 \ """ ucode_str = \ u("""\ ∞ \n\ ____ \n\ ╲ \n\ ╲ -k k \n\ ╲ -(-1) ⋅x \n\ ╱ ───────────\n\ ╱ k \n\ ╱ \n\ ‾‾‾‾ \n\ k = 1 \ """) assert pretty(f) == ascii_str assert upretty(f) == ucode_str def test_pretty_limits(): expr = Limit(x, x, oo) ascii_str = \ """\ lim x\n\ x->oo \ """ ucode_str = \ u("""\ lim x\n\ x─→∞ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(x**2, x, 0) ascii_str = \ """\ 2\n\ lim x \n\ x->0+ \ """ ucode_str = \ u("""\ 2\n\ lim x \n\ x─→0⁺ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(1/x, x, 0) ascii_str = \ """\ 1\n\ lim -\n\ x->0+x\ """ ucode_str = \ u("""\ 1\n\ lim ─\n\ x─→0⁺x\ """) assert pretty(expr) == ascii_str 
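    # The Limit checks in this test all follow the same pattern: build the
    # expression once, then compare pretty() against a hand-drawn ASCII block
    # and upretty() against the matching Unicode block (─→ arrow, superscript
    # direction sign).  A minimal sketch of that pattern; the demo_* names are
    # illustrative only and not part of the original assertions:
    demo_limit = Limit(1/x, x, 0)
    demo_ascii = pretty(demo_limit)   # ASCII drawing with 'lim' over 'x->0+'
    demo_ucode = upretty(demo_limit)  # same layout rendered with 'x─→0⁺'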
assert upretty(expr) == ucode_str expr = Limit(sin(x)/x, x, 0) ascii_str = \ """\ /sin(x)\\\n\ lim |------|\n\ x->0+\\ x /\ """ ucode_str = \ u("""\ ⎛sin(x)⎞\n\ lim ⎜──────⎟\n\ x─→0⁺⎝ x ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(sin(x)/x, x, 0, "-") ascii_str = \ """\ /sin(x)\\\n\ lim |------|\n\ x->0-\\ x /\ """ ucode_str = \ u("""\ ⎛sin(x)⎞\n\ lim ⎜──────⎟\n\ x─→0⁻⎝ x ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(x + sin(x), x, 0) ascii_str = \ """\ lim (x + sin(x))\n\ x->0+ \ """ ucode_str = \ u("""\ lim (x + sin(x))\n\ x─→0⁺ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(x, x, 0)**2 ascii_str = \ """\ 2\n\ / lim x\\ \n\ \\x->0+ / \ """ ucode_str = \ u("""\ 2\n\ ⎛ lim x⎞ \n\ ⎝x─→0⁺ ⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(x*Limit(y/2,y,0), x, 0) ascii_str = \ """\ / /y\\\\\n\ lim |x* lim |-||\n\ x->0+\\ y->0+\\2//\ """ ucode_str = \ u("""\ ⎛ ⎛y⎞⎞\n\ lim ⎜x⋅ lim ⎜─⎟⎟\n\ x─→0⁺⎝ y─→0⁺⎝2⎠⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = 2*Limit(x*Limit(y/2,y,0), x, 0) ascii_str = \ """\ / /y\\\\\n\ 2* lim |x* lim |-||\n\ x->0+\\ y->0+\\2//\ """ ucode_str = \ u("""\ ⎛ ⎛y⎞⎞\n\ 2⋅ lim ⎜x⋅ lim ⎜─⎟⎟\n\ x─→0⁺⎝ y─→0⁺⎝2⎠⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Limit(sin(x), x, 0, dir='+-') ascii_str = \ """\ lim sin(x)\n\ x->0 \ """ ucode_str = \ u("""\ lim sin(x)\n\ x─→0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_ComplexRootOf(): expr = rootof(x**5 + 11*x - 2, 0) ascii_str = \ """\ / 5 \\\n\ CRootOf\\x + 11*x - 2, 0/\ """ ucode_str = \ u("""\ ⎛ 5 ⎞\n\ CRootOf⎝x + 11⋅x - 2, 0⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_RootSum(): expr = RootSum(x**5 + 11*x - 2, auto=False) ascii_str = \ """\ / 5 \\\n\ RootSum\\x + 11*x - 2/\ """ ucode_str = \ u("""\ ⎛ 5 ⎞\n\ RootSum⎝x + 11⋅x - 2⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = RootSum(x**5 + 11*x - 2, Lambda(z, exp(z))) ascii_str = \ """\ / 5 z\\\n\ RootSum\\x + 11*x - 2, z -> e /\ """ ucode_str = \ u("""\ ⎛ 5 z⎞\n\ RootSum⎝x + 11⋅x - 2, z ↦ ℯ ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_GroebnerBasis(): expr = groebner([], x, y) ascii_str = \ """\ GroebnerBasis([], x, y, domain=ZZ, order=lex)\ """ ucode_str = \ u("""\ GroebnerBasis([], x, y, domain=ℤ, order=lex)\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1] expr = groebner(F, x, y, order='grlex') ascii_str = \ """\ /[ 2 2 ] \\\n\ GroebnerBasis\\[x - x - 3*y + 1, y - 2*x + y - 1], x, y, domain=ZZ, order=grlex/\ """ ucode_str = \ u("""\ ⎛⎡ 2 2 ⎤ ⎞\n\ GroebnerBasis⎝⎣x - x - 3⋅y + 1, y - 2⋅x + y - 1⎦, x, y, domain=ℤ, order=grlex⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = expr.fglm('lex') ascii_str = \ """\ /[ 2 4 3 2 ] \\\n\ GroebnerBasis\\[2*x - y - y + 1, y + 2*y - 3*y - 16*y + 7], x, y, domain=ZZ, order=lex/\ """ ucode_str = \ u("""\ ⎛⎡ 2 4 3 2 ⎤ ⎞\n\ GroebnerBasis⎝⎣2⋅x - y - y + 1, y + 2⋅y - 3⋅y - 16⋅y + 7⎦, x, y, domain=ℤ, order=lex⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_UniversalSet(): assert pretty(S.UniversalSet) == "UniversalSet" assert upretty(S.UniversalSet) == u'𝕌' def test_pretty_Boolean(): expr = Not(x, evaluate=False) assert 
pretty(expr) == "Not(x)" assert upretty(expr) == u"¬x" expr = And(x, y) assert pretty(expr) == "And(x, y)" assert upretty(expr) == u"x ∧ y" expr = Or(x, y) assert pretty(expr) == "Or(x, y)" assert upretty(expr) == u"x ∨ y" syms = symbols('a:f') expr = And(*syms) assert pretty(expr) == "And(a, b, c, d, e, f)" assert upretty(expr) == u"a ∧ b ∧ c ∧ d ∧ e ∧ f" expr = Or(*syms) assert pretty(expr) == "Or(a, b, c, d, e, f)" assert upretty(expr) == u"a ∨ b ∨ c ∨ d ∨ e ∨ f" expr = Xor(x, y, evaluate=False) assert pretty(expr) == "Xor(x, y)" assert upretty(expr) == u"x ⊻ y" expr = Nand(x, y, evaluate=False) assert pretty(expr) == "Nand(x, y)" assert upretty(expr) == u"x ⊼ y" expr = Nor(x, y, evaluate=False) assert pretty(expr) == "Nor(x, y)" assert upretty(expr) == u"x ⊽ y" expr = Implies(x, y, evaluate=False) assert pretty(expr) == "Implies(x, y)" assert upretty(expr) == u"x → y" # don't sort args expr = Implies(y, x, evaluate=False) assert pretty(expr) == "Implies(y, x)" assert upretty(expr) == u"y → x" expr = Equivalent(x, y, evaluate=False) assert pretty(expr) == "Equivalent(x, y)" assert upretty(expr) == u"x ⇔ y" expr = Equivalent(y, x, evaluate=False) assert pretty(expr) == "Equivalent(x, y)" assert upretty(expr) == u"x ⇔ y" def test_pretty_Domain(): expr = FF(23) assert pretty(expr) == "GF(23)" assert upretty(expr) == u"ℤ₂₃" expr = ZZ assert pretty(expr) == "ZZ" assert upretty(expr) == u"ℤ" expr = QQ assert pretty(expr) == "QQ" assert upretty(expr) == u"ℚ" expr = RR assert pretty(expr) == "RR" assert upretty(expr) == u"ℝ" expr = QQ[x] assert pretty(expr) == "QQ[x]" assert upretty(expr) == u"ℚ[x]" expr = QQ[x, y] assert pretty(expr) == "QQ[x, y]" assert upretty(expr) == u"ℚ[x, y]" expr = ZZ.frac_field(x) assert pretty(expr) == "ZZ(x)" assert upretty(expr) == u"ℤ(x)" expr = ZZ.frac_field(x, y) assert pretty(expr) == "ZZ(x, y)" assert upretty(expr) == u"ℤ(x, y)" expr = QQ.poly_ring(x, y, order=grlex) assert pretty(expr) == "QQ[x, y, order=grlex]" assert upretty(expr) == u"ℚ[x, y, order=grlex]" expr = QQ.poly_ring(x, y, order=ilex) assert pretty(expr) == "QQ[x, y, order=ilex]" assert upretty(expr) == u"ℚ[x, y, order=ilex]" def test_pretty_prec(): assert xpretty(S("0.3"), full_prec=True, wrap_line=False) == "0.300000000000000" assert xpretty(S("0.3"), full_prec="auto", wrap_line=False) == "0.300000000000000" assert xpretty(S("0.3"), full_prec=False, wrap_line=False) == "0.3" assert xpretty(S("0.3")*x, full_prec=True, use_unicode=False, wrap_line=False) in [ "0.300000000000000*x", "x*0.300000000000000" ] assert xpretty(S("0.3")*x, full_prec="auto", use_unicode=False, wrap_line=False) in [ "0.3*x", "x*0.3" ] assert xpretty(S("0.3")*x, full_prec=False, use_unicode=False, wrap_line=False) in [ "0.3*x", "x*0.3" ] def test_pprint(): import sys from sympy.core.compatibility import StringIO fd = StringIO() sso = sys.stdout sys.stdout = fd try: pprint(pi, use_unicode=False, wrap_line=False) finally: sys.stdout = sso assert fd.getvalue() == 'pi\n' def test_pretty_class(): """Test that the printer dispatcher correctly handles classes.""" class C: pass # C has no .__class__ and this was causing problems class D(object): pass assert pretty( C ) == str( C ) assert pretty( D ) == str( D ) def test_pretty_no_wrap_line(): huge_expr = 0 for i in range(20): huge_expr += i*sin(i + x) assert xpretty(huge_expr ).find('\n') != -1 assert xpretty(huge_expr, wrap_line=False).find('\n') == -1 def test_settings(): raises(TypeError, lambda: pretty(S(4), method="garbage")) def test_pretty_sum(): from sympy.abc import x, a, b, 
k, m, n expr = Sum(k**k, (k, 0, n)) ascii_str = \ """\ n \n\ ___ \n\ \\ ` \n\ \\ k\n\ / k \n\ /__, \n\ k = 0 \ """ ucode_str = \ u("""\ n \n\ ___ \n\ ╲ \n\ ╲ k\n\ ╱ k \n\ ╱ \n\ ‾‾‾ \n\ k = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(k**k, (k, oo, n)) ascii_str = \ """\ n \n\ ___ \n\ \\ ` \n\ \\ k\n\ / k \n\ /__, \n\ k = oo \ """ ucode_str = \ u("""\ n \n\ ___ \n\ ╲ \n\ ╲ k\n\ ╱ k \n\ ╱ \n\ ‾‾‾ \n\ k = ∞ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(k**(Integral(x**n, (x, -oo, oo))), (k, 0, n**n)) ascii_str = \ """\ n \n\ n \n\ ______ \n\ \\ ` \n\ \\ oo \n\ \\ / \n\ \\ | \n\ \\ | n \n\ ) | x dx\n\ / | \n\ / / \n\ / -oo \n\ / k \n\ /_____, \n\ k = 0 \ """ ucode_str = \ u("""\ n \n\ n \n\ ______ \n\ ╲ \n\ ╲ \n\ ╲ ∞ \n\ ╲ ⌠ \n\ ╲ ⎮ n \n\ ╱ ⎮ x dx\n\ ╱ ⌡ \n\ ╱ -∞ \n\ ╱ k \n\ ╱ \n\ ‾‾‾‾‾‾ \n\ k = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(k**( Integral(x**n, (x, -oo, oo))), (k, 0, Integral(x**x, (x, -oo, oo)))) ascii_str = \ """\ oo \n\ / \n\ | \n\ | x \n\ | x dx \n\ | \n\ / \n\ -oo \n\ ______ \n\ \\ ` \n\ \\ oo \n\ \\ / \n\ \\ | \n\ \\ | n \n\ ) | x dx\n\ / | \n\ / / \n\ / -oo \n\ / k \n\ /_____, \n\ k = 0 \ """ ucode_str = \ u("""\ ∞ \n\ ⌠ \n\ ⎮ x \n\ ⎮ x dx \n\ ⌡ \n\ -∞ \n\ ______ \n\ ╲ \n\ ╲ \n\ ╲ ∞ \n\ ╲ ⌠ \n\ ╲ ⎮ n \n\ ╱ ⎮ x dx\n\ ╱ ⌡ \n\ ╱ -∞ \n\ ╱ k \n\ ╱ \n\ ‾‾‾‾‾‾ \n\ k = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(k**(Integral(x**n, (x, -oo, oo))), ( k, x + n + x**2 + n**2 + (x/n) + (1/x), Integral(x**x, (x, -oo, oo)))) ascii_str = \ """\ oo \n\ / \n\ | \n\ | x \n\ | x dx \n\ | \n\ / \n\ -oo \n\ ______ \n\ \\ ` \n\ \\ oo \n\ \\ / \n\ \\ | \n\ \\ | n \n\ ) | x dx\n\ / | \n\ / / \n\ / -oo \n\ / k \n\ /_____, \n\ 2 2 1 x \n\ k = n + n + x + x + - + - \n\ x n \ """ ucode_str = \ u("""\ ∞ \n\ ⌠ \n\ ⎮ x \n\ ⎮ x dx \n\ ⌡ \n\ -∞ \n\ ______ \n\ ╲ \n\ ╲ \n\ ╲ ∞ \n\ ╲ ⌠ \n\ ╲ ⎮ n \n\ ╱ ⎮ x dx\n\ ╱ ⌡ \n\ ╱ -∞ \n\ ╱ k \n\ ╱ \n\ ‾‾‾‾‾‾ \n\ 2 2 1 x \n\ k = n + n + x + x + ─ + ─ \n\ x n \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(k**( Integral(x**n, (x, -oo, oo))), (k, 0, x + n + x**2 + n**2 + (x/n) + (1/x))) ascii_str = \ """\ 2 2 1 x \n\ n + n + x + x + - + - \n\ x n \n\ ______ \n\ \\ ` \n\ \\ oo \n\ \\ / \n\ \\ | \n\ \\ | n \n\ ) | x dx\n\ / | \n\ / / \n\ / -oo \n\ / k \n\ /_____, \n\ k = 0 \ """ ucode_str = \ u("""\ 2 2 1 x \n\ n + n + x + x + ─ + ─ \n\ x n \n\ ______ \n\ ╲ \n\ ╲ \n\ ╲ ∞ \n\ ╲ ⌠ \n\ ╲ ⎮ n \n\ ╱ ⎮ x dx\n\ ╱ ⌡ \n\ ╱ -∞ \n\ ╱ k \n\ ╱ \n\ ‾‾‾‾‾‾ \n\ k = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(x, (x, 0, oo)) ascii_str = \ """\ oo \n\ __ \n\ \\ ` \n\ ) x\n\ /_, \n\ x = 0 \ """ ucode_str = \ u("""\ ∞ \n\ ___ \n\ ╲ \n\ ╲ \n\ ╱ x\n\ ╱ \n\ ‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(x**2, (x, 0, oo)) ascii_str = \ u("""\ oo \n\ ___ \n\ \\ ` \n\ \\ 2\n\ / x \n\ /__, \n\ x = 0 \ """) ucode_str = \ u("""\ ∞ \n\ ___ \n\ ╲ \n\ ╲ 2\n\ ╱ x \n\ ╱ \n\ ‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(x/2, (x, 0, oo)) ascii_str = \ """\ oo \n\ ___ \n\ \\ ` \n\ \\ x\n\ ) -\n\ / 2\n\ /__, \n\ x = 0 \ """ ucode_str = \ u("""\ ∞ \n\ ____ \n\ ╲ \n\ ╲ \n\ ╲ x\n\ ╱ ─\n\ ╱ 2\n\ ╱ \n\ ‾‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(x**3/2, (x, 0, oo)) ascii_str = \ """\ oo \n\ ____ \n\ \\ ` \n\ \\ 3\n\ 
\\ x \n\ / --\n\ / 2 \n\ /___, \n\ x = 0 \ """ ucode_str = \ u("""\ ∞ \n\ ____ \n\ ╲ \n\ ╲ 3\n\ ╲ x \n\ ╱ ──\n\ ╱ 2 \n\ ╱ \n\ ‾‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum((x**3*y**(x/2))**n, (x, 0, oo)) ascii_str = \ """\ oo \n\ ____ \n\ \\ ` \n\ \\ n\n\ \\ / x\\ \n\ ) | -| \n\ / | 3 2| \n\ / \\x *y / \n\ /___, \n\ x = 0 \ """ ucode_str = \ u("""\ ∞ \n\ _____ \n\ ╲ \n\ ╲ \n\ ╲ n\n\ ╲ ⎛ x⎞ \n\ ╱ ⎜ ─⎟ \n\ ╱ ⎜ 3 2⎟ \n\ ╱ ⎝x ⋅y ⎠ \n\ ╱ \n\ ‾‾‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(1/x**2, (x, 0, oo)) ascii_str = \ """\ oo \n\ ____ \n\ \\ ` \n\ \\ 1 \n\ \\ --\n\ / 2\n\ / x \n\ /___, \n\ x = 0 \ """ ucode_str = \ u("""\ ∞ \n\ ____ \n\ ╲ \n\ ╲ 1 \n\ ╲ ──\n\ ╱ 2\n\ ╱ x \n\ ╱ \n\ ‾‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(1/y**(a/b), (x, 0, oo)) ascii_str = \ """\ oo \n\ ____ \n\ \\ ` \n\ \\ -a \n\ \\ ---\n\ / b \n\ / y \n\ /___, \n\ x = 0 \ """ ucode_str = \ u("""\ ∞ \n\ ____ \n\ ╲ \n\ ╲ -a \n\ ╲ ───\n\ ╱ b \n\ ╱ y \n\ ╱ \n\ ‾‾‾‾ \n\ x = 0 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Sum(1/y**(a/b), (x, 0, oo), (y, 1, 2)) ascii_str = \ """\ 2 oo \n\ ____ ____ \n\ \\ ` \\ ` \n\ \\ \\ -a\n\ \\ \\ --\n\ / / b \n\ / / y \n\ /___, /___, \n\ y = 1 x = 0 \ """ ucode_str = \ u("""\ 2 ∞ \n\ ____ ____ \n\ ╲ ╲ \n\ ╲ ╲ -a\n\ ╲ ╲ ──\n\ ╱ ╱ b \n\ ╱ ╱ y \n\ ╱ ╱ \n\ ‾‾‾‾ ‾‾‾‾ \n\ y = 1 x = 0 \ """) expr = Sum(1/(1 + 1/( 1 + 1/k)) + 1, (k, 111, 1 + 1/n), (k, 1/(1 + m), oo)) + 1/(1 + 1/k) ascii_str = \ """\ 1 \n\ 1 + - \n\ oo n \n\ _____ _____ \n\ \\ ` \\ ` \n\ \\ \\ / 1 \\ \n\ \\ \\ |1 + ---------| \n\ \\ \\ | 1 | 1 \n\ ) ) | 1 + -----| + -----\n\ / / | 1| 1\n\ / / | 1 + -| 1 + -\n\ / / \\ k/ k\n\ /____, /____, \n\ 1 k = 111 \n\ k = ----- \n\ m + 1 \ """ ucode_str = \ u("""\ 1 \n\ 1 + ─ \n\ ∞ n \n\ ______ ______ \n\ ╲ ╲ \n\ ╲ ╲ \n\ ╲ ╲ ⎛ 1 ⎞ \n\ ╲ ╲ ⎜1 + ─────────⎟ \n\ ╲ ╲ ⎜ 1 ⎟ 1 \n\ ╱ ╱ ⎜ 1 + ─────⎟ + ─────\n\ ╱ ╱ ⎜ 1⎟ 1\n\ ╱ ╱ ⎜ 1 + ─⎟ 1 + ─\n\ ╱ ╱ ⎝ k⎠ k\n\ ╱ ╱ \n\ ‾‾‾‾‾‾ ‾‾‾‾‾‾ \n\ 1 k = 111 \n\ k = ───── \n\ m + 1 \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_units(): expr = joule ascii_str1 = \ """\ 2\n\ kilogram*meter \n\ ---------------\n\ 2 \n\ second \ """ unicode_str1 = \ u("""\ 2\n\ kilogram⋅meter \n\ ───────────────\n\ 2 \n\ second \ """) ascii_str2 = \ """\ 2\n\ 3*x*y*kilogram*meter \n\ ---------------------\n\ 2 \n\ second \ """ unicode_str2 = \ u("""\ 2\n\ 3⋅x⋅y⋅kilogram⋅meter \n\ ─────────────────────\n\ 2 \n\ second \ """) from sympy.physics.units import kg, m, s assert upretty(expr) == u("joule") assert pretty(expr) == "joule" assert upretty(expr.convert_to(kg*m**2/s**2)) == unicode_str1 assert pretty(expr.convert_to(kg*m**2/s**2)) == ascii_str1 assert upretty(3*kg*x*m**2*y/s**2) == unicode_str2 assert pretty(3*kg*x*m**2*y/s**2) == ascii_str2 def test_pretty_Subs(): f = Function('f') expr = Subs(f(x), x, ph**2) ascii_str = \ """\ (f(x))| 2\n\ |x=phi \ """ unicode_str = \ u("""\ (f(x))│ 2\n\ │x=φ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str expr = Subs(f(x).diff(x), x, 0) ascii_str = \ """\ /d \\| \n\ |--(f(x))|| \n\ \\dx /|x=0\ """ unicode_str = \ u("""\ ⎛d ⎞│ \n\ ⎜──(f(x))⎟│ \n\ ⎝dx ⎠│x=0\ """) assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str expr = Subs(f(x).diff(x)/y, (x, y), (0, Rational(1, 2))) ascii_str = \ """\ /d \\| \n\ |--(f(x))|| \n\ |dx || \n\ |--------|| \n\ \\ y /|x=0, y=1/2\ """ unicode_str 
= \ u("""\ ⎛d ⎞│ \n\ ⎜──(f(x))⎟│ \n\ ⎜dx ⎟│ \n\ ⎜────────⎟│ \n\ ⎝ y ⎠│x=0, y=1/2\ """) assert pretty(expr) == ascii_str assert upretty(expr) == unicode_str def test_gammas(): assert upretty(lowergamma(x, y)) == u"γ(x, y)" assert upretty(uppergamma(x, y)) == u"Γ(x, y)" assert xpretty(gamma(x), use_unicode=True) == u'Γ(x)' assert xpretty(gamma, use_unicode=True) == u'Γ' assert xpretty(symbols('gamma', cls=Function)(x), use_unicode=True) == u'γ(x)' assert xpretty(symbols('gamma', cls=Function), use_unicode=True) == u'γ' def test_beta(): assert xpretty(beta(x,y), use_unicode=True) == u'Β(x, y)' assert xpretty(beta(x,y), use_unicode=False) == u'B(x, y)' assert xpretty(beta, use_unicode=True) == u'Β' assert xpretty(beta, use_unicode=False) == u'B' mybeta = Function('beta') assert xpretty(mybeta(x), use_unicode=True) == u'β(x)' assert xpretty(mybeta(x, y, z), use_unicode=False) == u'beta(x, y, z)' assert xpretty(mybeta, use_unicode=True) == u'β' # test that notation passes to subclasses of the same name only def test_function_subclass_different_name(): class mygamma(gamma): pass assert xpretty(mygamma, use_unicode=True) == r"mygamma" assert xpretty(mygamma(x), use_unicode=True) == r"mygamma(x)" def test_SingularityFunction(): assert xpretty(SingularityFunction(x, 0, n), use_unicode=True) == ( """\ n\n\ <x> \ """) assert xpretty(SingularityFunction(x, 1, n), use_unicode=True) == ( """\ n\n\ <x - 1> \ """) assert xpretty(SingularityFunction(x, -1, n), use_unicode=True) == ( """\ n\n\ <x + 1> \ """) assert xpretty(SingularityFunction(x, a, n), use_unicode=True) == ( """\ n\n\ <-a + x> \ """) assert xpretty(SingularityFunction(x, y, n), use_unicode=True) == ( """\ n\n\ <x - y> \ """) assert xpretty(SingularityFunction(x, 0, n), use_unicode=False) == ( """\ n\n\ <x> \ """) assert xpretty(SingularityFunction(x, 1, n), use_unicode=False) == ( """\ n\n\ <x - 1> \ """) assert xpretty(SingularityFunction(x, -1, n), use_unicode=False) == ( """\ n\n\ <x + 1> \ """) assert xpretty(SingularityFunction(x, a, n), use_unicode=False) == ( """\ n\n\ <-a + x> \ """) assert xpretty(SingularityFunction(x, y, n), use_unicode=False) == ( """\ n\n\ <x - y> \ """) def test_deltas(): assert xpretty(DiracDelta(x), use_unicode=True) == u'δ(x)' assert xpretty(DiracDelta(x, 1), use_unicode=True) == \ u("""\ (1) \n\ δ (x)\ """) assert xpretty(x*DiracDelta(x, 1), use_unicode=True) == \ u("""\ (1) \n\ x⋅δ (x)\ """) def test_hyper(): expr = hyper((), (), z) ucode_str = \ u("""\ ┌─ ⎛ │ ⎞\n\ ├─ ⎜ │ z⎟\n\ 0╵ 0 ⎝ │ ⎠\ """) ascii_str = \ """\ _ \n\ |_ / | \\\n\ | | | z|\n\ 0 0 \\ | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hyper((), (1,), x) ucode_str = \ u("""\ ┌─ ⎛ │ ⎞\n\ ├─ ⎜ │ x⎟\n\ 0╵ 1 ⎝1 │ ⎠\ """) ascii_str = \ """\ _ \n\ |_ / | \\\n\ | | | x|\n\ 0 1 \\1 | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hyper([2], [1], x) ucode_str = \ u("""\ ┌─ ⎛2 │ ⎞\n\ ├─ ⎜ │ x⎟\n\ 1╵ 1 ⎝1 │ ⎠\ """) ascii_str = \ """\ _ \n\ |_ /2 | \\\n\ | | | x|\n\ 1 1 \\1 | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hyper((pi/3, -2*k), (3, 4, 5, -3), x) ucode_str = \ u("""\ ⎛ π │ ⎞\n\ ┌─ ⎜ ─, -2⋅k │ ⎟\n\ ├─ ⎜ 3 │ x⎟\n\ 2╵ 4 ⎜ │ ⎟\n\ ⎝3, 4, 5, -3 │ ⎠\ """) ascii_str = \ """\ \n\ _ / pi | \\\n\ |_ | --, -2*k | |\n\ | | 3 | x|\n\ 2 4 | | |\n\ \\3, 4, 5, -3 | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hyper((pi, S('2/3'), -2*k), (3, 4, 5, -3), x**2) ucode_str = \ u("""\ ┌─ ⎛π, 2/3, -2⋅k │ 2⎞\n\ ├─ ⎜ │ x ⎟\n\ 3╵ 4 
⎝3, 4, 5, -3 │ ⎠\ """) ascii_str = \ """\ _ \n\ |_ /pi, 2/3, -2*k | 2\\\n\ | | | x |\n\ 3 4 \\ 3, 4, 5, -3 | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hyper([1, 2], [3, 4], 1/(1/(1/(1/x + 1) + 1) + 1)) ucode_str = \ u("""\ ⎛ │ 1 ⎞\n\ ⎜ │ ─────────────⎟\n\ ⎜ │ 1 ⎟\n\ ┌─ ⎜1, 2 │ 1 + ─────────⎟\n\ ├─ ⎜ │ 1 ⎟\n\ 2╵ 2 ⎜3, 4 │ 1 + ─────⎟\n\ ⎜ │ 1⎟\n\ ⎜ │ 1 + ─⎟\n\ ⎝ │ x⎠\ """) ascii_str = \ """\ \n\ / | 1 \\\n\ | | -------------|\n\ _ | | 1 |\n\ |_ |1, 2 | 1 + ---------|\n\ | | | 1 |\n\ 2 2 |3, 4 | 1 + -----|\n\ | | 1|\n\ | | 1 + -|\n\ \\ | x/\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_meijerg(): expr = meijerg([pi, pi, x], [1], [0, 1], [1, 2, 3], z) ucode_str = \ u("""\ ╭─╮2, 3 ⎛π, π, x 1 │ ⎞\n\ │╶┐ ⎜ │ z⎟\n\ ╰─╯4, 5 ⎝ 0, 1 1, 2, 3 │ ⎠\ """) ascii_str = \ """\ __2, 3 /pi, pi, x 1 | \\\n\ /__ | | z|\n\ \\_|4, 5 \\ 0, 1 1, 2, 3 | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = meijerg([1, pi/7], [2, pi, 5], [], [], z**2) ucode_str = \ u("""\ ⎛ π │ ⎞\n\ ╭─╮0, 2 ⎜1, ─ 2, π, 5 │ 2⎟\n\ │╶┐ ⎜ 7 │ z ⎟\n\ ╰─╯5, 0 ⎜ │ ⎟\n\ ⎝ │ ⎠\ """) ascii_str = \ """\ / pi | \\\n\ __0, 2 |1, -- 2, pi, 5 | 2|\n\ /__ | 7 | z |\n\ \\_|5, 0 | | |\n\ \\ | /\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str ucode_str = \ u("""\ ╭─╮ 1, 10 ⎛1, 1, 1, 1, 1, 1, 1, 1, 1, 1 1 │ ⎞\n\ │╶┐ ⎜ │ z⎟\n\ ╰─╯11, 2 ⎝ 1 1 │ ⎠\ """) ascii_str = \ """\ __ 1, 10 /1, 1, 1, 1, 1, 1, 1, 1, 1, 1 1 | \\\n\ /__ | | z|\n\ \\_|11, 2 \\ 1 1 | /\ """ expr = meijerg([1]*10, [1], [1], [1], z) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = meijerg([1, 2, ], [4, 3], [3], [4, 5], 1/(1/(1/(1/x + 1) + 1) + 1)) ucode_str = \ u("""\ ⎛ │ 1 ⎞\n\ ⎜ │ ─────────────⎟\n\ ⎜ │ 1 ⎟\n\ ╭─╮1, 2 ⎜1, 2 4, 3 │ 1 + ─────────⎟\n\ │╶┐ ⎜ │ 1 ⎟\n\ ╰─╯4, 3 ⎜ 3 4, 5 │ 1 + ─────⎟\n\ ⎜ │ 1⎟\n\ ⎜ │ 1 + ─⎟\n\ ⎝ │ x⎠\ """) ascii_str = \ """\ / | 1 \\\n\ | | -------------|\n\ | | 1 |\n\ __1, 2 |1, 2 4, 3 | 1 + ---------|\n\ /__ | | 1 |\n\ \\_|4, 3 | 3 4, 5 | 1 + -----|\n\ | | 1|\n\ | | 1 + -|\n\ \\ | x/\ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = Integral(expr, x) ucode_str = \ u("""\ ⌠ \n\ ⎮ ⎛ │ 1 ⎞ \n\ ⎮ ⎜ │ ─────────────⎟ \n\ ⎮ ⎜ │ 1 ⎟ \n\ ⎮ ╭─╮1, 2 ⎜1, 2 4, 3 │ 1 + ─────────⎟ \n\ ⎮ │╶┐ ⎜ │ 1 ⎟ dx\n\ ⎮ ╰─╯4, 3 ⎜ 3 4, 5 │ 1 + ─────⎟ \n\ ⎮ ⎜ │ 1⎟ \n\ ⎮ ⎜ │ 1 + ─⎟ \n\ ⎮ ⎝ │ x⎠ \n\ ⌡ \ """) ascii_str = \ """\ / \n\ | \n\ | / | 1 \\ \n\ | | | -------------| \n\ | | | 1 | \n\ | __1, 2 |1, 2 4, 3 | 1 + ---------| \n\ | /__ | | 1 | dx\n\ | \\_|4, 3 | 3 4, 5 | 1 + -----| \n\ | | | 1| \n\ | | | 1 + -| \n\ | \\ | x/ \n\ | \n\ / \ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_noncommutative(): A, B, C = symbols('A,B,C', commutative=False) expr = A*B*C**-1 ascii_str = \ """\ -1\n\ A*B*C \ """ ucode_str = \ u("""\ -1\n\ A⋅B⋅C \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = C**-1*A*B ascii_str = \ """\ -1 \n\ C *A*B\ """ ucode_str = \ u("""\ -1 \n\ C ⋅A⋅B\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A*C**-1*B ascii_str = \ """\ -1 \n\ A*C *B\ """ ucode_str = \ u("""\ -1 \n\ A⋅C ⋅B\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A*C**-1*B/x ascii_str = \ """\ -1 \n\ A*C *B\n\ -------\n\ x \ """ ucode_str = \ u("""\ -1 \n\ A⋅C ⋅B\n\ ───────\n\ x \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_special_functions(): x, y = symbols("x y") # 
atan2 expr = atan2(y/sqrt(200), sqrt(x)) ascii_str = \ """\ / ___ \\\n\ |\\/ 2 *y ___|\n\ atan2|-------, \\/ x |\n\ \\ 20 /\ """ ucode_str = \ u("""\ ⎛√2⋅y ⎞\n\ atan2⎜────, √x⎟\n\ ⎝ 20 ⎠\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_geometry(): e = Segment((0, 1), (0, 2)) assert pretty(e) == 'Segment2D(Point2D(0, 1), Point2D(0, 2))' e = Ray((1, 1), angle=4.02*pi) assert pretty(e) == 'Ray2D(Point2D(1, 1), Point2D(2, tan(pi/50) + 1))' def test_expint(): expr = Ei(x) string = 'Ei(x)' assert pretty(expr) == string assert upretty(expr) == string expr = expint(1, z) ucode_str = u"E₁(z)" ascii_str = "expint(1, z)" assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str assert pretty(Shi(x)) == 'Shi(x)' assert pretty(Si(x)) == 'Si(x)' assert pretty(Ci(x)) == 'Ci(x)' assert pretty(Chi(x)) == 'Chi(x)' assert upretty(Shi(x)) == 'Shi(x)' assert upretty(Si(x)) == 'Si(x)' assert upretty(Ci(x)) == 'Ci(x)' assert upretty(Chi(x)) == 'Chi(x)' def test_elliptic_functions(): ascii_str = \ """\ / 1 \\\n\ K|-----|\n\ \\z + 1/\ """ ucode_str = \ u("""\ ⎛ 1 ⎞\n\ K⎜─────⎟\n\ ⎝z + 1⎠\ """) expr = elliptic_k(1/(z + 1)) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str ascii_str = \ """\ / | 1 \\\n\ F|1|-----|\n\ \\ |z + 1/\ """ ucode_str = \ u("""\ ⎛ │ 1 ⎞\n\ F⎜1│─────⎟\n\ ⎝ │z + 1⎠\ """) expr = elliptic_f(1, 1/(1 + z)) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str ascii_str = \ """\ / 1 \\\n\ E|-----|\n\ \\z + 1/\ """ ucode_str = \ u("""\ ⎛ 1 ⎞\n\ E⎜─────⎟\n\ ⎝z + 1⎠\ """) expr = elliptic_e(1/(z + 1)) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str ascii_str = \ """\ / | 1 \\\n\ E|1|-----|\n\ \\ |z + 1/\ """ ucode_str = \ u("""\ ⎛ │ 1 ⎞\n\ E⎜1│─────⎟\n\ ⎝ │z + 1⎠\ """) expr = elliptic_e(1, 1/(1 + z)) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str ascii_str = \ """\ / |4\\\n\ Pi|3|-|\n\ \\ |x/\ """ ucode_str = \ u("""\ ⎛ │4⎞\n\ Π⎜3│─⎟\n\ ⎝ │x⎠\ """) expr = elliptic_pi(3, 4/x) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str ascii_str = \ """\ / 4| \\\n\ Pi|3; -|6|\n\ \\ x| /\ """ ucode_str = \ u("""\ ⎛ 4│ ⎞\n\ Π⎜3; ─│6⎟\n\ ⎝ x│ ⎠\ """) expr = elliptic_pi(3, 4/x, 6) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_RandomDomain(): from sympy.stats import Normal, Die, Exponential, pspace, where X = Normal('x1', 0, 1) assert upretty(where(X > 0)) == u"Domain: 0 < x₁ ∧ x₁ < ∞" D = Die('d1', 6) assert upretty(where(D > 4)) == u'Domain: d₁ = 5 ∨ d₁ = 6' A = Exponential('a', 1) B = Exponential('b', 1) assert upretty(pspace(Tuple(A, B)).domain) == \ u'Domain: 0 ≤ a ∧ 0 ≤ b ∧ a < ∞ ∧ b < ∞' def test_PrettyPoly(): F = QQ.frac_field(x, y) R = QQ.poly_ring(x, y) expr = F.convert(x/(x + y)) assert pretty(expr) == "x/(x + y)" assert upretty(expr) == u"x/(x + y)" expr = R.convert(x + y) assert pretty(expr) == "x + y" assert upretty(expr) == u"x + y" def test_issue_6285(): assert pretty(Pow(2, -5, evaluate=False)) == '1 \n--\n 5\n2 ' assert pretty(Pow(x, (1/pi))) == 'pi___\n\\/ x ' def test_issue_6359(): assert pretty(Integral(x**2, x)**2) == \ """\ 2 / / \\ \n\ | | | \n\ | | 2 | \n\ | | x dx| \n\ | | | \n\ \\/ / \ """ assert upretty(Integral(x**2, x)**2) == \ u("""\ 2 ⎛⌠ ⎞ \n\ ⎜⎮ 2 ⎟ \n\ ⎜⎮ x dx⎟ \n\ ⎝⌡ ⎠ \ """) assert pretty(Sum(x**2, (x, 0, 1))**2) == \ """\ 2 / 1 \\ \n\ | ___ | \n\ | \\ ` | \n\ | \\ 2| \n\ | / x | \n\ | /__, | \n\ \\x = 0 / \ """ assert upretty(Sum(x**2, (x, 0, 1))**2) == \ u("""\ 2 ⎛ 1 ⎞ \n\ ⎜ ___ ⎟ \n\ ⎜ ╲ ⎟ \n\ ⎜ ╲ 2⎟ \n\ ⎜ ╱ x ⎟ \n\ ⎜ 
╱ ⎟ \n\ ⎜ ‾‾‾ ⎟ \n\ ⎝x = 0 ⎠ \ """) assert pretty(Product(x**2, (x, 1, 2))**2) == \ """\ 2 / 2 \\ \n\ |______ | \n\ | | | 2| \n\ | | | x | \n\ | | | | \n\ \\x = 1 / \ """ assert upretty(Product(x**2, (x, 1, 2))**2) == \ u("""\ 2 ⎛ 2 ⎞ \n\ ⎜─┬──┬─ ⎟ \n\ ⎜ │ │ 2⎟ \n\ ⎜ │ │ x ⎟ \n\ ⎜ │ │ ⎟ \n\ ⎝x = 1 ⎠ \ """) f = Function('f') assert pretty(Derivative(f(x), x)**2) == \ """\ 2 /d \\ \n\ |--(f(x))| \n\ \\dx / \ """ assert upretty(Derivative(f(x), x)**2) == \ u("""\ 2 ⎛d ⎞ \n\ ⎜──(f(x))⎟ \n\ ⎝dx ⎠ \ """) def test_issue_6739(): ascii_str = \ """\ 1 \n\ -----\n\ ___\n\ \\/ x \ """ ucode_str = \ u("""\ 1 \n\ ──\n\ √x\ """) assert pretty(1/sqrt(x)) == ascii_str assert upretty(1/sqrt(x)) == ucode_str def test_complicated_symbol_unchanged(): for symb_name in ["dexpr2_d1tau", "dexpr2^d1tau"]: assert pretty(Symbol(symb_name)) == symb_name def test_categories(): from sympy.categories import (Object, IdentityMorphism, NamedMorphism, Category, Diagram, DiagramGrid) A1 = Object("A1") A2 = Object("A2") A3 = Object("A3") f1 = NamedMorphism(A1, A2, "f1") f2 = NamedMorphism(A2, A3, "f2") id_A1 = IdentityMorphism(A1) K1 = Category("K1") assert pretty(A1) == "A1" assert upretty(A1) == u"A₁" assert pretty(f1) == "f1:A1-->A2" assert upretty(f1) == u"f₁:A₁——▶A₂" assert pretty(id_A1) == "id:A1-->A1" assert upretty(id_A1) == u"id:A₁——▶A₁" assert pretty(f2*f1) == "f2*f1:A1-->A3" assert upretty(f2*f1) == u"f₂∘f₁:A₁——▶A₃" assert pretty(K1) == "K1" assert upretty(K1) == u"K₁" # Test how diagrams are printed. d = Diagram() assert pretty(d) == "EmptySet" assert upretty(d) == u"∅" d = Diagram({f1: "unique", f2: S.EmptySet}) assert pretty(d) == "{f2*f1:A1-->A3: EmptySet, id:A1-->A1: " \ "EmptySet, id:A2-->A2: EmptySet, id:A3-->A3: " \ "EmptySet, f1:A1-->A2: {unique}, f2:A2-->A3: EmptySet}" assert upretty(d) == u("{f₂∘f₁:A₁——▶A₃: ∅, id:A₁——▶A₁: ∅, " \ "id:A₂——▶A₂: ∅, id:A₃——▶A₃: ∅, f₁:A₁——▶A₂: {unique}, f₂:A₂——▶A₃: ∅}") d = Diagram({f1: "unique", f2: S.EmptySet}, {f2 * f1: "unique"}) assert pretty(d) == "{f2*f1:A1-->A3: EmptySet, id:A1-->A1: " \ "EmptySet, id:A2-->A2: EmptySet, id:A3-->A3: " \ "EmptySet, f1:A1-->A2: {unique}, f2:A2-->A3: EmptySet}" \ " ==> {f2*f1:A1-->A3: {unique}}" assert upretty(d) == u("{f₂∘f₁:A₁——▶A₃: ∅, id:A₁——▶A₁: ∅, id:A₂——▶A₂: " \ "∅, id:A₃——▶A₃: ∅, f₁:A₁——▶A₂: {unique}, f₂:A₂——▶A₃: ∅}" \ " ══▶ {f₂∘f₁:A₁——▶A₃: {unique}}") grid = DiagramGrid(d) assert pretty(grid) == "A1 A2\n \nA3 " assert upretty(grid) == u"A₁ A₂\n \nA₃ " def test_PrettyModules(): R = QQ.old_poly_ring(x, y) F = R.free_module(2) M = F.submodule([x, y], [1, x**2]) ucode_str = \ u("""\ 2\n\ ℚ[x, y] \ """) ascii_str = \ """\ 2\n\ QQ[x, y] \ """ assert upretty(F) == ucode_str assert pretty(F) == ascii_str ucode_str = \ u("""\ ╱ ⎡ 2⎤╲\n\ ╲[x, y], ⎣1, x ⎦╱\ """) ascii_str = \ """\ 2 \n\ <[x, y], [1, x ]>\ """ assert upretty(M) == ucode_str assert pretty(M) == ascii_str I = R.ideal(x**2, y) ucode_str = \ u("""\ ╱ 2 ╲\n\ ╲x , y╱\ """) ascii_str = \ """\ 2 \n\ <x , y>\ """ assert upretty(I) == ucode_str assert pretty(I) == ascii_str Q = F / M ucode_str = \ u("""\ 2 \n\ ℚ[x, y] \n\ ─────────────────\n\ ╱ ⎡ 2⎤╲\n\ ╲[x, y], ⎣1, x ⎦╱\ """) ascii_str = \ """\ 2 \n\ QQ[x, y] \n\ -----------------\n\ 2 \n\ <[x, y], [1, x ]>\ """ assert upretty(Q) == ucode_str assert pretty(Q) == ascii_str ucode_str = \ u("""\ ╱⎡ 3⎤ ╲\n\ │⎢ x ⎥ ╱ ⎡ 2⎤╲ ╱ ⎡ 2⎤╲│\n\ │⎢1, ──⎥ + ╲[x, y], ⎣1, x ⎦╱, [2, y] + ╲[x, y], ⎣1, x ⎦╱│\n\ ╲⎣ 2 ⎦ ╱\ """) ascii_str = \ """\ 3 \n\ x 2 2 \n\ <[1, --] + <[x, y], [1, x ]>, [2, y] + <[x, y], [1, x ]>>\n\ 2 \ """ def test_QuotientRing(): R = 
QQ.old_poly_ring(x)/[x**2 + 1] ucode_str = \ u("""\ ℚ[x] \n\ ────────\n\ ╱ 2 ╲\n\ ╲x + 1╱\ """) ascii_str = \ """\ QQ[x] \n\ --------\n\ 2 \n\ <x + 1>\ """ assert upretty(R) == ucode_str assert pretty(R) == ascii_str ucode_str = \ u("""\ ╱ 2 ╲\n\ 1 + ╲x + 1╱\ """) ascii_str = \ """\ 2 \n\ 1 + <x + 1>\ """ assert upretty(R.one) == ucode_str assert pretty(R.one) == ascii_str def test_Homomorphism(): from sympy.polys.agca import homomorphism R = QQ.old_poly_ring(x) expr = homomorphism(R.free_module(1), R.free_module(1), [0]) ucode_str = \ u("""\ 1 1\n\ [0] : ℚ[x] ──> ℚ[x] \ """) ascii_str = \ """\ 1 1\n\ [0] : QQ[x] --> QQ[x] \ """ assert upretty(expr) == ucode_str assert pretty(expr) == ascii_str expr = homomorphism(R.free_module(2), R.free_module(2), [0, 0]) ucode_str = \ u("""\ ⎡0 0⎤ 2 2\n\ ⎢ ⎥ : ℚ[x] ──> ℚ[x] \n\ ⎣0 0⎦ \ """) ascii_str = \ """\ [0 0] 2 2\n\ [ ] : QQ[x] --> QQ[x] \n\ [0 0] \ """ assert upretty(expr) == ucode_str assert pretty(expr) == ascii_str expr = homomorphism(R.free_module(1), R.free_module(1) / [[x]], [0]) ucode_str = \ u("""\ 1\n\ 1 ℚ[x] \n\ [0] : ℚ[x] ──> ─────\n\ <[x]>\ """) ascii_str = \ """\ 1\n\ 1 QQ[x] \n\ [0] : QQ[x] --> ------\n\ <[x]> \ """ assert upretty(expr) == ucode_str assert pretty(expr) == ascii_str def test_Tr(): A, B = symbols('A B', commutative=False) t = Tr(A*B) assert pretty(t) == r'Tr(A*B)' assert upretty(t) == u'Tr(A⋅B)' def test_pretty_Add(): eq = Mul(-2, x - 2, evaluate=False) + 5 assert pretty(eq) == '5 - 2*(x - 2)' def test_issue_7179(): assert upretty(Not(Equivalent(x, y))) == u'x ⇎ y' assert upretty(Not(Implies(x, y))) == u'x ↛ y' def test_issue_7180(): assert upretty(Equivalent(x, y)) == u'x ⇔ y' def test_pretty_Complement(): assert pretty(S.Reals - S.Naturals) == '(-oo, oo) \\ Naturals' assert upretty(S.Reals - S.Naturals) == u'ℝ \\ ℕ' assert pretty(S.Reals - S.Naturals0) == '(-oo, oo) \\ Naturals0' assert upretty(S.Reals - S.Naturals0) == u'ℝ \\ ℕ₀' def test_pretty_SymmetricDifference(): from sympy import SymmetricDifference, Interval from sympy.utilities.pytest import raises assert upretty(SymmetricDifference(Interval(2,3), Interval(3,5), \ evaluate = False)) == u'[2, 3] ∆ [3, 5]' with raises(NotImplementedError): pretty(SymmetricDifference(Interval(2,3), Interval(3,5), evaluate = False)) def test_pretty_Contains(): assert pretty(Contains(x, S.Integers)) == 'Contains(x, Integers)' assert upretty(Contains(x, S.Integers)) == u'x ∈ ℤ' def test_issue_8292(): from sympy.core import sympify e = sympify('((x+x**4)/(x-1))-(2*(x-1)**4/(x-1)**4)', evaluate=False) ucode_str = \ u("""\ 4 4 \n\ 2⋅(x - 1) x + x\n\ - ────────── + ──────\n\ 4 x - 1 \n\ (x - 1) \ """) ascii_str = \ """\ 4 4 \n\ 2*(x - 1) x + x\n\ - ---------- + ------\n\ 4 x - 1 \n\ (x - 1) \ """ assert pretty(e) == ascii_str assert upretty(e) == ucode_str def test_issue_4335(): y = Function('y') expr = -y(x).diff(x) ucode_str = \ u("""\ d \n\ -──(y(x))\n\ dx \ """) ascii_str = \ """\ d \n\ - --(y(x))\n\ dx \ """ assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_issue_8344(): from sympy.core import sympify e = sympify('2*x*y**2/1**2 + 1', evaluate=False) ucode_str = \ u("""\ 2 \n\ 2⋅x⋅y \n\ ────── + 1\n\ 2 \n\ 1 \ """) assert upretty(e) == ucode_str def test_issue_6324(): x = Pow(2, 3, evaluate=False) y = Pow(10, -2, evaluate=False) e = Mul(x, y, evaluate=False) ucode_str = \ u("""\ 3\n\ 2 \n\ ───\n\ 2\n\ 10 \ """) assert upretty(e) == ucode_str def test_issue_7927(): e = sin(x/2)**cos(x/2) ucode_str = \ u("""\ ⎛x⎞\n\ cos⎜─⎟\n\ ⎝2⎠\n\ ⎛ ⎛x⎞⎞ \n\ ⎜sin⎜─⎟⎟ \n\ 
⎝ ⎝2⎠⎠ \ """) assert upretty(e) == ucode_str e = sin(x)**(S(11)/13) ucode_str = \ u("""\ 11\n\ ──\n\ 13\n\ (sin(x)) \ """) assert upretty(e) == ucode_str def test_issue_6134(): from sympy.abc import lamda, t phi = Function('phi') e = lamda*x*Integral(phi(t)*pi*sin(pi*t), (t, 0, 1)) + lamda*x**2*Integral(phi(t)*2*pi*sin(2*pi*t), (t, 0, 1)) ucode_str = \ u("""\ 1 1 \n\ 2 ⌠ ⌠ \n\ λ⋅x ⋅⎮ 2⋅π⋅φ(t)⋅sin(2⋅π⋅t) dt + λ⋅x⋅⎮ π⋅φ(t)⋅sin(π⋅t) dt\n\ ⌡ ⌡ \n\ 0 0 \ """) assert upretty(e) == ucode_str def test_issue_9877(): ucode_str1 = u'(2, 3) ∪ ([1, 2] \\ {x})' a, b, c = Interval(2, 3, True, True), Interval(1, 2), FiniteSet(x) assert upretty(Union(a, Complement(b, c))) == ucode_str1 ucode_str2 = u'{x} ∩ {y} ∩ ({z} \\ [1, 2])' d, e, f, g = FiniteSet(x), FiniteSet(y), FiniteSet(z), Interval(1, 2) assert upretty(Intersection(d, e, Complement(f, g))) == ucode_str2 def test_issue_13651(): expr1 = c + Mul(-1, a + b, evaluate=False) assert pretty(expr1) == 'c - (a + b)' expr2 = c + Mul(-1, a - b + d, evaluate=False) assert pretty(expr2) == 'c - (a - b + d)' def test_pretty_primenu(): from sympy.ntheory.factor_ import primenu ascii_str1 = "nu(n)" ucode_str1 = u("ν(n)") n = symbols('n', integer=True) assert pretty(primenu(n)) == ascii_str1 assert upretty(primenu(n)) == ucode_str1 def test_pretty_primeomega(): from sympy.ntheory.factor_ import primeomega ascii_str1 = "Omega(n)" ucode_str1 = u("Ω(n)") n = symbols('n', integer=True) assert pretty(primeomega(n)) == ascii_str1 assert upretty(primeomega(n)) == ucode_str1 def test_pretty_Mod(): from sympy.core import Mod ascii_str1 = "x mod 7" ucode_str1 = u("x mod 7") ascii_str2 = "(x + 1) mod 7" ucode_str2 = u("(x + 1) mod 7") ascii_str3 = "2*x mod 7" ucode_str3 = u("2⋅x mod 7") ascii_str4 = "(x mod 7) + 1" ucode_str4 = u("(x mod 7) + 1") ascii_str5 = "2*(x mod 7)" ucode_str5 = u("2⋅(x mod 7)") x = symbols('x', integer=True) assert pretty(Mod(x, 7)) == ascii_str1 assert upretty(Mod(x, 7)) == ucode_str1 assert pretty(Mod(x + 1, 7)) == ascii_str2 assert upretty(Mod(x + 1, 7)) == ucode_str2 assert pretty(Mod(2 * x, 7)) == ascii_str3 assert upretty(Mod(2 * x, 7)) == ucode_str3 assert pretty(Mod(x, 7) + 1) == ascii_str4 assert upretty(Mod(x, 7) + 1) == ucode_str4 assert pretty(2 * Mod(x, 7)) == ascii_str5 assert upretty(2 * Mod(x, 7)) == ucode_str5 def test_issue_11801(): assert pretty(Symbol("")) == "" assert upretty(Symbol("")) == "" def test_pretty_UnevaluatedExpr(): x = symbols('x') he = UnevaluatedExpr(1/x) ucode_str = \ u("""\ 1\n\ ─\n\ x\ """) assert upretty(he) == ucode_str ucode_str = \ u("""\ 2\n\ ⎛1⎞ \n\ ⎜─⎟ \n\ ⎝x⎠ \ """) assert upretty(he**2) == ucode_str ucode_str = \ u("""\ 1\n\ 1 + ─\n\ x\ """) assert upretty(he + 1) == ucode_str ucode_str = \ u('''\ 1\n\ x⋅─\n\ x\ ''') assert upretty(x*he) == ucode_str def test_issue_10472(): M = (Matrix([[0, 0], [0, 0]]), Matrix([0, 0])) ucode_str = \ u("""\ ⎛⎡0 0⎤ ⎡0⎤⎞ ⎜⎢ ⎥, ⎢ ⎥⎟ ⎝⎣0 0⎦ ⎣0⎦⎠\ """) assert upretty(M) == ucode_str def test_MatrixElement_printing(): # test cases for issue #11821 A = MatrixSymbol("A", 1, 3) B = MatrixSymbol("B", 1, 3) C = MatrixSymbol("C", 1, 3) ascii_str1 = "A_00" ucode_str1 = u("A₀₀") assert pretty(A[0, 0]) == ascii_str1 assert upretty(A[0, 0]) == ucode_str1 ascii_str1 = "3*A_00" ucode_str1 = u("3⋅A₀₀") assert pretty(3*A[0, 0]) == ascii_str1 assert upretty(3*A[0, 0]) == ucode_str1 ascii_str1 = "(-B + A)[0, 0]" ucode_str1 = u("(-B + A)[0, 0]") F = C[0, 0].subs(C, A - B) assert pretty(F) == ascii_str1 assert upretty(F) == ucode_str1 def test_issue_12675(): from sympy.vector import CoordSys3D 
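    # Issue 12675 concerns pretty printing of scalar coefficients attached to
    # CoordSys3D basis vectors (the (x/y)**t and 1/y coefficients checked
    # below).  A small self-contained sketch of the same idea; the demo_*
    # names are illustrative, and the expected 'i_demo'/'j_demo' substrings
    # are an assumption mirroring the i_A/j_A forms asserted elsewhere in
    # this file:
    demo_sys = CoordSys3D('demo')
    demo_vec = 2*demo_sys.i + demo_sys.j
    demo_out = upretty(demo_vec)  # expected to contain 'i_demo' and 'j_demo'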
x, y, t, j = symbols('x y t j') e = CoordSys3D('e') ucode_str = \ u("""\ ⎛ t⎞ \n\ ⎜⎛x⎞ ⎟ j_e\n\ ⎜⎜─⎟ ⎟ \n\ ⎝⎝y⎠ ⎠ \ """) assert upretty((x/y)**t*e.j) == ucode_str ucode_str = \ u("""\ ⎛1⎞ \n\ ⎜─⎟ j_e\n\ ⎝y⎠ \ """) assert upretty((1/y)*e.j) == ucode_str def test_MatrixSymbol_printing(): # test cases for issue #14237 A = MatrixSymbol("A", 3, 3) B = MatrixSymbol("B", 3, 3) C = MatrixSymbol("C", 3, 3) assert pretty(-A*B*C) == "-A*B*C" assert pretty(A - B) == "-B + A" assert pretty(A*B*C - A*B - B*C) == "-A*B -B*C + A*B*C" # issue #14814 x = MatrixSymbol('x', n, n) y = MatrixSymbol('y*', n, n) assert pretty(x + y) == "x + y*" ascii_str = \ """\ 2 \n\ -2*y* -a*x\ """ assert pretty(-a*x + -2*y*y) == ascii_str def test_degree_printing(): expr1 = 90*degree assert pretty(expr1) == u'90°' expr2 = x*degree assert pretty(expr2) == u'x°' expr3 = cos(x*degree + 90*degree) assert pretty(expr3) == u'cos(x° + 90°)' def test_vector_expr_pretty_printing(): A = CoordSys3D('A') assert upretty(Cross(A.i, A.x*A.i+3*A.y*A.j)) == u("(i_A)×((x_A) i_A + (3⋅y_A) j_A)") assert upretty(x*Cross(A.i, A.j)) == u('x⋅(i_A)×(j_A)') assert upretty(Curl(A.x*A.i + 3*A.y*A.j)) == u("∇×((x_A) i_A + (3⋅y_A) j_A)") assert upretty(Divergence(A.x*A.i + 3*A.y*A.j)) == u("∇⋅((x_A) i_A + (3⋅y_A) j_A)") assert upretty(Dot(A.i, A.x*A.i+3*A.y*A.j)) == u("(i_A)⋅((x_A) i_A + (3⋅y_A) j_A)") assert upretty(Gradient(A.x+3*A.y)) == u("∇(x_A + 3⋅y_A)") assert upretty(Laplacian(A.x+3*A.y)) == u("∆(x_A + 3⋅y_A)") # TODO: add support for ASCII pretty. def test_pretty_print_tensor_expr(): L = TensorIndexType("L") i, j, k = tensor_indices("i j k", L) i0 = tensor_indices("i_0", L) A, B, C, D = tensor_heads("A B C D", [L]) H = TensorHead("H", [L, L]) expr = -i ascii_str = \ """\ -i\ """ ucode_str = \ u("""\ -i\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A(i) ascii_str = \ """\ i\n\ A \n\ \ """ ucode_str = \ u("""\ i\n\ A \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A(i0) ascii_str = \ """\ i_0\n\ A \n\ \ """ ucode_str = \ u("""\ i₀\n\ A \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A(-i) ascii_str = \ """\ \n\ A \n\ i\ """ ucode_str = \ u("""\ \n\ A \n\ i\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = -3*A(-i) ascii_str = \ """\ \n\ -3*A \n\ i\ """ ucode_str = \ u("""\ \n\ -3⋅A \n\ i\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = H(i, -j) ascii_str = \ """\ i \n\ H \n\ j\ """ ucode_str = \ u("""\ i \n\ H \n\ j\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = H(i, -i) ascii_str = \ """\ L_0 \n\ H \n\ L_0\ """ ucode_str = \ u("""\ L₀ \n\ H \n\ L₀\ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = H(i, -j)*A(j)*B(k) ascii_str = \ """\ i L_0 k\n\ H *A *B \n\ L_0 \ """ ucode_str = \ u("""\ i L₀ k\n\ H ⋅A ⋅B \n\ L₀ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (1+x)*A(i) ascii_str = \ """\ i\n\ (x + 1)*A \n\ \ """ ucode_str = \ u("""\ i\n\ (x + 1)⋅A \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A(i) + 3*B(i) ascii_str = \ """\ i i\n\ A + 3*B \n\ \ """ ucode_str = \ u("""\ i i\n\ A + 3⋅B \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_pretty_print_tensor_partial_deriv(): from sympy.tensor.toperators import PartialDerivative from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, tensor_heads L 
= TensorIndexType("L") i, j, k = tensor_indices("i j k", L) A, B, C, D = tensor_heads("A B C D", [L]) H = TensorHead("H", [L, L]) expr = PartialDerivative(A(i), A(j)) ascii_str = \ """\ d / i\\\n\ ---|A |\n\ j\\ /\n\ dA \n\ \ """ ucode_str = \ u("""\ ∂ ⎛ i⎞\n\ ───⎜A ⎟\n\ j⎝ ⎠\n\ ∂A \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A(i)*PartialDerivative(H(k, -i), A(j)) ascii_str = \ """\ L_0 d / k \\\n\ A *---|H |\n\ j\\ L_0/\n\ dA \n\ \ """ ucode_str = \ u("""\ L₀ ∂ ⎛ k ⎞\n\ A ⋅───⎜H ⎟\n\ j⎝ L₀⎠\n\ ∂A \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = A(i)*PartialDerivative(B(k)*C(-i) + 3*H(k, -i), A(j)) ascii_str = \ """\ L_0 d / k k \\\n\ A *---|B *C + 3*H |\n\ j\\ L_0 L_0/\n\ dA \n\ \ """ ucode_str = \ u("""\ L₀ ∂ ⎛ k k ⎞\n\ A ⋅───⎜B ⋅C + 3⋅H ⎟\n\ j⎝ L₀ L₀⎠\n\ ∂A \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (A(i) + B(i))*PartialDerivative(C(j), D(j)) ascii_str = \ """\ / i i\\ d / L_0\\\n\ |A + B |*-----|C |\n\ \\ / L_0\\ /\n\ dD \n\ \ """ ucode_str = \ u("""\ ⎛ i i⎞ ∂ ⎛ L₀⎞\n\ ⎜A + B ⎟⋅────⎜C ⎟\n\ ⎝ ⎠ L₀⎝ ⎠\n\ ∂D \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = (A(i) + B(i))*PartialDerivative(C(-i), D(j)) ascii_str = \ """\ / L_0 L_0\\ d / \\\n\ |A + B |*---|C |\n\ \\ / j\\ L_0/\n\ dD \n\ \ """ ucode_str = \ u("""\ ⎛ L₀ L₀⎞ ∂ ⎛ ⎞\n\ ⎜A + B ⎟⋅───⎜C ⎟\n\ ⎝ ⎠ j⎝ L₀⎠\n\ ∂D \n\ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = PartialDerivative(B(-i) + A(-i), A(-j), A(-n)) ucode_str = u("""\ 2 \n\ ∂ ⎛ ⎞\n\ ───────⎜A + B ⎟\n\ ⎝ i i⎠\n\ ∂A ∂A \n\ n j \ """) assert upretty(expr) == ucode_str expr = PartialDerivative(3*A(-i), A(-j), A(-n)) ucode_str = u("""\ 2 \n\ ∂ ⎛ ⎞\n\ ───────⎜3⋅A ⎟\n\ ⎝ i⎠\n\ ∂A ∂A \n\ n j \ """) assert upretty(expr) == ucode_str expr = TensorElement(H(i, j), {i:1}) ascii_str = \ """\ i=1,j\n\ H \n\ \ """ ucode_str = ascii_str assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = TensorElement(H(i, j), {i: 1, j: 1}) ascii_str = \ """\ i=1,j=1\n\ H \n\ \ """ ucode_str = ascii_str assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = TensorElement(H(i, j), {j: 1}) ascii_str = \ """\ i,j=1\n\ H \n\ \ """ ucode_str = ascii_str expr = TensorElement(H(-i, j), {-i: 1}) ascii_str = \ """\ j\n\ H \n\ i=1 \ """ ucode_str = ascii_str assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_issue_15560(): a = MatrixSymbol('a', 1, 1) e = pretty(a*(KroneckerProduct(a, a))) result = 'a*(a x a)' assert e == result def test_print_lerchphi(): # Part of issue 6013 a = Symbol('a') pretty(lerchphi(a, 1, 2)) uresult = u'Φ(a, 1, 2)' aresult = 'lerchphi(a, 1, 2)' assert pretty(lerchphi(a, 1, 2)) == aresult assert upretty(lerchphi(a, 1, 2)) == uresult def test_issue_15583(): N = mechanics.ReferenceFrame('N') result = '(n_x, n_y, n_z)' e = pretty((N.x, N.y, N.z)) assert e == result def test_matrixSymbolBold(): # Issue 15871 def boldpretty(expr): return xpretty(expr, use_unicode=True, wrap_line=False, mat_symbol_style="bold") from sympy import trace A = MatrixSymbol("A", 2, 2) assert boldpretty(trace(A)) == u'tr(𝐀)' A = MatrixSymbol("A", 3, 3) B = MatrixSymbol("B", 3, 3) C = MatrixSymbol("C", 3, 3) assert boldpretty(-A) == u'-𝐀' assert boldpretty(A - A*B - B) == u'-𝐁 -𝐀⋅𝐁 + 𝐀' assert boldpretty(-A*B - A*B*C - B) == u'-𝐁 -𝐀⋅𝐁 -𝐀⋅𝐁⋅𝐂' A = MatrixSymbol("Addot", 3, 3) assert boldpretty(A) == u'𝐀̈' omega = MatrixSymbol("omega", 3, 3) assert boldpretty(omega) == u'ω' omega 
= MatrixSymbol("omeganorm", 3, 3) assert boldpretty(omega) == u'‖ω‖' a = Symbol('alpha') b = Symbol('b') c = MatrixSymbol("c", 3, 1) d = MatrixSymbol("d", 3, 1) assert boldpretty(a*B*c+b*d) == u'b⋅𝐝 + α⋅𝐁⋅𝐜' d = MatrixSymbol("delta", 3, 1) B = MatrixSymbol("Beta", 3, 3) assert boldpretty(a*B*c+b*d) == u'b⋅δ + α⋅Β⋅𝐜' A = MatrixSymbol("A_2", 3, 3) assert boldpretty(A) == u'𝐀₂' def test_center_accent(): assert center_accent('a', u'\N{COMBINING TILDE}') == u'ã' assert center_accent('aa', u'\N{COMBINING TILDE}') == u'aã' assert center_accent('aaa', u'\N{COMBINING TILDE}') == u'aãa' assert center_accent('aaaa', u'\N{COMBINING TILDE}') == u'aaãa' assert center_accent('aaaaa', u'\N{COMBINING TILDE}') == u'aaãaa' assert center_accent('abcdefg', u'\N{COMBINING FOUR DOTS ABOVE}') == u'abcd⃜efg' def test_imaginary_unit(): from sympy import pretty # As it is redefined above assert pretty(1 + I, use_unicode=False) == '1 + I' assert pretty(1 + I, use_unicode=True) == u'1 + ⅈ' assert pretty(1 + I, use_unicode=False, imaginary_unit='j') == '1 + I' assert pretty(1 + I, use_unicode=True, imaginary_unit='j') == u'1 + ⅉ' raises(TypeError, lambda: pretty(I, imaginary_unit=I)) raises(ValueError, lambda: pretty(I, imaginary_unit="kkk")) def test_str_special_matrices(): from sympy.matrices import Identity, ZeroMatrix, OneMatrix assert pretty(Identity(4)) == 'I' assert upretty(Identity(4)) == u'𝕀' assert pretty(ZeroMatrix(2, 2)) == '0' assert upretty(ZeroMatrix(2, 2)) == u'𝟘' assert pretty(OneMatrix(2, 2)) == '1' assert upretty(OneMatrix(2, 2)) == u'𝟙' def test_pretty_misc_functions(): assert pretty(LambertW(x)) == 'W(x)' assert upretty(LambertW(x)) == u'W(x)' assert pretty(LambertW(x, y)) == 'W(x, y)' assert upretty(LambertW(x, y)) == u'W(x, y)' assert pretty(airyai(x)) == 'Ai(x)' assert upretty(airyai(x)) == u'Ai(x)' assert pretty(airybi(x)) == 'Bi(x)' assert upretty(airybi(x)) == u'Bi(x)' assert pretty(airyaiprime(x)) == "Ai'(x)" assert upretty(airyaiprime(x)) == u"Ai'(x)" assert pretty(airybiprime(x)) == "Bi'(x)" assert upretty(airybiprime(x)) == u"Bi'(x)" assert pretty(fresnelc(x)) == 'C(x)' assert upretty(fresnelc(x)) == u'C(x)' assert pretty(fresnels(x)) == 'S(x)' assert upretty(fresnels(x)) == u'S(x)' assert pretty(Heaviside(x)) == 'Heaviside(x)' assert upretty(Heaviside(x)) == u'θ(x)' assert pretty(Heaviside(x, y)) == 'Heaviside(x, y)' assert upretty(Heaviside(x, y)) == u'θ(x, y)' assert pretty(dirichlet_eta(x)) == 'dirichlet_eta(x)' assert upretty(dirichlet_eta(x)) == u'η(x)' def test_hadamard_power(): m, n, p = symbols('m, n, p', integer=True) A = MatrixSymbol('A', m, n) B = MatrixSymbol('B', m, n) # Testing printer: expr = hadamard_power(A, n) ascii_str = \ """\ .n\n\ A \ """ ucode_str = \ u("""\ ∘n\n\ A \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hadamard_power(A, 1+n) ascii_str = \ """\ .(n + 1)\n\ A \ """ ucode_str = \ u("""\ ∘(n + 1)\n\ A \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str expr = hadamard_power(A*B.T, 1+n) ascii_str = \ """\ .(n + 1)\n\ / T\\ \n\ \\A*B / \ """ ucode_str = \ u("""\ ∘(n + 1)\n\ ⎛ T⎞ \n\ ⎝A⋅B ⎠ \ """) assert pretty(expr) == ascii_str assert upretty(expr) == ucode_str def test_issue_17258(): n = Symbol('n', integer=True) assert pretty(Sum(n, (n, -oo, 1))) == \ ' 1 \n'\ ' __ \n'\ ' \\ ` \n'\ ' ) n\n'\ ' /_, \n'\ 'n = -oo ' assert upretty(Sum(n, (n, -oo, 1))) == \ u("""\ 1 \n\ ___ \n\ ╲ \n\ ╲ \n\ ╱ n\n\ ╱ \n\ ‾‾‾ \n\ n = -∞ \ """) def test_is_combining(): line = u("v̇_m") assert [is_combining(sym) for sym in line] 
== \
        [False, True, False, False]


def test_issue_17857():
    assert pretty(Range(-oo, oo)) == '{..., -1, 0, 1, ...}'
    assert pretty(Range(oo, -oo, -1)) == '{..., 1, 0, -1, ...}'
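
# Illustrative sketch, not part of the original test module: the assertions
# above compare expressions against hand-built ASCII or unicode art.  A small
# helper for inspecting both renderings side by side while drafting such a
# test might look as follows; the name demo_pretty_pair is hypothetical, and
# the printer is imported under a private name so it does not clash with the
# module-level pretty() wrapper used by the tests.
def demo_pretty_pair(expr):
    from sympy import pretty as _pretty
    ascii_form = _pretty(expr, use_unicode=False, wrap_line=False)
    unicode_form = _pretty(expr, use_unicode=True, wrap_line=False)
    return ascii_form, unicode_form

# Calling demo_pretty_pair on any expression asserted above returns the two
# string forms that a new test case would be written against.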
2f3c96f97fc7ed84c0b2a29ce1dea733a35207ac4a17086750b60ba473d4f54a
from sympy import ( Abs, acos, acosh, Add, And, asin, asinh, atan, Ci, cos, sinh, cosh, tanh, Derivative, diff, DiracDelta, E, Ei, Eq, exp, erf, erfc, erfi, EulerGamma, Expr, factor, Function, gamma, gammasimp, I, Idx, im, IndexedBase, integrate, Interval, Lambda, LambertW, log, Matrix, Max, meijerg, Min, nan, Ne, O, oo, pi, Piecewise, polar_lift, Poly, polygamma, Rational, re, S, Si, sign, simplify, sin, sinc, SingularityFunction, sqrt, sstr, Sum, Symbol, symbols, sympify, tan, trigsimp, Tuple, lerchphi, exp_polar, li, hyper ) from sympy.core.compatibility import range from sympy.core.expr import unchanged from sympy.functions.elementary.complexes import periodic_argument from sympy.functions.elementary.integers import floor from sympy.integrals.integrals import Integral from sympy.integrals.risch import NonElementaryIntegral from sympy.physics import units from sympy.utilities.pytest import (raises, slow, skip, ON_TRAVIS, warns_deprecated_sympy) from sympy.utilities.randtest import verify_numerically x, y, a, t, x_1, x_2, z, s, b = symbols('x y a t x_1 x_2 z s b') n = Symbol('n', integer=True) f = Function('f') def NS(e, n=15, **options): return sstr(sympify(e).evalf(n, **options), full_prec=True) def test_principal_value(): g = 1 / x assert Integral(g, (x, -oo, oo)).principal_value() == 0 assert Integral(g, (y, -oo, oo)).principal_value() == oo * sign(1 / x) raises(ValueError, lambda: Integral(g, (x)).principal_value()) raises(ValueError, lambda: Integral(g).principal_value()) l = 1 / ((x ** 3) - 1) assert Integral(l, (x, -oo, oo)).principal_value() == -sqrt(3)*pi/3 raises(ValueError, lambda: Integral(l, (x, -oo, 1)).principal_value()) d = 1 / (x ** 2 - 1) assert Integral(d, (x, -oo, oo)).principal_value() == 0 assert Integral(d, (x, -2, 2)).principal_value() == -log(3) v = x / (x ** 2 - 1) assert Integral(v, (x, -oo, oo)).principal_value() == 0 assert Integral(v, (x, -2, 2)).principal_value() == 0 s = x ** 2 / (x ** 2 - 1) assert Integral(s, (x, -oo, oo)).principal_value() is oo assert Integral(s, (x, -2, 2)).principal_value() == -log(3) + 4 f = 1 / ((x ** 2 - 1) * (1 + x ** 2)) assert Integral(f, (x, -oo, oo)).principal_value() == -pi / 2 assert Integral(f, (x, -2, 2)).principal_value() == -atan(2) - log(3) / 2 def diff_test(i): """Return the set of symbols, s, which were used in testing that i.diff(s) agrees with i.doit().diff(s). 
If there is an error then the assertion will fail, causing the test to fail.""" syms = i.free_symbols for s in syms: assert (i.diff(s).doit() - i.doit().diff(s)).expand() == 0 return syms def test_improper_integral(): assert integrate(log(x), (x, 0, 1)) == -1 assert integrate(x**(-2), (x, 1, oo)) == 1 assert integrate(1/(1 + exp(x)), (x, 0, oo)) == log(2) def test_constructor(): # this is shared by Sum, so testing Integral's constructor # is equivalent to testing Sum's s1 = Integral(n, n) assert s1.limits == (Tuple(n),) s2 = Integral(n, (n,)) assert s2.limits == (Tuple(n),) s3 = Integral(Sum(x, (x, 1, y))) assert s3.limits == (Tuple(y),) s4 = Integral(n, Tuple(n,)) assert s4.limits == (Tuple(n),) s5 = Integral(n, (n, Interval(1, 2))) assert s5.limits == (Tuple(n, 1, 2),) # Testing constructor with inequalities: s6 = Integral(n, n > 10) assert s6.limits == (Tuple(n, 10, oo),) s7 = Integral(n, (n > 2) & (n < 5)) assert s7.limits == (Tuple(n, 2, 5),) def test_basics(): assert Integral(0, x) != 0 assert Integral(x, (x, 1, 1)) != 0 assert Integral(oo, x) != oo assert Integral(S.NaN, x) is S.NaN assert diff(Integral(y, y), x) == 0 assert diff(Integral(x, (x, 0, 1)), x) == 0 assert diff(Integral(x, x), x) == x assert diff(Integral(t, (t, 0, x)), x) == x e = (t + 1)**2 assert diff(integrate(e, (t, 0, x)), x) == \ diff(Integral(e, (t, 0, x)), x).doit().expand() == \ ((1 + x)**2).expand() assert diff(integrate(e, (t, 0, x)), t) == \ diff(Integral(e, (t, 0, x)), t) == 0 assert diff(integrate(e, (t, 0, x)), a) == \ diff(Integral(e, (t, 0, x)), a) == 0 assert diff(integrate(e, t), a) == diff(Integral(e, t), a) == 0 assert integrate(e, (t, a, x)).diff(x) == \ Integral(e, (t, a, x)).diff(x).doit().expand() assert Integral(e, (t, a, x)).diff(x).doit() == ((1 + x)**2) assert integrate(e, (t, x, a)).diff(x).doit() == (-(1 + x)**2).expand() assert integrate(t**2, (t, x, 2*x)).diff(x) == 7*x**2 assert Integral(x, x).atoms() == {x} assert Integral(f(x), (x, 0, 1)).atoms() == {S.Zero, S.One, x} assert diff_test(Integral(x, (x, 3*y))) == {y} assert diff_test(Integral(x, (a, 3*y))) == {x, y} assert integrate(x, (x, oo, oo)) == 0 #issue 8171 assert integrate(x, (x, -oo, -oo)) == 0 # sum integral of terms assert integrate(y + x + exp(x), x) == x*y + x**2/2 + exp(x) assert Integral(x).is_commutative n = Symbol('n', commutative=False) assert Integral(n + x, x).is_commutative is False def test_diff_wrt(): class Test(Expr): _diff_wrt = True is_commutative = True t = Test() assert integrate(t + 1, t) == t**2/2 + t assert integrate(t + 1, (t, 0, 1)) == Rational(3, 2) raises(ValueError, lambda: integrate(x + 1, x + 1)) raises(ValueError, lambda: integrate(x + 1, (x + 1, 0, 1))) def test_basics_multiple(): assert diff_test(Integral(x, (x, 3*x, 5*y), (y, x, 2*x))) == {x} assert diff_test(Integral(x, (x, 5*y), (y, x, 2*x))) == {x} assert diff_test(Integral(x, (x, 5*y), (y, y, 2*x))) == {x, y} assert diff_test(Integral(y, y, x)) == {x, y} assert diff_test(Integral(y*x, x, y)) == {x, y} assert diff_test(Integral(x + y, y, (y, 1, x))) == {x} assert diff_test(Integral(x + y, (x, x, y), (y, y, x))) == {x, y} def test_conjugate_transpose(): A, B = symbols("A B", commutative=False) x = Symbol("x", complex=True) p = Integral(A*B, (x,)) assert p.adjoint().doit() == p.doit().adjoint() assert p.conjugate().doit() == p.doit().conjugate() assert p.transpose().doit() == p.doit().transpose() x = Symbol("x", real=True) p = Integral(A*B, (x,)) assert p.adjoint().doit() == p.doit().adjoint() assert p.conjugate().doit() == p.doit().conjugate() 
assert p.transpose().doit() == p.doit().transpose() def test_integration(): assert integrate(0, (t, 0, x)) == 0 assert integrate(3, (t, 0, x)) == 3*x assert integrate(t, (t, 0, x)) == x**2/2 assert integrate(3*t, (t, 0, x)) == 3*x**2/2 assert integrate(3*t**2, (t, 0, x)) == x**3 assert integrate(1/t, (t, 1, x)) == log(x) assert integrate(-1/t**2, (t, 1, x)) == 1/x - 1 assert integrate(t**2 + 5*t - 8, (t, 0, x)) == x**3/3 + 5*x**2/2 - 8*x assert integrate(x**2, x) == x**3/3 assert integrate((3*t*x)**5, x) == (3*t)**5 * x**6 / 6 b = Symbol("b") c = Symbol("c") assert integrate(a*t, (t, 0, x)) == a*x**2/2 assert integrate(a*t**4, (t, 0, x)) == a*x**5/5 assert integrate(a*t**2 + b*t + c, (t, 0, x)) == a*x**3/3 + b*x**2/2 + c*x def test_multiple_integration(): assert integrate((x**2)*(y**2), (x, 0, 1), (y, -1, 2)) == Rational(1) assert integrate((y**2)*(x**2), x, y) == Rational(1, 9)*(x**3)*(y**3) assert integrate(1/(x + 3)/(1 + x)**3, x) == \ log(3 + x)*Rational(-1, 8) + log(1 + x)*Rational(1, 8) + x/(4 + 8*x + 4*x**2) assert integrate(sin(x*y)*y, (x, 0, 1), (y, 0, 1)) == -sin(1) + 1 def test_issue_3532(): assert integrate(exp(-x), (x, 0, oo)) == 1 def test_issue_3560(): assert integrate(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert integrate(sqrt(x), x) == 2*sqrt(x)**3/3 assert integrate(1/sqrt(x)**3, x) == -2/sqrt(x) def test_issue_18038(): raises(AttributeError, lambda: integrate((x, x))) def test_integrate_poly(): p = Poly(x + x**2*y + y**3, x, y) qx = integrate(p, x) qy = integrate(p, y) assert isinstance(qx, Poly) is True assert isinstance(qy, Poly) is True assert qx.gens == (x, y) assert qy.gens == (x, y) assert qx.as_expr() == x**2/2 + x**3*y/3 + x*y**3 assert qy.as_expr() == x*y + x**2*y**2/2 + y**4/4 def test_integrate_poly_defined(): p = Poly(x + x**2*y + y**3, x, y) Qx = integrate(p, (x, 0, 1)) Qy = integrate(p, (y, 0, pi)) assert isinstance(Qx, Poly) is True assert isinstance(Qy, Poly) is True assert Qx.gens == (y,) assert Qy.gens == (x,) assert Qx.as_expr() == S.Half + y/3 + y**3 assert Qy.as_expr() == pi**4/4 + pi*x + pi**2*x**2/2 def test_integrate_omit_var(): y = Symbol('y') assert integrate(x) == x**2/2 raises(ValueError, lambda: integrate(2)) raises(ValueError, lambda: integrate(x*y)) def test_integrate_poly_accurately(): y = Symbol('y') assert integrate(x*sin(y), x) == x**2*sin(y)/2 # when passed to risch_norman, this will be a CPU hog, so this really # checks, that integrated function is recognized as polynomial assert integrate(x**1000*sin(y), x) == x**1001*sin(y)/1001 def test_issue_3635(): y = Symbol('y') assert integrate(x**2, y) == x**2*y assert integrate(x**2, (y, -1, 1)) == 2*x**2 # works in sympy and py.test but hangs in `setup.py test` def test_integrate_linearterm_pow(): # check integrate((a*x+b)^c, x) -- issue 3499 y = Symbol('y', positive=True) # TODO: Remove conds='none' below, let the assumption take care of it. 
assert integrate(x**y, x, conds='none') == x**(y + 1)/(y + 1) assert integrate((exp(y)*x + 1/y)**(1 + sin(y)), x, conds='none') == \ exp(-y)*(exp(y)*x + 1/y)**(2 + sin(y)) / (2 + sin(y)) def test_issue_3618(): assert integrate(pi*sqrt(x), x) == 2*pi*sqrt(x)**3/3 assert integrate(pi*sqrt(x) + E*sqrt(x)**3, x) == \ 2*pi*sqrt(x)**3/3 + 2*E *sqrt(x)**5/5 def test_issue_3623(): assert integrate(cos((n + 1)*x), x) == Piecewise( (sin(x*(n + 1))/(n + 1), Ne(n + 1, 0)), (x, True)) assert integrate(cos((n - 1)*x), x) == Piecewise( (sin(x*(n - 1))/(n - 1), Ne(n - 1, 0)), (x, True)) assert integrate(cos((n + 1)*x) + cos((n - 1)*x), x) == \ Piecewise((sin(x*(n - 1))/(n - 1), Ne(n - 1, 0)), (x, True)) + \ Piecewise((sin(x*(n + 1))/(n + 1), Ne(n + 1, 0)), (x, True)) def test_issue_3664(): n = Symbol('n', integer=True, nonzero=True) assert integrate(-1./2 * x * sin(n * pi * x/2), [x, -2, 0]) == \ 2.0*cos(pi*n)/(pi*n) assert integrate(x * sin(n * pi * x/2) * Rational(-1, 2), [x, -2, 0]) == \ 2*cos(pi*n)/(pi*n) def test_issue_3679(): # definite integration of rational functions gives wrong answers assert NS(Integral(1/(x**2 - 8*x + 17), (x, 2, 4))) == '1.10714871779409' def test_issue_3686(): # remove this when fresnel itegrals are implemented from sympy import expand_func, fresnels assert expand_func(integrate(sin(x**2), x)) == \ sqrt(2)*sqrt(pi)*fresnels(sqrt(2)*x/sqrt(pi))/2 def test_integrate_units(): m = units.m s = units.s assert integrate(x * m/s, (x, 1*s, 5*s)) == 12*m*s def test_transcendental_functions(): assert integrate(LambertW(2*x), x) == \ -x + x*LambertW(2*x) + x/LambertW(2*x) def test_log_polylog(): assert integrate(log(1 - x)/x, (x, 0, 1)) == -pi**2/6 assert integrate(log(x)*(1 - x)**(-1), (x, 0, 1)) == -pi**2/6 def test_issue_3740(): f = 4*log(x) - 2*log(x)**2 fid = diff(integrate(f, x), x) assert abs(f.subs(x, 42).evalf() - fid.subs(x, 42).evalf()) < 1e-10 def test_issue_3788(): assert integrate(1/(1 + x**2), x) == atan(x) def test_issue_3952(): f = sin(x) assert integrate(f, x) == -cos(x) raises(ValueError, lambda: integrate(f, 2*x)) def test_issue_4516(): assert integrate(2**x - 2*x, x) == 2**x/log(2) - x**2 def test_issue_7450(): ans = integrate(exp(-(1 + I)*x), (x, 0, oo)) assert re(ans) == S.Half and im(ans) == Rational(-1, 2) def test_issue_8623(): assert integrate((1 + cos(2*x)) / (3 - 2*cos(2*x)), (x, 0, pi)) == -pi/2 + sqrt(5)*pi/2 assert integrate((1 + cos(2*x))/(3 - 2*cos(2*x))) == -x/2 + sqrt(5)*(atan(sqrt(5)*tan(x)) + \ pi*floor((x - pi/2)/pi))/2 def test_issue_9569(): assert integrate(1 / (2 - cos(x)), (x, 0, pi)) == pi/sqrt(3) assert integrate(1/(2 - cos(x))) == 2*sqrt(3)*(atan(sqrt(3)*tan(x/2)) + pi*floor((x/2 - pi/2)/pi))/3 def test_issue_13749(): assert integrate(1 / (2 + cos(x)), (x, 0, pi)) == pi/sqrt(3) assert integrate(1/(2 + cos(x))) == 2*sqrt(3)*(atan(sqrt(3)*tan(x/2)/3) + pi*floor((x/2 - pi/2)/pi))/3 def test_issue_18133(): assert integrate(exp(x)/(1 + x)**2, x) == NonElementaryIntegral(exp(x)/(x + 1)**2, x) def test_matrices(): M = Matrix(2, 2, lambda i, j: (i + j + 1)*sin((i + j + 1)*x)) assert integrate(M, x) == Matrix([ [-cos(x), -cos(2*x)], [-cos(2*x), -cos(3*x)], ]) def test_integrate_functions(): # issue 4111 assert integrate(f(x), x) == Integral(f(x), x) assert integrate(f(x), (x, 0, 1)) == Integral(f(x), (x, 0, 1)) assert integrate(f(x)*diff(f(x), x), x) == f(x)**2/2 assert integrate(diff(f(x), x) / f(x), x) == log(f(x)) def test_integrate_derivatives(): assert integrate(Derivative(f(x), x), x) == f(x) assert integrate(Derivative(f(y), y), x) == 
x*Derivative(f(y), y) assert integrate(Derivative(f(x), x)**2, x) == \ Integral(Derivative(f(x), x)**2, x) def test_transform(): a = Integral(x**2 + 1, (x, -1, 2)) fx = x fy = 3*y + 1 assert a.doit() == a.transform(fx, fy).doit() assert a.transform(fx, fy).transform(fy, fx) == a fx = 3*x + 1 fy = y assert a.transform(fx, fy).transform(fy, fx) == a a = Integral(sin(1/x), (x, 0, 1)) assert a.transform(x, 1/y) == Integral(sin(y)/y**2, (y, 1, oo)) assert a.transform(x, 1/y).transform(y, 1/x) == a a = Integral(exp(-x**2), (x, -oo, oo)) assert a.transform(x, 2*y) == Integral(2*exp(-4*y**2), (y, -oo, oo)) # < 3 arg limit handled properly assert Integral(x, x).transform(x, a*y).doit() == \ Integral(y*a**2, y).doit() _3 = S(3) assert Integral(x, (x, 0, -_3)).transform(x, 1/y).doit() == \ Integral(-1/x**3, (x, -oo, -1/_3)).doit() assert Integral(x, (x, 0, _3)).transform(x, 1/y) == \ Integral(y**(-3), (y, 1/_3, oo)) # issue 8400 i = Integral(x + y, (x, 1, 2), (y, 1, 2)) assert i.transform(x, (x + 2*y, x)).doit() == \ i.transform(x, (x + 2*z, x)).doit() == 3 i = Integral(x, (x, a, b)) assert i.transform(x, 2*s) == Integral(4*s, (s, a/2, b/2)) raises(ValueError, lambda: i.transform(x, 1)) raises(ValueError, lambda: i.transform(x, s*t)) raises(ValueError, lambda: i.transform(x, -s)) raises(ValueError, lambda: i.transform(x, (s, t))) raises(ValueError, lambda: i.transform(2*x, 2*s)) i = Integral(x**2, (x, 1, 2)) raises(ValueError, lambda: i.transform(x**2, s)) am = Symbol('a', negative=True) bp = Symbol('b', positive=True) i = Integral(x, (x, bp, am)) i.transform(x, 2*s) assert i.transform(x, 2*s) == Integral(-4*s, (s, am/2, bp/2)) i = Integral(x, (x, a)) assert i.transform(x, 2*s) == Integral(4*s, (s, a/2)) def test_issue_4052(): f = S.Half*asin(x) + x*sqrt(1 - x**2)/2 assert integrate(cos(asin(x)), x) == f assert integrate(sin(acos(x)), x) == f @slow def test_evalf_integrals(): assert NS(Integral(x, (x, 2, 5)), 15) == '10.5000000000000' gauss = Integral(exp(-x**2), (x, -oo, oo)) assert NS(gauss, 15) == '1.77245385090552' assert NS(gauss**2 - pi + E*Rational( 1, 10**20), 15) in ('2.71828182845904e-20', '2.71828182845905e-20') # A monster of an integral from http://mathworld.wolfram.com/DefiniteIntegral.html t = Symbol('t') a = 8*sqrt(3)/(1 + 3*t**2) b = 16*sqrt(2)*(3*t + 1)*sqrt(4*t**2 + t + 1)**3 c = (3*t**2 + 1)*(11*t**2 + 2*t + 3)**2 d = sqrt(2)*(249*t**2 + 54*t + 65)/(11*t**2 + 2*t + 3)**2 f = a - b/c - d assert NS(Integral(f, (t, 0, 1)), 50) == \ NS((3*sqrt(2) - 49*pi + 162*atan(sqrt(2)))/12, 50) # http://mathworld.wolfram.com/VardisIntegral.html assert NS(Integral(log(log(1/x))/(1 + x + x**2), (x, 0, 1)), 15) == \ NS('pi/sqrt(3) * log(2*pi**(5/6) / gamma(1/6))', 15) # http://mathworld.wolfram.com/AhmedsIntegral.html assert NS(Integral(atan(sqrt(x**2 + 2))/(sqrt(x**2 + 2)*(x**2 + 1)), (x, 0, 1)), 15) == NS(5*pi**2/96, 15) # http://mathworld.wolfram.com/AbelsIntegral.html assert NS(Integral(x/((exp(pi*x) - exp( -pi*x))*(x**2 + 1)), (x, 0, oo)), 15) == NS('log(2)/2-1/4', 15) # Complex part trimming # http://mathworld.wolfram.com/VardisIntegral.html assert NS(Integral(log(log(sin(x)/cos(x))), (x, pi/4, pi/2)), 15, chop=True) == \ NS('pi/4*log(4*pi**3/gamma(1/4)**4)', 15) # # Endpoints causing trouble (rounding error in integration points -> complex log) assert NS( 2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 17, chop=True) == NS(2, 17) assert NS( 2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 20, chop=True) == NS(2, 20) assert NS( 2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 22, chop=True) == NS(2, 22) 
# Needs zero handling assert NS(pi - 4*Integral( 'sqrt(1-x**2)', (x, 0, 1)), 15, maxn=30, chop=True) in ('0.0', '0') # Oscillatory quadrature a = Integral(sin(x)/x**2, (x, 1, oo)).evalf(maxn=15) assert 0.49 < a < 0.51 assert NS( Integral(sin(x)/x**2, (x, 1, oo)), quad='osc') == '0.504067061906928' assert NS(Integral( cos(pi*x + 1)/x, (x, -oo, -1)), quad='osc') == '0.276374705640365' # indefinite integrals aren't evaluated assert NS(Integral(x, x)) == 'Integral(x, x)' assert NS(Integral(x, (x, y))) == 'Integral(x, (x, y))' def test_evalf_issue_939(): # https://github.com/sympy/sympy/issues/4038 # The output form of an integral may differ by a step function between # revisions, making this test a bit useless. This can't be said about # other two tests. For now, all values of this evaluation are used here, # but in future this should be reconsidered. assert NS(integrate(1/(x**5 + 1), x).subs(x, 4), chop=True) in \ ['-0.000976138910649103', '0.965906660135753', '1.93278945918216'] assert NS(Integral(1/(x**5 + 1), (x, 2, 4))) == '0.0144361088886740' assert NS( integrate(1/(x**5 + 1), (x, 2, 4)), chop=True) == '0.0144361088886740' def test_double_previously_failing_integrals(): # Double integrals not implemented <- Sure it is! res = integrate(sqrt(x) + x*y, (x, 1, 2), (y, -1, 1)) # Old numerical test assert NS(res, 15) == '2.43790283299492' # Symbolic test assert res == Rational(-4, 3) + 8*sqrt(2)/3 # double integral + zero detection assert integrate(sin(x + x*y), (x, -1, 1), (y, -1, 1)) is S.Zero def test_integrate_SingularityFunction(): in_1 = SingularityFunction(x, a, 3) + SingularityFunction(x, 5, -1) out_1 = SingularityFunction(x, a, 4)/4 + SingularityFunction(x, 5, 0) assert integrate(in_1, x) == out_1 in_2 = 10*SingularityFunction(x, 4, 0) - 5*SingularityFunction(x, -6, -2) out_2 = 10*SingularityFunction(x, 4, 1) - 5*SingularityFunction(x, -6, -1) assert integrate(in_2, x) == out_2 in_3 = 2*x**2*y -10*SingularityFunction(x, -4, 7) - 2*SingularityFunction(y, 10, -2) out_3_1 = 2*x**3*y/3 - 2*x*SingularityFunction(y, 10, -2) - 5*SingularityFunction(x, -4, 8)/4 out_3_2 = x**2*y**2 - 10*y*SingularityFunction(x, -4, 7) - 2*SingularityFunction(y, 10, -1) assert integrate(in_3, x) == out_3_1 assert integrate(in_3, y) == out_3_2 assert unchanged(Integral, in_3, (x,)) assert Integral(in_3, x) == Integral(in_3, (x,)) assert Integral(in_3, x).doit() == out_3_1 in_4 = 10*SingularityFunction(x, -4, 7) - 2*SingularityFunction(x, 10, -2) out_4 = 5*SingularityFunction(x, -4, 8)/4 - 2*SingularityFunction(x, 10, -1) assert integrate(in_4, (x, -oo, x)) == out_4 assert integrate(SingularityFunction(x, 5, -1), x) == SingularityFunction(x, 5, 0) assert integrate(SingularityFunction(x, 0, -1), (x, -oo, oo)) == 1 assert integrate(5*SingularityFunction(x, 5, -1), (x, -oo, oo)) == 5 assert integrate(SingularityFunction(x, 5, -1) * f(x), (x, -oo, oo)) == f(5) def test_integrate_DiracDelta(): # This is here to check that deltaintegrate is being called, but also # to test definite integrals. 
More tests are in test_deltafunctions.py assert integrate(DiracDelta(x) * f(x), (x, -oo, oo)) == f(0) assert integrate(DiracDelta(x)**2, (x, -oo, oo)) == DiracDelta(0) # issue 4522 assert integrate(integrate((4 - 4*x + x*y - 4*y) * \ DiracDelta(x)*DiracDelta(y - 1), (x, 0, 1)), (y, 0, 1)) == 0 # issue 5729 p = exp(-(x**2 + y**2))/pi assert integrate(p*DiracDelta(x - 10*y), (x, -oo, oo), (y, -oo, oo)) == \ integrate(p*DiracDelta(x - 10*y), (y, -oo, oo), (x, -oo, oo)) == \ integrate(p*DiracDelta(10*x - y), (x, -oo, oo), (y, -oo, oo)) == \ integrate(p*DiracDelta(10*x - y), (y, -oo, oo), (x, -oo, oo)) == \ 1/sqrt(101*pi) def test_integrate_returns_piecewise(): assert integrate(x**y, x) == Piecewise( (x**(y + 1)/(y + 1), Ne(y, -1)), (log(x), True)) assert integrate(x**y, y) == Piecewise( (x**y/log(x), Ne(log(x), 0)), (y, True)) assert integrate(exp(n*x), x) == Piecewise( (exp(n*x)/n, Ne(n, 0)), (x, True)) assert integrate(x*exp(n*x), x) == Piecewise( ((n*x - 1)*exp(n*x)/n**2, Ne(n**2, 0)), (x**2/2, True)) assert integrate(x**(n*y), x) == Piecewise( (x**(n*y + 1)/(n*y + 1), Ne(n*y, -1)), (log(x), True)) assert integrate(x**(n*y), y) == Piecewise( (x**(n*y)/(n*log(x)), Ne(n*log(x), 0)), (y, True)) assert integrate(cos(n*x), x) == Piecewise( (sin(n*x)/n, Ne(n, 0)), (x, True)) assert integrate(cos(n*x)**2, x) == Piecewise( ((n*x/2 + sin(n*x)*cos(n*x)/2)/n, Ne(n, 0)), (x, True)) assert integrate(x*cos(n*x), x) == Piecewise( (x*sin(n*x)/n + cos(n*x)/n**2, Ne(n, 0)), (x**2/2, True)) assert integrate(sin(n*x), x) == Piecewise( (-cos(n*x)/n, Ne(n, 0)), (0, True)) assert integrate(sin(n*x)**2, x) == Piecewise( ((n*x/2 - sin(n*x)*cos(n*x)/2)/n, Ne(n, 0)), (0, True)) assert integrate(x*sin(n*x), x) == Piecewise( (-x*cos(n*x)/n + sin(n*x)/n**2, Ne(n, 0)), (0, True)) assert integrate(exp(x*y), (x, 0, z)) == Piecewise( (exp(y*z)/y - 1/y, (y > -oo) & (y < oo) & Ne(y, 0)), (z, True)) def test_integrate_max_min(): x = symbols('x', real=True) assert integrate(Min(x, 2), (x, 0, 3)) == 4 assert integrate(Max(x**2, x**3), (x, 0, 2)) == Rational(49, 12) assert integrate(Min(exp(x), exp(-x))**2, x) == Piecewise( \ (exp(2*x)/2, x <= 0), (1 - exp(-2*x)/2, True)) # issue 7907 c = symbols('c', extended_real=True) int1 = integrate(Max(c, x)*exp(-x**2), (x, -oo, oo)) int2 = integrate(c*exp(-x**2), (x, -oo, c)) int3 = integrate(x*exp(-x**2), (x, c, oo)) assert int1 == int2 + int3 == sqrt(pi)*c*erf(c)/2 + \ sqrt(pi)*c/2 + exp(-c**2)/2 def test_integrate_Abs_sign(): assert integrate(Abs(x), (x, -2, 1)) == Rational(5, 2) assert integrate(Abs(x), (x, 0, 1)) == S.Half assert integrate(Abs(x + 1), (x, 0, 1)) == Rational(3, 2) assert integrate(Abs(x**2 - 1), (x, -2, 2)) == 4 assert integrate(Abs(x**2 - 3*x), (x, -15, 15)) == 2259 assert integrate(sign(x), (x, -1, 2)) == 1 assert integrate(sign(x)*sin(x), (x, -pi, pi)) == 4 assert integrate(sign(x - 2) * x**2, (x, 0, 3)) == Rational(11, 3) t, s = symbols('t s', real=True) assert integrate(Abs(t), t) == Piecewise( (-t**2/2, t <= 0), (t**2/2, True)) assert integrate(Abs(2*t - 6), t) == Piecewise( (-t**2 + 6*t, t <= 3), (t**2 - 6*t + 18, True)) assert (integrate(abs(t - s**2), (t, 0, 2)) == 2*s**2*Min(2, s**2) - 2*s**2 - Min(2, s**2)**2 + 2) assert integrate(exp(-Abs(t)), t) == Piecewise( (exp(t), t <= 0), (2 - exp(-t), True)) assert integrate(sign(2*t - 6), t) == Piecewise( (-t, t < 3), (t - 6, True)) assert integrate(2*t*sign(t**2 - 1), t) == Piecewise( (t**2, t < -1), (-t**2 + 2, t < 1), (t**2, True)) assert integrate(sign(t), (t, s + 1)) == Piecewise( (s + 1, s + 1 > 0), (-s - 1, s 
+ 1 < 0), (0, True)) def test_subs1(): e = Integral(exp(x - y), x) assert e.subs(y, 3) == Integral(exp(x - 3), x) e = Integral(exp(x - y), (x, 0, 1)) assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 1)) f = Lambda(x, exp(-x**2)) conv = Integral(f(x - y)*f(y), (y, -oo, oo)) assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo)) def test_subs2(): e = Integral(exp(x - y), x, t) assert e.subs(y, 3) == Integral(exp(x - 3), x, t) e = Integral(exp(x - y), (x, 0, 1), (t, 0, 1)) assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 1), (t, 0, 1)) f = Lambda(x, exp(-x**2)) conv = Integral(f(x - y)*f(y), (y, -oo, oo), (t, 0, 1)) assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1)) def test_subs3(): e = Integral(exp(x - y), (x, 0, y), (t, y, 1)) assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 3), (t, 3, 1)) f = Lambda(x, exp(-x**2)) conv = Integral(f(x - y)*f(y), (y, -oo, oo), (t, x, 1)) assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1)) def test_subs4(): e = Integral(exp(x), (x, 0, y), (t, y, 1)) assert e.subs(y, 3) == Integral(exp(x), (x, 0, 3), (t, 3, 1)) f = Lambda(x, exp(-x**2)) conv = Integral(f(y)*f(y), (y, -oo, oo), (t, x, 1)) assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1)) def test_subs5(): e = Integral(exp(-x**2), (x, -oo, oo)) assert e.subs(x, 5) == e e = Integral(exp(-x**2 + y), x) assert e.subs(y, 5) == Integral(exp(-x**2 + 5), x) e = Integral(exp(-x**2 + y), (x, x)) assert e.subs(x, 5) == Integral(exp(y - x**2), (x, 5)) assert e.subs(y, 5) == Integral(exp(-x**2 + 5), x) e = Integral(exp(-x**2 + y), (y, -oo, oo), (x, -oo, oo)) assert e.subs(x, 5) == e assert e.subs(y, 5) == e # Test evaluation of antiderivatives e = Integral(exp(-x**2), (x, x)) assert e.subs(x, 5) == Integral(exp(-x**2), (x, 5)) e = Integral(exp(x), x) assert (e.subs(x,1) - e.subs(x,0) - Integral(exp(x), (x, 0, 1)) ).doit().is_zero def test_subs6(): a, b = symbols('a b') e = Integral(x*y, (x, f(x), f(y))) assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y))) assert e.subs(y, 1) == Integral(x, (x, f(x), f(1))) e = Integral(x*y, (x, f(x), f(y)), (y, f(x), f(y))) assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)), (y, f(1), f(y))) assert e.subs(y, 1) == Integral(x*y, (x, f(x), f(y)), (y, f(x), f(1))) e = Integral(x*y, (x, f(x), f(a)), (y, f(x), f(a))) assert e.subs(a, 1) == Integral(x*y, (x, f(x), f(1)), (y, f(x), f(1))) def test_subs7(): e = Integral(x, (x, 1, y), (y, 1, 2)) assert e.subs({x: 1, y: 2}) == e e = Integral(sin(x) + sin(y), (x, sin(x), sin(y)), (y, 1, 2)) assert e.subs(sin(y), 1) == e assert e.subs(sin(x), 1) == Integral(sin(x) + sin(y), (x, 1, sin(y)), (y, 1, 2)) def test_expand(): e = Integral(f(x)+f(x**2), (x, 1, y)) assert e.expand() == Integral(f(x), (x, 1, y)) + Integral(f(x**2), (x, 1, y)) def test_integration_variable(): raises(ValueError, lambda: Integral(exp(-x**2), 3)) raises(ValueError, lambda: Integral(exp(-x**2), (3, -oo, oo))) def test_expand_integral(): assert Integral(cos(x**2)*(sin(x**2) + 1), (x, 0, 1)).expand() == \ Integral(cos(x**2)*sin(x**2), (x, 0, 1)) + \ Integral(cos(x**2), (x, 0, 1)) assert Integral(cos(x**2)*(sin(x**2) + 1), x).expand() == \ Integral(cos(x**2)*sin(x**2), x) + \ Integral(cos(x**2), x) def test_as_sum_midpoint1(): e = Integral(sqrt(x**3 + 1), (x, 2, 10)) assert e.as_sum(1, method="midpoint") == 8*sqrt(217) assert e.as_sum(2, method="midpoint") == 4*sqrt(65) + 12*sqrt(57) assert e.as_sum(3, method="midpoint") == 8*sqrt(217)/3 + \ 8*sqrt(3081)/27 + 8*sqrt(52809)/27 assert 
e.as_sum(4, method="midpoint") == 2*sqrt(730) + \ 4*sqrt(7) + 4*sqrt(86) + 6*sqrt(14) assert abs(e.as_sum(4, method="midpoint").n() - e.n()) < 0.5 e = Integral(sqrt(x**3 + y**3), (x, 2, 10), (y, 0, 10)) raises(NotImplementedError, lambda: e.as_sum(4)) def test_as_sum_midpoint2(): e = Integral((x + y)**2, (x, 0, 1)) n = Symbol('n', positive=True, integer=True) assert e.as_sum(1, method="midpoint").expand() == Rational(1, 4) + y + y**2 assert e.as_sum(2, method="midpoint").expand() == Rational(5, 16) + y + y**2 assert e.as_sum(3, method="midpoint").expand() == Rational(35, 108) + y + y**2 assert e.as_sum(4, method="midpoint").expand() == Rational(21, 64) + y + y**2 assert e.as_sum(n, method="midpoint").expand() == \ y**2 + y + Rational(1, 3) - 1/(12*n**2) def test_as_sum_left(): e = Integral((x + y)**2, (x, 0, 1)) assert e.as_sum(1, method="left").expand() == y**2 assert e.as_sum(2, method="left").expand() == Rational(1, 8) + y/2 + y**2 assert e.as_sum(3, method="left").expand() == Rational(5, 27) + y*Rational(2, 3) + y**2 assert e.as_sum(4, method="left").expand() == Rational(7, 32) + y*Rational(3, 4) + y**2 assert e.as_sum(n, method="left").expand() == \ y**2 + y + Rational(1, 3) - y/n - 1/(2*n) + 1/(6*n**2) assert e.as_sum(10, method="left", evaluate=False).has(Sum) def test_as_sum_right(): e = Integral((x + y)**2, (x, 0, 1)) assert e.as_sum(1, method="right").expand() == 1 + 2*y + y**2 assert e.as_sum(2, method="right").expand() == Rational(5, 8) + y*Rational(3, 2) + y**2 assert e.as_sum(3, method="right").expand() == Rational(14, 27) + y*Rational(4, 3) + y**2 assert e.as_sum(4, method="right").expand() == Rational(15, 32) + y*Rational(5, 4) + y**2 assert e.as_sum(n, method="right").expand() == \ y**2 + y + Rational(1, 3) + y/n + 1/(2*n) + 1/(6*n**2) def test_as_sum_trapezoid(): e = Integral((x + y)**2, (x, 0, 1)) assert e.as_sum(1, method="trapezoid").expand() == y**2 + y + S.Half assert e.as_sum(2, method="trapezoid").expand() == y**2 + y + Rational(3, 8) assert e.as_sum(3, method="trapezoid").expand() == y**2 + y + Rational(19, 54) assert e.as_sum(4, method="trapezoid").expand() == y**2 + y + Rational(11, 32) assert e.as_sum(n, method="trapezoid").expand() == \ y**2 + y + Rational(1, 3) + 1/(6*n**2) assert Integral(sign(x), (x, 0, 1)).as_sum(1, 'trapezoid') == S.Half def test_as_sum_raises(): e = Integral((x + y)**2, (x, 0, 1)) raises(ValueError, lambda: e.as_sum(-1)) raises(ValueError, lambda: e.as_sum(0)) raises(ValueError, lambda: Integral(x).as_sum(3)) raises(ValueError, lambda: e.as_sum(oo)) raises(ValueError, lambda: e.as_sum(3, method='xxxx2')) def test_nested_doit(): e = Integral(Integral(x, x), x) f = Integral(x, x, x) assert e.doit() == f.doit() def test_issue_4665(): # Allow only upper or lower limit evaluation e = Integral(x**2, (x, None, 1)) f = Integral(x**2, (x, 1, None)) assert e.doit() == Rational(1, 3) assert f.doit() == Rational(-1, 3) assert Integral(x*y, (x, None, y)).subs(y, t) == Integral(x*t, (x, None, t)) assert Integral(x*y, (x, y, None)).subs(y, t) == Integral(x*t, (x, t, None)) assert integrate(x**2, (x, None, 1)) == Rational(1, 3) assert integrate(x**2, (x, 1, None)) == Rational(-1, 3) assert integrate("x**2", ("x", "1", None)) == Rational(-1, 3) def test_integral_reconstruct(): e = Integral(x**2, (x, -1, 1)) assert e == Integral(*e.args) def test_doit_integrals(): e = Integral(Integral(2*x), (x, 0, 1)) assert e.doit() == Rational(1, 3) assert e.doit(deep=False) == Rational(1, 3) f = Function('f') # doesn't matter if the integral can't be performed assert 
Integral(f(x), (x, 1, 1)).doit() == 0 # doesn't matter if the limits can't be evaluated assert Integral(0, (x, 1, Integral(f(x), x))).doit() == 0 assert Integral(x, (a, 0)).doit() == 0 limits = ((a, 1, exp(x)), (x, 0)) assert Integral(a, *limits).doit() == Rational(1, 4) assert Integral(a, *list(reversed(limits))).doit() == 0 def test_issue_4884(): assert integrate(sqrt(x)*(1 + x)) == \ Piecewise( (2*sqrt(x)*(x + 1)**2/5 - 2*sqrt(x)*(x + 1)/15 - 4*sqrt(x)/15, Abs(x + 1) > 1), (2*I*sqrt(-x)*(x + 1)**2/5 - 2*I*sqrt(-x)*(x + 1)/15 - 4*I*sqrt(-x)/15, True)) assert integrate(x**x*(1 + log(x))) == x**x def test_issue_18153(): assert integrate(x**n*log(x),x) == \ Piecewise( (n*x*x**n*log(x)/(n**2 + 2*n + 1) + x*x**n*log(x)/(n**2 + 2*n + 1) - x*x**n/(n**2 + 2*n + 1) , Ne(n, -1)), (log(x)**2/2, True) ) def test_is_number(): from sympy.abc import x, y, z from sympy import cos, sin assert Integral(x).is_number is False assert Integral(1, x).is_number is False assert Integral(1, (x, 1)).is_number is True assert Integral(1, (x, 1, 2)).is_number is True assert Integral(1, (x, 1, y)).is_number is False assert Integral(1, (x, y)).is_number is False assert Integral(x, y).is_number is False assert Integral(x, (y, 1, x)).is_number is False assert Integral(x, (y, 1, 2)).is_number is False assert Integral(x, (x, 1, 2)).is_number is True # `foo.is_number` should always be equivalent to `not foo.free_symbols` # in each of these cases, there are pseudo-free symbols i = Integral(x, (y, 1, 1)) assert i.is_number is False and i.n() == 0 i = Integral(x, (y, z, z)) assert i.is_number is False and i.n() == 0 i = Integral(1, (y, z, z + 2)) assert i.is_number is False and i.n() == 2 assert Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number is True assert Integral(x*y, (x, 1, 2), (y, 1, z)).is_number is False assert Integral(x, (x, 1)).is_number is True assert Integral(x, (x, 1, Integral(y, (y, 1, 2)))).is_number is True assert Integral(Sum(z, (z, 1, 2)), (x, 1, 2)).is_number is True # it is possible to get a false negative if the integrand is # actually an unsimplified zero, but this is true of is_number in general. 
assert Integral(sin(x)**2 + cos(x)**2 - 1, x).is_number is False assert Integral(f(x), (x, 0, 1)).is_number is True def test_symbols(): from sympy.abc import x, y, z assert Integral(0, x).free_symbols == {x} assert Integral(x).free_symbols == {x} assert Integral(x, (x, None, y)).free_symbols == {y} assert Integral(x, (x, y, None)).free_symbols == {y} assert Integral(x, (x, 1, y)).free_symbols == {y} assert Integral(x, (x, y, 1)).free_symbols == {y} assert Integral(x, (x, x, y)).free_symbols == {x, y} assert Integral(x, x, y).free_symbols == {x, y} assert Integral(x, (x, 1, 2)).free_symbols == set() assert Integral(x, (y, 1, 2)).free_symbols == {x} # pseudo-free in this case assert Integral(x, (y, z, z)).free_symbols == {x, z} assert Integral(x, (y, 1, 2), (y, None, None)).free_symbols == {x, y} assert Integral(x, (y, 1, 2), (x, 1, y)).free_symbols == {y} assert Integral(2, (y, 1, 2), (y, 1, x), (x, 1, 2)).free_symbols == set() assert Integral(2, (y, x, 2), (y, 1, x), (x, 1, 2)).free_symbols == set() assert Integral(2, (x, 1, 2), (y, x, 2), (y, 1, 2)).free_symbols == \ {x} def test_is_zero(): from sympy.abc import x, m assert Integral(0, (x, 1, x)).is_zero assert Integral(1, (x, 1, 1)).is_zero assert Integral(1, (x, 1, 2), (y, 2)).is_zero is False assert Integral(x, (m, 0)).is_zero assert Integral(x + m, (m, 0)).is_zero is None i = Integral(m, (m, 1, exp(x)), (x, 0)) assert i.is_zero is None assert Integral(m, (x, 0), (m, 1, exp(x))).is_zero is True assert Integral(x, (x, oo, oo)).is_zero # issue 8171 assert Integral(x, (x, -oo, -oo)).is_zero # this is zero but is beyond the scope of what is_zero # should be doing assert Integral(sin(x), (x, 0, 2*pi)).is_zero is None def test_series(): from sympy.abc import x i = Integral(cos(x), (x, x)) e = i.lseries(x) assert i.nseries(x, n=8).removeO() == Add(*[next(e) for j in range(4)]) def test_trig_nonelementary_integrals(): x = Symbol('x') assert integrate((1 + sin(x))/x, x) == log(x) + Si(x) # next one comes out as log(x) + log(x**2)/2 + Ci(x) # so not hardcoding this log ugliness assert integrate((cos(x) + 2)/x, x).has(Ci) def test_issue_4403(): x = Symbol('x') y = Symbol('y') z = Symbol('z', positive=True) assert integrate(sqrt(x**2 + z**2), x) == \ z**2*asinh(x/z)/2 + x*sqrt(x**2 + z**2)/2 assert integrate(sqrt(x**2 - z**2), x) == \ -z**2*acosh(x/z)/2 + x*sqrt(x**2 - z**2)/2 x = Symbol('x', real=True) y = Symbol('y', positive=True) assert integrate(1/(x**2 + y**2)**S('3/2'), x) == \ x/(y**2*sqrt(x**2 + y**2)) # If y is real and nonzero, we get x*Abs(y)/(y**3*sqrt(x**2 + y**2)), # which results from sqrt(1 + x**2/y**2) = sqrt(x**2 + y**2)/|y|. 
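
# Illustrative sketch, not part of the original suite: many indefinite results
# asserted in this file can be spot-checked by differentiating the
# antiderivative and simplifying the difference against the integrand.  The
# helper name _roundtrip_ok is hypothetical; only public API is used, and a
# related check is automated by diff_test() near the top of this file.
def _roundtrip_ok(integrand, var):
    from sympy import integrate, simplify
    antiderivative = integrate(integrand, var)
    return simplify(antiderivative.diff(var) - integrand) == 0

# For example, _roundtrip_ok(cos(x), x) holds: integrate(cos(x), x) gives
# sin(x), whose derivative is again cos(x).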
def test_issue_4403_2(): assert integrate(sqrt(-x**2 - 4), x) == \ -2*atan(x/sqrt(-4 - x**2)) + x*sqrt(-4 - x**2)/2 def test_issue_4100(): R = Symbol('R', positive=True) assert integrate(sqrt(R**2 - x**2), (x, 0, R)) == pi*R**2/4 def test_issue_5167(): from sympy.abc import w, x, y, z f = Function('f') assert Integral(Integral(f(x), x), x) == Integral(f(x), x, x) assert Integral(f(x)).args == (f(x), Tuple(x)) assert Integral(Integral(f(x))).args == (f(x), Tuple(x), Tuple(x)) assert Integral(Integral(f(x)), y).args == (f(x), Tuple(x), Tuple(y)) assert Integral(Integral(f(x), z), y).args == (f(x), Tuple(z), Tuple(y)) assert Integral(Integral(Integral(f(x), x), y), z).args == \ (f(x), Tuple(x), Tuple(y), Tuple(z)) assert integrate(Integral(f(x), x), x) == Integral(f(x), x, x) assert integrate(Integral(f(x), y), x) == y*Integral(f(x), x) assert integrate(Integral(f(x), x), y) in [Integral(y*f(x), x), y*Integral(f(x), x)] assert integrate(Integral(2, x), x) == x**2 assert integrate(Integral(2, x), y) == 2*x*y # don't re-order given limits assert Integral(1, x, y).args != Integral(1, y, x).args # do as many as possible assert Integral(f(x), y, x, y, x).doit() == y**2*Integral(f(x), x, x)/2 assert Integral(f(x), (x, 1, 2), (w, 1, x), (z, 1, y)).doit() == \ y*(x - 1)*Integral(f(x), (x, 1, 2)) - (x - 1)*Integral(f(x), (x, 1, 2)) def test_issue_4890(): z = Symbol('z', positive=True) assert integrate(exp(-log(x)**2), x) == \ sqrt(pi)*exp(Rational(1, 4))*erf(log(x) - S.Half)/2 assert integrate(exp(log(x)**2), x) == \ sqrt(pi)*exp(Rational(-1, 4))*erfi(log(x)+S.Half)/2 assert integrate(exp(-z*log(x)**2), x) == \ sqrt(pi)*exp(1/(4*z))*erf(sqrt(z)*log(x) - 1/(2*sqrt(z)))/(2*sqrt(z)) def test_issue_4551(): assert not integrate(1/(x*sqrt(1 - x**2)), x).has(Integral) def test_issue_4376(): n = Symbol('n', integer=True, positive=True) assert simplify(integrate(n*(x**(1/n) - 1), (x, 0, S.Half)) - (n**2 - 2**(1/n)*n**2 - n*2**(1/n))/(2**(1 + 1/n) + n*2**(1 + 1/n))) == 0 def test_issue_4517(): assert integrate((sqrt(x) - x**3)/x**Rational(1, 3), x) == \ 6*x**Rational(7, 6)/7 - 3*x**Rational(11, 3)/11 def test_issue_4527(): k, m = symbols('k m', integer=True) assert integrate(sin(k*x)*sin(m*x), (x, 0, pi)).simplify() == \ Piecewise((0, Eq(k, 0) | Eq(m, 0)), (-pi/2, Eq(k, -m) | (Eq(k, 0) & Eq(m, 0))), (pi/2, Eq(k, m) | (Eq(k, 0) & Eq(m, 0))), (0, True)) # Should be possible to further simplify to: # Piecewise( # (0, Eq(k, 0) | Eq(m, 0)), # (-pi/2, Eq(k, -m)), # (pi/2, Eq(k, m)), # (0, True)) assert integrate(sin(k*x)*sin(m*x), (x,)) == Piecewise( (0, And(Eq(k, 0), Eq(m, 0))), (-x*sin(m*x)**2/2 - x*cos(m*x)**2/2 + sin(m*x)*cos(m*x)/(2*m), Eq(k, -m)), (x*sin(m*x)**2/2 + x*cos(m*x)**2/2 - sin(m*x)*cos(m*x)/(2*m), Eq(k, m)), (m*sin(k*x)*cos(m*x)/(k**2 - m**2) - k*sin(m*x)*cos(k*x)/(k**2 - m**2), True)) def test_issue_4199(): ypos = Symbol('y', positive=True) # TODO: Remove conds='none' below, let the assumption take care of it. 
assert integrate(exp(-I*2*pi*ypos*x)*x, (x, -oo, oo), conds='none') == \ Integral(exp(-I*2*pi*ypos*x)*x, (x, -oo, oo)) @slow def test_issue_3940(): a, b, c, d = symbols('a:d', positive=True, finite=True) assert integrate(exp(-x**2 + I*c*x), x) == \ -sqrt(pi)*exp(-c**2/4)*erf(I*c/2 - x)/2 assert integrate(exp(a*x**2 + b*x + c), x) == \ sqrt(pi)*exp(c)*exp(-b**2/(4*a))*erfi(sqrt(a)*x + b/(2*sqrt(a)))/(2*sqrt(a)) from sympy import expand_mul from sympy.abc import k assert expand_mul(integrate(exp(-x**2)*exp(I*k*x), (x, -oo, oo))) == \ sqrt(pi)*exp(-k**2/4) a, d = symbols('a d', positive=True) assert expand_mul(integrate(exp(-a*x**2 + 2*d*x), (x, -oo, oo))) == \ sqrt(pi)*exp(d**2/a)/sqrt(a) def test_issue_5413(): # Note that this is not the same as testing ratint() because integrate() # pulls out the coefficient. assert integrate(-a/(a**2 + x**2), x) == I*log(-I*a + x)/2 - I*log(I*a + x)/2 def test_issue_4892a(): A, z = symbols('A z') c = Symbol('c', nonzero=True) P1 = -A*exp(-z) P2 = -A/(c*t)*(sin(x)**2 + cos(y)**2) h1 = -sin(x)**2 - cos(y)**2 h2 = -sin(x)**2 + sin(y)**2 - 1 # there is still some non-deterministic behavior in integrate # or trigsimp which permits one of the following assert integrate(c*(P2 - P1), t) in [ c*(-A*(-h1)*log(c*t)/c + A*t*exp(-z)), c*(-A*(-h2)*log(c*t)/c + A*t*exp(-z)), c*( A* h1 *log(c*t)/c + A*t*exp(-z)), c*( A* h2 *log(c*t)/c + A*t*exp(-z)), (A*c*t - A*(-h1)*log(t)*exp(z))*exp(-z), (A*c*t - A*(-h2)*log(t)*exp(z))*exp(-z), ] def test_issue_4892b(): # Issues relating to issue 4596 are making the actual result of this hard # to test. The answer should be something like # # (-sin(y) + sqrt(-72 + 48*cos(y) - 8*cos(y)**2)/2)*log(x + sqrt(-72 + # 48*cos(y) - 8*cos(y)**2)/(2*(3 - cos(y)))) + (-sin(y) - sqrt(-72 + # 48*cos(y) - 8*cos(y)**2)/2)*log(x - sqrt(-72 + 48*cos(y) - # 8*cos(y)**2)/(2*(3 - cos(y)))) + x**2*sin(y)/2 + 2*x*cos(y) expr = (sin(y)*x**3 + 2*cos(y)*x**2 + 12)/(x**2 + 2) assert trigsimp(factor(integrate(expr, x).diff(x) - expr)) == 0 def test_issue_5178(): assert integrate(sin(x)*f(y, z), (x, 0, pi), (y, 0, pi), (z, 0, pi)) == \ 2*Integral(f(y, z), (y, 0, pi), (z, 0, pi)) def test_integrate_series(): f = sin(x).series(x, 0, 10) g = x**2/2 - x**4/24 + x**6/720 - x**8/40320 + x**10/3628800 + O(x**11) assert integrate(f, x) == g assert diff(integrate(f, x), x) == f assert integrate(O(x**5), x) == O(x**6) def test_atom_bug(): from sympy import meijerg from sympy.integrals.heurisch import heurisch assert heurisch(meijerg([], [], [1], [], x), x) is None def test_limit_bug(): z = Symbol('z', zero=False) assert integrate(sin(x*y*z), (x, 0, pi), (y, 0, pi)) == \ (log(z) + EulerGamma + log(pi))/z - Ci(pi**2*z)/z + log(pi)/z def test_issue_4703(): g = Function('g') assert integrate(exp(x)*g(x), x).has(Integral) def test_issue_1888(): f = Function('f') assert integrate(f(x).diff(x)**2, x).has(Integral) # The following tests work using meijerint. def test_issue_3558(): from sympy import Si assert integrate(cos(x*y), (x, -pi/2, pi/2), (y, 0, pi)) == 2*Si(pi**2/2) def test_issue_4422(): assert integrate(1/sqrt(16 + 4*x**2), x) == asinh(x/2) / 2 def test_issue_4493(): from sympy import simplify assert simplify(integrate(x*sqrt(1 + 2*x), x)) == \ sqrt(2*x + 1)*(6*x**2 + x - 1)/15 def test_issue_4737(): assert integrate(sin(x)/x, (x, -oo, oo)) == pi assert integrate(sin(x)/x, (x, 0, oo)) == pi/2 assert integrate(sin(x)/x, x) == Si(x) def test_issue_4992(): # Note: psi in _check_antecedents becomes NaN. 
from sympy import simplify, expand_func, polygamma, gamma a = Symbol('a', positive=True) assert simplify(expand_func(integrate(exp(-x)*log(x)*x**a, (x, 0, oo)))) == \ (a*polygamma(0, a) + 1)*gamma(a) def test_issue_4487(): from sympy import lowergamma, simplify assert simplify(integrate(exp(-x)*x**y, x)) == lowergamma(y + 1, x) def test_issue_4215(): x = Symbol("x") assert integrate(1/(x**2), (x, -1, 1)) is oo def test_issue_4400(): n = Symbol('n', integer=True, positive=True) assert integrate((x**n)*log(x), x) == \ n*x*x**n*log(x)/(n**2 + 2*n + 1) + x*x**n*log(x)/(n**2 + 2*n + 1) - \ x*x**n/(n**2 + 2*n + 1) def test_issue_6253(): # Note: this used to raise NotImplementedError # Note: psi in _check_antecedents becomes NaN. assert integrate((sqrt(1 - x) + sqrt(1 + x))**2/x, x, meijerg=True) == \ Integral((sqrt(-x + 1) + sqrt(x + 1))**2/x, x) def test_issue_4153(): assert integrate(1/(1 + x + y + z), (x, 0, 1), (y, 0, 1), (z, 0, 1)) in [ -12*log(3) - 3*log(6)/2 + 3*log(8)/2 + 5*log(2) + 7*log(4), 6*log(2) + 8*log(4) - 27*log(3)/2, 22*log(2) - 27*log(3)/2, -12*log(3) - 3*log(6)/2 + 47*log(2)/2] def test_issue_4326(): R, b, h = symbols('R b h') # It doesn't matter if we can do the integral. Just make sure the result # doesn't contain nan. This is really a test against _eval_interval. e = integrate(((h*(x - R + b))/b)*sqrt(R**2 - x**2), (x, R - b, R)) assert not e.has(nan) # See that it evaluates assert not e.has(Integral) def test_powers(): assert integrate(2**x + 3**x, x) == 2**x/log(2) + 3**x/log(3) def test_manual_option(): raises(ValueError, lambda: integrate(1/x, x, manual=True, meijerg=True)) # an example of a function that manual integration cannot handle assert integrate(log(1+x)/x, (x, 0, 1), manual=True).has(Integral) def test_meijerg_option(): raises(ValueError, lambda: integrate(1/x, x, meijerg=True, risch=True)) # an example of a function that meijerg integration cannot handle assert integrate(tan(x), x, meijerg=True) == Integral(tan(x), x) def test_risch_option(): # risch=True only allowed on indefinite integrals raises(ValueError, lambda: integrate(1/log(x), (x, 0, oo), risch=True)) assert integrate(exp(-x**2), x, risch=True) == NonElementaryIntegral(exp(-x**2), x) assert integrate(log(1/x)*y, x, y, risch=True) == y**2*(x*log(1/x)/2 + x/2) assert integrate(erf(x), x, risch=True) == Integral(erf(x), x) # TODO: How to test risch=False? 
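
# Illustrative sketch, not part of the original file: the *_option tests in
# this region all follow one pattern -- force a single integration strategy
# with a keyword flag and check either the closed form or that an unevaluated
# Integral comes back.  The helper below is hypothetical; risch=True is left
# out here because it is exercised separately in test_risch_option above.
def _compare_strategies(expr, var):
    from sympy import integrate
    return {
        'manual': integrate(expr, var, manual=True),
        'meijerg': integrate(expr, var, meijerg=True),
        'heurisch': integrate(expr, var, heurisch=True),
    }

# e.g. _compare_strategies(exp(x), x) is expected to return exp(x) (or an
# equivalent form) under each strategy.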
def test_heurisch_option(): raises(ValueError, lambda: integrate(1/x, x, risch=True, heurisch=True)) # an integral that heurisch can handle assert integrate(exp(x**2), x, heurisch=True) == sqrt(pi)*erfi(x)/2 # an integral that heurisch currently cannot handle assert integrate(exp(x)/x, x, heurisch=True) == Integral(exp(x)/x, x) # an integral where heurisch currently hangs, issue 15471 assert integrate(log(x)*cos(log(x))/x**Rational(3, 4), x, heurisch=False) == ( -128*x**Rational(1, 4)*sin(log(x))/289 + 240*x**Rational(1, 4)*cos(log(x))/289 + (16*x**Rational(1, 4)*sin(log(x))/17 + 4*x**Rational(1, 4)*cos(log(x))/17)*log(x)) def test_issue_6828(): f = 1/(1.08*x**2 - 4.3) g = integrate(f, x).diff(x) assert verify_numerically(f, g, tol=1e-12) def test_issue_4803(): x_max = Symbol("x_max") assert integrate(y/pi*exp(-(x_max - x)/cos(a)), x) == \ y*exp((x - x_max)/cos(a))*cos(a)/pi def test_issue_4234(): assert integrate(1/sqrt(1 + tan(x)**2)) == tan(x)/sqrt(1 + tan(x)**2) def test_issue_4492(): assert simplify(integrate(x**2 * sqrt(5 - x**2), x)) == Piecewise( (I*(2*x**5 - 15*x**3 + 25*x - 25*sqrt(x**2 - 5)*acosh(sqrt(5)*x/5)) / (8*sqrt(x**2 - 5)), 1 < Abs(x**2)/5), ((-2*x**5 + 15*x**3 - 25*x + 25*sqrt(-x**2 + 5)*asin(sqrt(5)*x/5)) / (8*sqrt(-x**2 + 5)), True)) def test_issue_2708(): # This test needs to use an integration function that can # not be evaluated in closed form. Update as needed. f = 1/(a + z + log(z)) integral_f = NonElementaryIntegral(f, (z, 2, 3)) assert Integral(f, (z, 2, 3)).doit() == integral_f assert integrate(f + exp(z), (z, 2, 3)) == integral_f - exp(2) + exp(3) assert integrate(2*f + exp(z), (z, 2, 3)) == \ 2*integral_f - exp(2) + exp(3) assert integrate(exp(1.2*n*s*z*(-t + z)/t), (z, 0, x)) == \ NonElementaryIntegral(exp(-1.2*n*s*z)*exp(1.2*n*s*z**2/t), (z, 0, x)) def test_issue_2884(): f = (4.000002016020*x + 4.000002016020*y + 4.000006024032)*exp(10.0*x) e = integrate(f, (x, 0.1, 0.2)) assert str(e) == '1.86831064982608*y + 2.16387491480008' def test_issue_8368(): assert integrate(exp(-s*x)*cosh(x), (x, 0, oo)) == \ Piecewise( ( pi*Piecewise( ( -s/(pi*(-s**2 + 1)), Abs(s**2) < 1), ( 1/(pi*s*(1 - 1/s**2)), Abs(s**(-2)) < 1), ( meijerg( ((S.Half,), (0, 0)), ((0, S.Half), (0,)), polar_lift(s)**2), True) ), And( Abs(periodic_argument(polar_lift(s)**2, oo)) < pi, cos(Abs(periodic_argument(polar_lift(s)**2, oo))/2)*sqrt(Abs(s**2)) - 1 > 0, Ne(s**2, 1)) ), ( Integral(exp(-s*x)*cosh(x), (x, 0, oo)), True)) assert integrate(exp(-s*x)*sinh(x), (x, 0, oo)) == \ Piecewise( ( -1/(s + 1)/2 - 1/(-s + 1)/2, And( Ne(1/s, 1), Abs(periodic_argument(s, oo)) < pi/2, Abs(periodic_argument(s, oo)) <= pi/2, cos(Abs(periodic_argument(s, oo)))*Abs(s) - 1 > 0)), ( Integral(exp(-s*x)*sinh(x), (x, 0, oo)), True)) def test_issue_8901(): assert integrate(sinh(1.0*x)) == 1.0*cosh(1.0*x) assert integrate(tanh(1.0*x)) == 1.0*x - 1.0*log(tanh(1.0*x) + 1) assert integrate(tanh(x)) == x - log(tanh(x) + 1) @slow def test_issue_8945(): assert integrate(sin(x)**3/x, (x, 0, 1)) == -Si(3)/4 + 3*Si(1)/4 assert integrate(sin(x)**3/x, (x, 0, oo)) == pi/4 assert integrate(cos(x)**2/x**2, x) == -Si(2*x) - cos(2*x)/(2*x) - 1/(2*x) @slow def test_issue_7130(): if ON_TRAVIS: skip("Too slow for travis.") i, L, a, b = symbols('i L a b') integrand = (cos(pi*i*x/L)**2 / (a + b*x)).rewrite(exp) assert x not in integrate(integrand, (x, 0, L)).free_symbols def test_issue_10567(): a, b, c, t = symbols('a b c t') vt = Matrix([a*t, b, c]) assert integrate(vt, t) == Integral(vt, t).doit() assert integrate(vt, t) == 
Matrix([[a*t**2/2], [b*t], [c*t]]) def test_issue_11856(): t = symbols('t') assert integrate(sinc(pi*t), t) == Si(pi*t)/pi @slow def test_issue_11876(): assert integrate(sqrt(log(1/x)), (x, 0, 1)) == sqrt(pi)/2 def test_issue_4950(): assert integrate((-60*exp(x) - 19.2*exp(4*x))*exp(4*x), x) ==\ -2.4*exp(8*x) - 12.0*exp(5*x) def test_issue_4968(): assert integrate(sin(log(x**2))) == x*sin(2*log(x))/5 - 2*x*cos(2*log(x))/5 def test_singularities(): assert integrate(1/x**2, (x, -oo, oo)) is oo assert integrate(1/x**2, (x, -1, 1)) is oo assert integrate(1/(x - 1)**2, (x, -2, 2)) is oo assert integrate(1/x**2, (x, 1, -1)) is -oo assert integrate(1/(x - 1)**2, (x, 2, -2)) is -oo def test_issue_12645(): x, y = symbols('x y', real=True) assert (integrate(sin(x*x*x + y*y), (x, -sqrt(pi - y*y), sqrt(pi - y*y)), (y, -sqrt(pi), sqrt(pi))) == Integral(sin(x**3 + y**2), (x, -sqrt(-y**2 + pi), sqrt(-y**2 + pi)), (y, -sqrt(pi), sqrt(pi)))) def test_issue_12677(): assert integrate(sin(x) / (cos(x)**3) , (x, 0, pi/6)) == Rational(1,6) def test_issue_14078(): assert integrate((cos(3*x)-cos(x))/x, (x, 0, oo)) == -log(3) def test_issue_14064(): assert integrate(1/cosh(x), (x, 0, oo)) == pi/2 def test_issue_14027(): assert integrate(1/(1 + exp(x - S.Half)/(1 + exp(x))), x) == \ x - exp(S.Half)*log(exp(x) + exp(S.Half)/(1 + exp(S.Half)))/(exp(S.Half) + E) def test_issue_8170(): assert integrate(tan(x), (x, 0, pi/2)) is S.Infinity def test_issue_8440_14040(): assert integrate(1/x, (x, -1, 1)) is S.NaN assert integrate(1/(x + 1), (x, -2, 3)) is S.NaN def test_issue_14096(): assert integrate(1/(x + y)**2, (x, 0, 1)) == -1/(y + 1) + 1/y assert integrate(1/(1 + x + y + z)**2, (x, 0, 1), (y, 0, 1), (z, 0, 1)) == \ -4*log(4) - 6*log(2) + 9*log(3) def test_issue_14144(): assert Abs(integrate(1/sqrt(1 - x**3), (x, 0, 1)).n() - 1.402182) < 1e-6 assert Abs(integrate(sqrt(1 - x**3), (x, 0, 1)).n() - 0.841309) < 1e-6 def test_issue_14375(): # This raised a TypeError. The antiderivative has exp_polar, which # may be possible to unpolarify, so the exact output is not asserted here. 
assert integrate(exp(I*x)*log(x), x).has(Ei) def test_issue_14437(): f = Function('f')(x, y, z) assert integrate(f, (x, 0, 1), (y, 0, 2), (z, 0, 3)) == \ Integral(f, (x, 0, 1), (y, 0, 2), (z, 0, 3)) def test_issue_14470(): assert integrate(1/sqrt(exp(x) + 1), x) == \ log(-1 + 1/sqrt(exp(x) + 1)) - log(1 + 1/sqrt(exp(x) + 1)) def test_issue_14877(): f = exp(1 - exp(x**2)*x + 2*x**2)*(2*x**3 + x)/(1 - exp(x**2)*x)**2 assert integrate(f, x) == \ -exp(2*x**2 - x*exp(x**2) + 1)/(x*exp(3*x**2) - exp(2*x**2)) def test_issue_14782(): f = sqrt(-x**2 + 1)*(-x**2 + x) assert integrate(f, [x, -1, 1]) == - pi / 8 @slow def test_issue_14782_slow(): f = sqrt(-x**2 + 1)*(-x**2 + x) assert integrate(f, [x, 0, 1]) == S.One / 3 - pi / 16 def test_issue_12081(): f = x**(Rational(-3, 2))*exp(-x) assert integrate(f, [x, 0, oo]) is oo def test_issue_15285(): y = 1/x - 1 f = 4*y*exp(-2*y)/x**2 assert integrate(f, [x, 0, 1]) == 1 def test_issue_15432(): assert integrate(x**n * exp(-x) * log(x), (x, 0, oo)).gammasimp() == Piecewise( (gamma(n + 1)*polygamma(0, n) + gamma(n + 1)/n, re(n) + 1 > 0), (Integral(x**n*exp(-x)*log(x), (x, 0, oo)), True)) def test_issue_15124(): omega = IndexedBase('omega') m, p = symbols('m p', cls=Idx) assert integrate(exp(x*I*(omega[m] + omega[p])), x, conds='none') == \ -I*exp(I*x*omega[m])*exp(I*x*omega[p])/(omega[m] + omega[p]) def test_issue_15218(): with warns_deprecated_sympy(): Integral(Eq(x, y)) with warns_deprecated_sympy(): assert Integral(Eq(x, y), x) == Eq(Integral(x, x), Integral(y, x)) with warns_deprecated_sympy(): assert Integral(Eq(x, y), x).doit() == Eq(x**2/2, x*y) with warns_deprecated_sympy(): assert Eq(x, y).integrate(x) == Eq(x**2/2, x*y) # These are not deprecated because they are definite integrals assert integrate(Eq(x, y), (x, 0, 1)) == Eq(S.Half, y) assert Eq(x, y).integrate((x, 0, 1)) == Eq(S.Half, y) def test_issue_15292(): res = integrate(exp(-x**2*cos(2*t)) * cos(x**2*sin(2*t)), (x, 0, oo)) assert isinstance(res, Piecewise) assert gammasimp((res - sqrt(pi)/2 * cos(t)).subs(t, pi/6)) == 0 def test_issue_4514(): assert integrate(sin(2*x)/sin(x), x) == 2*sin(x) def test_issue_15457(): x, a, b = symbols('x a b', real=True) definite = integrate(exp(Abs(x-2)), (x, a, b)) indefinite = integrate(exp(Abs(x-2)), x) assert definite.subs({a: 1, b: 3}) == -2 + 2*E assert indefinite.subs(x, 3) - indefinite.subs(x, 1) == -2 + 2*E assert definite.subs({a: -3, b: -1}) == -exp(3) + exp(5) assert indefinite.subs(x, -1) - indefinite.subs(x, -3) == -exp(3) + exp(5) def test_issue_15431(): assert integrate(x*exp(x)*log(x), x) == \ (x*exp(x) - exp(x))*log(x) - exp(x) + Ei(x) def test_issue_15640_log_substitutions(): f = x/log(x) F = Ei(2*log(x)) assert integrate(f, x) == F and F.diff(x) == f f = x**3/log(x)**2 F = -x**4/log(x) + 4*Ei(4*log(x)) assert integrate(f, x) == F and F.diff(x) == f f = sqrt(log(x))/x**2 F = -sqrt(pi)*erfc(sqrt(log(x)))/2 - sqrt(log(x))/x assert integrate(f, x) == F and F.diff(x) == f def test_issue_15509(): from sympy.vector import CoordSys3D N = CoordSys3D('N') x = N.x assert integrate(cos(a*x + b), (x, x_1, x_2), heurisch=True) == Piecewise( (-sin(a*x_1 + b)/a + sin(a*x_2 + b)/a, (a > -oo) & (a < oo) & Ne(a, 0)), \ (-x_1*cos(b) + x_2*cos(b), True)) def test_issue_4311_fast(): x = symbols('x', real=True) assert integrate(x*abs(9-x**2), x) == Piecewise( (x**4/4 - 9*x**2/2, x <= -3), (-x**4/4 + 9*x**2/2 - Rational(81, 2), x <= 3), (x**4/4 - 9*x**2/2, True)) def test_integrate_with_complex_constants(): K = Symbol('K', real=True, positive=True) x = 
Symbol('x', real=True) m = Symbol('m', real=True) assert integrate(exp(-I*K*x**2+m*x), x) == sqrt(I)*sqrt(pi)*exp(-I*m**2 /(4*K))*erfi((-2*I*K*x + m)/(2*sqrt(K)*sqrt(-I)))/(2*sqrt(K)) assert integrate(1/(1 + I*x**2), x) == -sqrt(I)*log(x - sqrt(I))/2 +\ sqrt(I)*log(x + sqrt(I))/2 assert integrate(exp(-I*x**2), x) == sqrt(pi)*erf(sqrt(I)*x)/(2*sqrt(I)) def test_issue_14241(): x = Symbol('x') n = Symbol('n', positive=True, integer=True) assert integrate(n * x ** (n - 1) / (x + 1), x) == \ n**2*x**n*lerchphi(x*exp_polar(I*pi), 1, n)*gamma(n)/gamma(n + 1) def test_issue_13112(): assert integrate(sin(t)**2 / (5 - 4*cos(t)), [t, 0, 2*pi]) == pi / 4 def test_issue_14709b(): h = Symbol('h', positive=True) i = integrate(x*acos(1 - 2*x/h), (x, 0, h)) assert i == 5*h**2*pi/16 def test_issue_8614(): x = Symbol('x') t = Symbol('t') assert integrate(exp(t)/t, (t, -oo, x)) == Ei(x) assert integrate((exp(-x) - exp(-2*x))/x, (x, 0, oo)) == log(2) def test_issue_15494(): s = symbols('s', real=True, positive=True) integrand = (exp(s/2) - 2*exp(1.6*s) + exp(s))*exp(s) solution = integrate(integrand, s) assert solution != S.NaN # Not sure how to test this properly as it is a symbolic expression with floats # assert str(solution) == '0.666666666666667*exp(1.5*s) + 0.5*exp(2.0*s) - 0.769230769230769*exp(2.6*s)' # Maybe assert abs(solution.subs(s, 1) - (-3.67440080236188)) <= 1e-8 integrand = (exp(s/2) - 2*exp(S(8)/5*s) + exp(s))*exp(s) assert integrate(integrand, s) == -10*exp(13*s/5)/13 + 2*exp(3*s/2)/3 + exp(2*s)/2 def test_li_integral(): y = Symbol('y') assert Integral(li(y*x**2), x).doit() == Piecewise( (x*li(x**2*y) - x*Ei(3*log(x) + 3*log(y)/2)/(sqrt(y)*sqrt(x**2)), Ne(y, 0)), (0, True)) def test_issue_17473(): x = Symbol('x') n = Symbol('n') assert integrate(sin(x**n), x) == \ x*x**n*gamma(S(1)/2 + 1/(2*n))*hyper((S(1)/2 + 1/(2*n),), (S(3)/2, S(3)/2 + 1/(2*n)), -x**(2*n)/4)/(2*n*gamma(S(3)/2 + 1/(2*n))) def test_issue_17671(): assert integrate(log(log(x)) / x**2, [x, 1, oo]) == -EulerGamma assert integrate(log(log(x)) / x**3, [x, 1, oo]) == -log(2)/2 - EulerGamma/2 assert integrate(log(log(x)) / x**10, [x, 1, oo]) == -2*log(3)/9 - EulerGamma/9
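# --- Illustrative sketch, not part of the original test suite ---
# A minimal usage example of definite integration, restating behaviour the
# asserts above already establish (see test_singularities). The helper name
# _example_definite_integration is an editorial assumption and is prefixed
# with an underscore so pytest does not collect it.
def _example_definite_integration():
    from sympy import Symbol, integrate, oo
    x = Symbol('x')
    # convergent tail: integral of x**-2 from 1 to oo evaluates to 1
    assert integrate(1/x**2, (x, 1, oo)) == 1
    # divergent across the singularity at x = 0, as asserted in test_singularities
    assert integrate(1/x**2, (x, -1, 1)) is oo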
from sympy.utilities.pytest import XFAIL, raises, warns_deprecated_sympy from sympy import (S, Symbol, symbols, nan, oo, I, pi, Float, And, Or, Not, Implies, Xor, zoo, sqrt, Rational, simplify, Function, log, cos, sin, Add, Mul, Pow, floor, ceiling, trigsimp) from sympy.core.compatibility import range, PY3 from sympy.core.relational import (Relational, Equality, Unequality, GreaterThan, LessThan, StrictGreaterThan, StrictLessThan, Rel, Eq, Lt, Le, Gt, Ge, Ne) from sympy.sets.sets import Interval, FiniteSet from itertools import combinations x, y, z, t = symbols('x,y,z,t') def rel_check(a, b): from sympy.utilities.pytest import raises assert a.is_number and b.is_number for do in range(len(set([type(a), type(b)]))): if S.NaN in (a, b): v = [(a == b), (a != b)] assert len(set(v)) == 1 and v[0] == False assert not (a != b) and not (a == b) assert raises(TypeError, lambda: a < b) assert raises(TypeError, lambda: a <= b) assert raises(TypeError, lambda: a > b) assert raises(TypeError, lambda: a >= b) else: E = [(a == b), (a != b)] assert len(set(E)) == 2 v = [ (a < b), (a <= b), (a > b), (a >= b)] i = [ [True, True, False, False], [False, True, False, True], # <-- i == 1 [False, False, True, True]].index(v) if i == 1: assert E[0] or (a.is_Float != b.is_Float) # ugh else: assert E[1] a, b = b, a return True def test_rel_ne(): assert Relational(x, y, '!=') == Ne(x, y) # issue 6116 p = Symbol('p', positive=True) assert Ne(p, 0) is S.true def test_rel_subs(): e = Relational(x, y, '==') e = e.subs(x, z) assert isinstance(e, Equality) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '>=') e = e.subs(x, z) assert isinstance(e, GreaterThan) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '<=') e = e.subs(x, z) assert isinstance(e, LessThan) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '>') e = e.subs(x, z) assert isinstance(e, StrictGreaterThan) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '<') e = e.subs(x, z) assert isinstance(e, StrictLessThan) assert e.lhs == z assert e.rhs == y e = Eq(x, 0) assert e.subs(x, 0) is S.true assert e.subs(x, 1) is S.false def test_wrappers(): e = x + x**2 res = Relational(y, e, '==') assert Rel(y, x + x**2, '==') == res assert Eq(y, x + x**2) == res res = Relational(y, e, '<') assert Lt(y, x + x**2) == res res = Relational(y, e, '<=') assert Le(y, x + x**2) == res res = Relational(y, e, '>') assert Gt(y, x + x**2) == res res = Relational(y, e, '>=') assert Ge(y, x + x**2) == res res = Relational(y, e, '!=') assert Ne(y, x + x**2) == res def test_Eq(): assert Eq(x, x) # issue 5719 with warns_deprecated_sympy(): assert Eq(x) == Eq(x, 0) # issue 6116 p = Symbol('p', positive=True) assert Eq(p, 0) is S.false # issue 13348 assert Eq(True, 1) is S.false assert Eq((), 1) is S.false def test_as_poly(): from sympy.polys.polytools import Poly # Only Eq should have an as_poly method: assert Eq(x, 1).as_poly() == Poly(x - 1, x, domain='ZZ') raises(AttributeError, lambda: Ne(x, 1).as_poly()) raises(AttributeError, lambda: Ge(x, 1).as_poly()) raises(AttributeError, lambda: Gt(x, 1).as_poly()) raises(AttributeError, lambda: Le(x, 1).as_poly()) raises(AttributeError, lambda: Lt(x, 1).as_poly()) def test_rel_Infinity(): # NOTE: All of these are actually handled by sympy.core.Number, and do # not create Relational objects. 
assert (oo > oo) is S.false assert (oo > -oo) is S.true assert (oo > 1) is S.true assert (oo < oo) is S.false assert (oo < -oo) is S.false assert (oo < 1) is S.false assert (oo >= oo) is S.true assert (oo >= -oo) is S.true assert (oo >= 1) is S.true assert (oo <= oo) is S.true assert (oo <= -oo) is S.false assert (oo <= 1) is S.false assert (-oo > oo) is S.false assert (-oo > -oo) is S.false assert (-oo > 1) is S.false assert (-oo < oo) is S.true assert (-oo < -oo) is S.false assert (-oo < 1) is S.true assert (-oo >= oo) is S.false assert (-oo >= -oo) is S.true assert (-oo >= 1) is S.false assert (-oo <= oo) is S.true assert (-oo <= -oo) is S.true assert (-oo <= 1) is S.true def test_infinite_symbol_inequalities(): x = Symbol('x', extended_positive=True, infinite=True) y = Symbol('y', extended_positive=True, infinite=True) z = Symbol('z', extended_negative=True, infinite=True) w = Symbol('w', extended_negative=True, infinite=True) inf_set = (x, y, oo) ninf_set = (z, w, -oo) for inf1 in inf_set: assert (inf1 < 1) is S.false assert (inf1 > 1) is S.true assert (inf1 <= 1) is S.false assert (inf1 >= 1) is S.true for inf2 in inf_set: assert (inf1 < inf2) is S.false assert (inf1 > inf2) is S.false assert (inf1 <= inf2) is S.true assert (inf1 >= inf2) is S.true for ninf1 in ninf_set: assert (inf1 < ninf1) is S.false assert (inf1 > ninf1) is S.true assert (inf1 <= ninf1) is S.false assert (inf1 >= ninf1) is S.true assert (ninf1 < inf1) is S.true assert (ninf1 > inf1) is S.false assert (ninf1 <= inf1) is S.true assert (ninf1 >= inf1) is S.false for ninf1 in ninf_set: assert (ninf1 < 1) is S.true assert (ninf1 > 1) is S.false assert (ninf1 <= 1) is S.true assert (ninf1 >= 1) is S.false for ninf2 in ninf_set: assert (ninf1 < ninf2) is S.false assert (ninf1 > ninf2) is S.false assert (ninf1 <= ninf2) is S.true assert (ninf1 >= ninf2) is S.true def test_bool(): assert Eq(0, 0) is S.true assert Eq(1, 0) is S.false assert Ne(0, 0) is S.false assert Ne(1, 0) is S.true assert Lt(0, 1) is S.true assert Lt(1, 0) is S.false assert Le(0, 1) is S.true assert Le(1, 0) is S.false assert Le(0, 0) is S.true assert Gt(1, 0) is S.true assert Gt(0, 1) is S.false assert Ge(1, 0) is S.true assert Ge(0, 1) is S.false assert Ge(1, 1) is S.true assert Eq(I, 2) is S.false assert Ne(I, 2) is S.true raises(TypeError, lambda: Gt(I, 2)) raises(TypeError, lambda: Ge(I, 2)) raises(TypeError, lambda: Lt(I, 2)) raises(TypeError, lambda: Le(I, 2)) a = Float('.000000000000000000001', '') b = Float('.0000000000000000000001', '') assert Eq(pi + a, pi + b) is S.false def test_rich_cmp(): assert (x < y) == Lt(x, y) assert (x <= y) == Le(x, y) assert (x > y) == Gt(x, y) assert (x >= y) == Ge(x, y) def test_doit(): from sympy import Symbol p = Symbol('p', positive=True) n = Symbol('n', negative=True) np = Symbol('np', nonpositive=True) nn = Symbol('nn', nonnegative=True) assert Gt(p, 0).doit() is S.true assert Gt(p, 1).doit() == Gt(p, 1) assert Ge(p, 0).doit() is S.true assert Le(p, 0).doit() is S.false assert Lt(n, 0).doit() is S.true assert Le(np, 0).doit() is S.true assert Gt(nn, 0).doit() == Gt(nn, 0) assert Lt(nn, 0).doit() is S.false assert Eq(x, 0).doit() == Eq(x, 0) def test_new_relational(): x = Symbol('x') assert Eq(x, 0) == Relational(x, 0) # None ==> Equality assert Eq(x, 0) == Relational(x, 0, '==') assert Eq(x, 0) == Relational(x, 0, 'eq') assert Eq(x, 0) == Equality(x, 0) assert Eq(x, 0) != Relational(x, 1) # None ==> Equality assert Eq(x, 0) != Relational(x, 1, '==') assert Eq(x, 0) != Relational(x, 1, 'eq') assert Eq(x, 0) 
!= Equality(x, 1) assert Eq(x, -1) == Relational(x, -1) # None ==> Equality assert Eq(x, -1) == Relational(x, -1, '==') assert Eq(x, -1) == Relational(x, -1, 'eq') assert Eq(x, -1) == Equality(x, -1) assert Eq(x, -1) != Relational(x, 1) # None ==> Equality assert Eq(x, -1) != Relational(x, 1, '==') assert Eq(x, -1) != Relational(x, 1, 'eq') assert Eq(x, -1) != Equality(x, 1) assert Ne(x, 0) == Relational(x, 0, '!=') assert Ne(x, 0) == Relational(x, 0, '<>') assert Ne(x, 0) == Relational(x, 0, 'ne') assert Ne(x, 0) == Unequality(x, 0) assert Ne(x, 0) != Relational(x, 1, '!=') assert Ne(x, 0) != Relational(x, 1, '<>') assert Ne(x, 0) != Relational(x, 1, 'ne') assert Ne(x, 0) != Unequality(x, 1) assert Ge(x, 0) == Relational(x, 0, '>=') assert Ge(x, 0) == Relational(x, 0, 'ge') assert Ge(x, 0) == GreaterThan(x, 0) assert Ge(x, 1) != Relational(x, 0, '>=') assert Ge(x, 1) != Relational(x, 0, 'ge') assert Ge(x, 1) != GreaterThan(x, 0) assert (x >= 1) == Relational(x, 1, '>=') assert (x >= 1) == Relational(x, 1, 'ge') assert (x >= 1) == GreaterThan(x, 1) assert (x >= 0) != Relational(x, 1, '>=') assert (x >= 0) != Relational(x, 1, 'ge') assert (x >= 0) != GreaterThan(x, 1) assert Le(x, 0) == Relational(x, 0, '<=') assert Le(x, 0) == Relational(x, 0, 'le') assert Le(x, 0) == LessThan(x, 0) assert Le(x, 1) != Relational(x, 0, '<=') assert Le(x, 1) != Relational(x, 0, 'le') assert Le(x, 1) != LessThan(x, 0) assert (x <= 1) == Relational(x, 1, '<=') assert (x <= 1) == Relational(x, 1, 'le') assert (x <= 1) == LessThan(x, 1) assert (x <= 0) != Relational(x, 1, '<=') assert (x <= 0) != Relational(x, 1, 'le') assert (x <= 0) != LessThan(x, 1) assert Gt(x, 0) == Relational(x, 0, '>') assert Gt(x, 0) == Relational(x, 0, 'gt') assert Gt(x, 0) == StrictGreaterThan(x, 0) assert Gt(x, 1) != Relational(x, 0, '>') assert Gt(x, 1) != Relational(x, 0, 'gt') assert Gt(x, 1) != StrictGreaterThan(x, 0) assert (x > 1) == Relational(x, 1, '>') assert (x > 1) == Relational(x, 1, 'gt') assert (x > 1) == StrictGreaterThan(x, 1) assert (x > 0) != Relational(x, 1, '>') assert (x > 0) != Relational(x, 1, 'gt') assert (x > 0) != StrictGreaterThan(x, 1) assert Lt(x, 0) == Relational(x, 0, '<') assert Lt(x, 0) == Relational(x, 0, 'lt') assert Lt(x, 0) == StrictLessThan(x, 0) assert Lt(x, 1) != Relational(x, 0, '<') assert Lt(x, 1) != Relational(x, 0, 'lt') assert Lt(x, 1) != StrictLessThan(x, 0) assert (x < 1) == Relational(x, 1, '<') assert (x < 1) == Relational(x, 1, 'lt') assert (x < 1) == StrictLessThan(x, 1) assert (x < 0) != Relational(x, 1, '<') assert (x < 0) != Relational(x, 1, 'lt') assert (x < 0) != StrictLessThan(x, 1) # finally, some fuzz testing from random import randint from sympy.core.compatibility import unichr for i in range(100): while 1: strtype, length = (unichr, 65535) if randint(0, 1) else (chr, 255) relation_type = strtype(randint(0, length)) if randint(0, 1): relation_type += strtype(randint(0, length)) if relation_type not in ('==', 'eq', '!=', '<>', 'ne', '>=', 'ge', '<=', 'le', '>', 'gt', '<', 'lt', ':=', '+=', '-=', '*=', '/=', '%='): break raises(ValueError, lambda: Relational(x, 1, relation_type)) assert all(Relational(x, 0, op).rel_op == '==' for op in ('eq', '==')) assert all(Relational(x, 0, op).rel_op == '!=' for op in ('ne', '<>', '!=')) assert all(Relational(x, 0, op).rel_op == '>' for op in ('gt', '>')) assert all(Relational(x, 0, op).rel_op == '<' for op in ('lt', '<')) assert all(Relational(x, 0, op).rel_op == '>=' for op in ('ge', '>=')) assert all(Relational(x, 0, op).rel_op == '<=' 
for op in ('le', '<=')) def test_relational_arithmetic(): for cls in [Eq, Ne, Le, Lt, Ge, Gt]: rel = cls(x, y) raises(TypeError, lambda: 0+rel) raises(TypeError, lambda: 1*rel) raises(TypeError, lambda: 1**rel) raises(TypeError, lambda: rel**1) raises(TypeError, lambda: Add(0, rel)) raises(TypeError, lambda: Mul(1, rel)) raises(TypeError, lambda: Pow(1, rel)) raises(TypeError, lambda: Pow(rel, 1)) def test_relational_bool_output(): # https://github.com/sympy/sympy/issues/5931 raises(TypeError, lambda: bool(x > 3)) raises(TypeError, lambda: bool(x >= 3)) raises(TypeError, lambda: bool(x < 3)) raises(TypeError, lambda: bool(x <= 3)) raises(TypeError, lambda: bool(Eq(x, 3))) raises(TypeError, lambda: bool(Ne(x, 3))) def test_relational_logic_symbols(): # See issue 6204 assert (x < y) & (z < t) == And(x < y, z < t) assert (x < y) | (z < t) == Or(x < y, z < t) assert ~(x < y) == Not(x < y) assert (x < y) >> (z < t) == Implies(x < y, z < t) assert (x < y) << (z < t) == Implies(z < t, x < y) assert (x < y) ^ (z < t) == Xor(x < y, z < t) assert isinstance((x < y) & (z < t), And) assert isinstance((x < y) | (z < t), Or) assert isinstance(~(x < y), GreaterThan) assert isinstance((x < y) >> (z < t), Implies) assert isinstance((x < y) << (z < t), Implies) assert isinstance((x < y) ^ (z < t), (Or, Xor)) def test_univariate_relational_as_set(): assert (x > 0).as_set() == Interval(0, oo, True, True) assert (x >= 0).as_set() == Interval(0, oo) assert (x < 0).as_set() == Interval(-oo, 0, True, True) assert (x <= 0).as_set() == Interval(-oo, 0) assert Eq(x, 0).as_set() == FiniteSet(0) assert Ne(x, 0).as_set() == Interval(-oo, 0, True, True) + \ Interval(0, oo, True, True) assert (x**2 >= 4).as_set() == Interval(-oo, -2) + Interval(2, oo) @XFAIL def test_multivariate_relational_as_set(): assert (x*y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + \ Interval(-oo, 0)*Interval(-oo, 0) def test_Not(): assert Not(Equality(x, y)) == Unequality(x, y) assert Not(Unequality(x, y)) == Equality(x, y) assert Not(StrictGreaterThan(x, y)) == LessThan(x, y) assert Not(StrictLessThan(x, y)) == GreaterThan(x, y) assert Not(GreaterThan(x, y)) == StrictLessThan(x, y) assert Not(LessThan(x, y)) == StrictGreaterThan(x, y) def test_evaluate(): assert str(Eq(x, x, evaluate=False)) == 'Eq(x, x)' assert Eq(x, x, evaluate=False).doit() == S.true assert str(Ne(x, x, evaluate=False)) == 'Ne(x, x)' assert Ne(x, x, evaluate=False).doit() == S.false assert str(Ge(x, x, evaluate=False)) == 'x >= x' assert str(Le(x, x, evaluate=False)) == 'x <= x' assert str(Gt(x, x, evaluate=False)) == 'x > x' assert str(Lt(x, x, evaluate=False)) == 'x < x' def assert_all_ineq_raise_TypeError(a, b): raises(TypeError, lambda: a > b) raises(TypeError, lambda: a >= b) raises(TypeError, lambda: a < b) raises(TypeError, lambda: a <= b) raises(TypeError, lambda: b > a) raises(TypeError, lambda: b >= a) raises(TypeError, lambda: b < a) raises(TypeError, lambda: b <= a) def assert_all_ineq_give_class_Inequality(a, b): """All inequality operations on `a` and `b` result in class Inequality.""" from sympy.core.relational import _Inequality as Inequality assert isinstance(a > b, Inequality) assert isinstance(a >= b, Inequality) assert isinstance(a < b, Inequality) assert isinstance(a <= b, Inequality) assert isinstance(b > a, Inequality) assert isinstance(b >= a, Inequality) assert isinstance(b < a, Inequality) assert isinstance(b <= a, Inequality) def test_imaginary_compare_raises_TypeError(): # See issue #5724 assert_all_ineq_raise_TypeError(I, x) def 
test_complex_compare_not_real(): # two cases which are not real y = Symbol('y', imaginary=True) z = Symbol('z', complex=True, extended_real=False) for w in (y, z): assert_all_ineq_raise_TypeError(2, w) # some cases which should remain un-evaluated t = Symbol('t') x = Symbol('x', real=True) z = Symbol('z', complex=True) for w in (x, z, t): assert_all_ineq_give_class_Inequality(2, w) def test_imaginary_and_inf_compare_raises_TypeError(): # See pull request #7835 y = Symbol('y', imaginary=True) assert_all_ineq_raise_TypeError(oo, y) assert_all_ineq_raise_TypeError(-oo, y) def test_complex_pure_imag_not_ordered(): raises(TypeError, lambda: 2*I < 3*I) # more generally x = Symbol('x', real=True, nonzero=True) y = Symbol('y', imaginary=True) z = Symbol('z', complex=True) assert_all_ineq_raise_TypeError(I, y) t = I*x # an imaginary number, should raise errors assert_all_ineq_raise_TypeError(2, t) t = -I*y # a real number, so no errors assert_all_ineq_give_class_Inequality(2, t) t = I*z # unknown, should be unevaluated assert_all_ineq_give_class_Inequality(2, t) def test_x_minus_y_not_same_as_x_lt_y(): """ A consequence of pull request #7792 is that `x - y < 0` and `x < y` are not synonymous. """ x = I + 2 y = I + 3 raises(TypeError, lambda: x < y) assert x - y < 0 ineq = Lt(x, y, evaluate=False) raises(TypeError, lambda: ineq.doit()) assert ineq.lhs - ineq.rhs < 0 t = Symbol('t', imaginary=True) x = 2 + t y = 3 + t ineq = Lt(x, y, evaluate=False) raises(TypeError, lambda: ineq.doit()) assert ineq.lhs - ineq.rhs < 0 # this one should give error either way x = I + 2 y = 2*I + 3 raises(TypeError, lambda: x < y) raises(TypeError, lambda: x - y < 0) def test_nan_equality_exceptions(): # See issue #7774 import random assert Equality(nan, nan) is S.false assert Unequality(nan, nan) is S.true # See issue #7773 A = (x, S.Zero, S.One/3, pi, oo, -oo) assert Equality(nan, random.choice(A)) is S.false assert Equality(random.choice(A), nan) is S.false assert Unequality(nan, random.choice(A)) is S.true assert Unequality(random.choice(A), nan) is S.true def test_nan_inequality_raise_errors(): # See discussion in pull request #7776. We test inequalities with # a set including examples of various classes. for q in (x, S.Zero, S(10), S.One/3, pi, S(1.3), oo, -oo, nan): assert_all_ineq_raise_TypeError(q, nan) def test_nan_complex_inequalities(): # Comparisons of NaN with non-real raise errors, we're not too # fussy whether its the NaN error or complex error. 
for r in (I, zoo, Symbol('z', imaginary=True)): assert_all_ineq_raise_TypeError(r, nan) def test_complex_infinity_inequalities(): raises(TypeError, lambda: zoo > 0) raises(TypeError, lambda: zoo >= 0) raises(TypeError, lambda: zoo < 0) raises(TypeError, lambda: zoo <= 0) def test_inequalities_symbol_name_same(): """Using the operator and functional forms should give same results.""" # We test all combinations from a set # FIXME: could replace with random selection after test passes A = (x, y, S.Zero, S.One/3, pi, oo, -oo) for a in A: for b in A: assert Gt(a, b) == (a > b) assert Lt(a, b) == (a < b) assert Ge(a, b) == (a >= b) assert Le(a, b) == (a <= b) for b in (y, S.Zero, S.One/3, pi, oo, -oo): assert Gt(x, b, evaluate=False) == (x > b) assert Lt(x, b, evaluate=False) == (x < b) assert Ge(x, b, evaluate=False) == (x >= b) assert Le(x, b, evaluate=False) == (x <= b) for b in (y, S.Zero, S.One/3, pi, oo, -oo): assert Gt(b, x, evaluate=False) == (b > x) assert Lt(b, x, evaluate=False) == (b < x) assert Ge(b, x, evaluate=False) == (b >= x) assert Le(b, x, evaluate=False) == (b <= x) def test_inequalities_symbol_name_same_complex(): """Using the operator and functional forms should give same results. With complex non-real numbers, both should raise errors. """ # FIXME: could replace with random selection after test passes for a in (x, S.Zero, S.One/3, pi, oo, Rational(1, 3)): raises(TypeError, lambda: Gt(a, I)) raises(TypeError, lambda: a > I) raises(TypeError, lambda: Lt(a, I)) raises(TypeError, lambda: a < I) raises(TypeError, lambda: Ge(a, I)) raises(TypeError, lambda: a >= I) raises(TypeError, lambda: Le(a, I)) raises(TypeError, lambda: a <= I) def test_inequalities_cant_sympify_other(): # see issue 7833 from operator import gt, lt, ge, le bar = "foo" for a in (x, S.Zero, S.One/3, pi, I, zoo, oo, -oo, nan, Rational(1, 3)): for op in (lt, gt, le, ge): if PY3: raises(TypeError, lambda: op(a, bar)) def test_ineq_avoid_wild_symbol_flip(): # see issue #7951, we try to avoid this internally, e.g., by using # __lt__ instead of "<". 
from sympy.core.symbol import Wild p = symbols('p', cls=Wild) # x > p might flip, but Gt should not: assert Gt(x, p) == Gt(x, p, evaluate=False) # Previously failed as 'p > x': e = Lt(x, y).subs({y: p}) assert e == Lt(x, p, evaluate=False) # Previously failed as 'p <= x': e = Ge(x, p).doit() assert e == Ge(x, p, evaluate=False) def test_issue_8245(): a = S("6506833320952669167898688709329/5070602400912917605986812821504") assert rel_check(a, a.n(10)) assert rel_check(a, a.n(20)) assert rel_check(a, a.n()) # prec of 30 is enough to fully capture a as mpf assert Float(a, 30) == Float(str(a.p), '')/Float(str(a.q), '') for i in range(31): r = Rational(Float(a, i)) f = Float(r) assert (f < a) == (Rational(f) < a) # test sign handling assert (-f < -a) == (Rational(-f) < -a) # test equivalence handling isa = Float(a.p,'')/Float(a.q,'') assert isa <= a assert not isa < a assert isa >= a assert not isa > a assert isa > 0 a = sqrt(2) r = Rational(str(a.n(30))) assert rel_check(a, r) a = sqrt(2) r = Rational(str(a.n(29))) assert rel_check(a, r) assert Eq(log(cos(2)**2 + sin(2)**2), 0) == True def test_issue_8449(): p = Symbol('p', nonnegative=True) assert Lt(-oo, p) assert Ge(-oo, p) is S.false assert Gt(oo, -p) assert Le(oo, -p) is S.false def test_simplify_relational(): assert simplify(x*(y + 1) - x*y - x + 1 < x) == (x > 1) assert simplify(x*(y + 1) - x*y - x - 1 < x) == (x > -1) assert simplify(x < x*(y + 1) - x*y - x + 1) == (x < 1) r = S.One < x # canonical operations are not the same as simplification, # so if there is no simplification, canonicalization will # be done unless the measure forbids it assert simplify(r) == r.canonical assert simplify(r, ratio=0) != r.canonical # this is not a random test; in _eval_simplify # this will simplify to S.false and that is the # reason for the 'if r.is_Relational' in Relational's # _eval_simplify routine assert simplify(-(2**(pi*Rational(3, 2)) + 6**pi)**(1/pi) + 2*(2**(pi/2) + 3**pi)**(1/pi) < 0) is S.false # canonical at least assert Eq(y, x).simplify() == Eq(x, y) assert Eq(x - 1, 0).simplify() == Eq(x, 1) assert Eq(x - 1, x).simplify() == S.false assert Eq(2*x - 1, x).simplify() == Eq(x, 1) assert Eq(2*x, 4).simplify() == Eq(x, 2) z = cos(1)**2 + sin(1)**2 - 1 # z.is_zero is None assert Eq(z*x, 0).simplify() == S.true assert Ne(y, x).simplify() == Ne(x, y) assert Ne(x - 1, 0).simplify() == Ne(x, 1) assert Ne(x - 1, x).simplify() == S.true assert Ne(2*x - 1, x).simplify() == Ne(x, 1) assert Ne(2*x, 4).simplify() == Ne(x, 2) assert Ne(z*x, 0).simplify() == S.false # No real-valued assumptions assert Ge(y, x).simplify() == Le(x, y) assert Ge(x - 1, 0).simplify() == Ge(x, 1) assert Ge(x - 1, x).simplify() == S.false assert Ge(2*x - 1, x).simplify() == Ge(x, 1) assert Ge(2*x, 4).simplify() == Ge(x, 2) assert Ge(z*x, 0).simplify() == S.true assert Ge(x, -2).simplify() == Ge(x, -2) assert Ge(-x, -2).simplify() == Le(x, 2) assert Ge(x, 2).simplify() == Ge(x, 2) assert Ge(-x, 2).simplify() == Le(x, -2) assert Le(y, x).simplify() == Ge(x, y) assert Le(x - 1, 0).simplify() == Le(x, 1) assert Le(x - 1, x).simplify() == S.true assert Le(2*x - 1, x).simplify() == Le(x, 1) assert Le(2*x, 4).simplify() == Le(x, 2) assert Le(z*x, 0).simplify() == S.true assert Le(x, -2).simplify() == Le(x, -2) assert Le(-x, -2).simplify() == Ge(x, 2) assert Le(x, 2).simplify() == Le(x, 2) assert Le(-x, 2).simplify() == Ge(x, -2) assert Gt(y, x).simplify() == Lt(x, y) assert Gt(x - 1, 0).simplify() == Gt(x, 1) assert Gt(x - 1, x).simplify() == S.false assert Gt(2*x - 1, x).simplify() 
== Gt(x, 1) assert Gt(2*x, 4).simplify() == Gt(x, 2) assert Gt(z*x, 0).simplify() == S.false assert Gt(x, -2).simplify() == Gt(x, -2) assert Gt(-x, -2).simplify() == Lt(x, 2) assert Gt(x, 2).simplify() == Gt(x, 2) assert Gt(-x, 2).simplify() == Lt(x, -2) assert Lt(y, x).simplify() == Gt(x, y) assert Lt(x - 1, 0).simplify() == Lt(x, 1) assert Lt(x - 1, x).simplify() == S.true assert Lt(2*x - 1, x).simplify() == Lt(x, 1) assert Lt(2*x, 4).simplify() == Lt(x, 2) assert Lt(z*x, 0).simplify() == S.false assert Lt(x, -2).simplify() == Lt(x, -2) assert Lt(-x, -2).simplify() == Gt(x, 2) assert Lt(x, 2).simplify() == Lt(x, 2) assert Lt(-x, 2).simplify() == Gt(x, -2) def test_equals(): w, x, y, z = symbols('w:z') f = Function('f') assert Eq(x, 1).equals(Eq(x*(y + 1) - x*y - x + 1, x)) assert Eq(x, y).equals(x < y, True) == False assert Eq(x, f(1)).equals(Eq(x, f(2)), True) == f(1) - f(2) assert Eq(f(1), y).equals(Eq(f(2), y), True) == f(1) - f(2) assert Eq(x, f(1)).equals(Eq(f(2), x), True) == f(1) - f(2) assert Eq(f(1), x).equals(Eq(x, f(2)), True) == f(1) - f(2) assert Eq(w, x).equals(Eq(y, z), True) == False assert Eq(f(1), f(2)).equals(Eq(f(3), f(4)), True) == f(1) - f(3) assert (x < y).equals(y > x, True) == True assert (x < y).equals(y >= x, True) == False assert (x < y).equals(z < y, True) == False assert (x < y).equals(x < z, True) == False assert (x < f(1)).equals(x < f(2), True) == f(1) - f(2) assert (f(1) < x).equals(f(2) < x, True) == f(1) - f(2) def test_reversed(): assert (x < y).reversed == (y > x) assert (x <= y).reversed == (y >= x) assert Eq(x, y, evaluate=False).reversed == Eq(y, x, evaluate=False) assert Ne(x, y, evaluate=False).reversed == Ne(y, x, evaluate=False) assert (x >= y).reversed == (y <= x) assert (x > y).reversed == (y < x) def test_canonical(): c = [i.canonical for i in ( x + y < z, x + 2 > 3, x < 2, S(2) > x, x**2 > -x/y, Gt(3, 2, evaluate=False) )] assert [i.canonical for i in c] == c assert [i.reversed.canonical for i in c] == c assert not any(i.lhs.is_Number and not i.rhs.is_Number for i in c) c = [i.reversed.func(i.rhs, i.lhs, evaluate=False).canonical for i in c] assert [i.canonical for i in c] == c assert [i.reversed.canonical for i in c] == c assert not any(i.lhs.is_Number and not i.rhs.is_Number for i in c) @XFAIL def test_issue_8444_nonworkingtests(): x = symbols('x', real=True) assert (x <= oo) == (x >= -oo) == True x = symbols('x') assert x >= floor(x) assert (x < floor(x)) == False assert x <= ceiling(x) assert (x > ceiling(x)) == False def test_issue_8444_workingtests(): x = symbols('x') assert Gt(x, floor(x)) == Gt(x, floor(x), evaluate=False) assert Ge(x, floor(x)) == Ge(x, floor(x), evaluate=False) assert Lt(x, ceiling(x)) == Lt(x, ceiling(x), evaluate=False) assert Le(x, ceiling(x)) == Le(x, ceiling(x), evaluate=False) i = symbols('i', integer=True) assert (i > floor(i)) == False assert (i < ceiling(i)) == False def test_issue_10304(): d = cos(1)**2 + sin(1)**2 - 1 assert d.is_comparable is False # if this fails, find a new d e = 1 + d*I assert simplify(Eq(e, 0)) is S.false def test_issue_10401(): x = symbols('x') fin = symbols('inf', finite=True) inf = symbols('inf', infinite=True) inf2 = symbols('inf2', infinite=True) infx = symbols('infx', infinite=True, extended_real=True) # Used in the commented tests below: #infx2 = symbols('infx2', infinite=True, extended_real=True) infnx = symbols('inf~x', infinite=True, extended_real=False) infnx2 = symbols('inf~x2', infinite=True, extended_real=False) infp = symbols('infp', infinite=True, 
extended_positive=True) infp1 = symbols('infp1', infinite=True, extended_positive=True) infn = symbols('infn', infinite=True, extended_negative=True) zero = symbols('z', zero=True) nonzero = symbols('nz', zero=False, finite=True) assert Eq(1/(1/x + 1), 1).func is Eq assert Eq(1/(1/x + 1), 1).subs(x, S.ComplexInfinity) is S.true assert Eq(1/(1/fin + 1), 1) is S.false T, F = S.true, S.false assert Eq(fin, inf) is F assert Eq(inf, inf2) not in (T, F) and inf != inf2 assert Eq(1 + inf, 2 + inf2) not in (T, F) and inf != inf2 assert Eq(infp, infp1) is T assert Eq(infp, infn) is F assert Eq(1 + I*oo, I*oo) is F assert Eq(I*oo, 1 + I*oo) is F assert Eq(1 + I*oo, 2 + I*oo) is F assert Eq(1 + I*oo, 2 + I*infx) is F assert Eq(1 + I*oo, 2 + infx) is F # FIXME: The test below fails because (-infx).is_extended_positive is True # (should be None) #assert Eq(1 + I*infx, 1 + I*infx2) not in (T, F) and infx != infx2 # assert Eq(zoo, sqrt(2) + I*oo) is F assert Eq(zoo, oo) is F r = Symbol('r', real=True) i = Symbol('i', imaginary=True) assert Eq(i*I, r) not in (T, F) assert Eq(infx, infnx) is F assert Eq(infnx, infnx2) not in (T, F) and infnx != infnx2 assert Eq(zoo, oo) is F assert Eq(inf/inf2, 0) is F assert Eq(inf/fin, 0) is F assert Eq(fin/inf, 0) is T assert Eq(zero/nonzero, 0) is T and ((zero/nonzero) != 0) # The commented out test below is incorrect because: assert zoo == -zoo assert Eq(zoo, -zoo) is T assert Eq(oo, -oo) is F assert Eq(inf, -inf) not in (T, F) assert Eq(fin/(fin + 1), 1) is S.false o = symbols('o', odd=True) assert Eq(o, 2*o) is S.false p = symbols('p', positive=True) assert Eq(p/(p - 1), 1) is F def test_issue_10633(): assert Eq(True, False) == False assert Eq(False, True) == False assert Eq(True, True) == True assert Eq(False, False) == True def test_issue_10927(): x = symbols('x') assert str(Eq(x, oo)) == 'Eq(x, oo)' assert str(Eq(x, -oo)) == 'Eq(x, -oo)' def test_issues_13081_12583_12534(): # 13081 r = Rational('905502432259640373/288230376151711744') assert (r < pi) is S.false assert (r > pi) is S.true # 12583 v = sqrt(2) u = sqrt(v) + 2/sqrt(10 - 8/sqrt(2 - v) + 4*v*(1/sqrt(2 - v) - 1)) assert (u >= 0) is S.true # 12534; Rational vs NumberSymbol # here are some precisions for which Rational forms # at a lower and higher precision bracket the value of pi # e.g. for p = 20: # Rational(pi.n(p + 1)).n(25) = 3.14159265358979323846 2834 # pi.n(25) = 3.14159265358979323846 2643 # Rational(pi.n(p )).n(25) = 3.14159265358979323846 1987 assert [p for p in range(20, 50) if (Rational(pi.n(p)) < pi) and (pi < Rational(pi.n(p + 1)))] == [20, 24, 27, 33, 37, 43, 48] # pick one such precision and affirm that the reversed operation # gives the opposite result, i.e. 
if x < y is true then x > y # must be false for i in (20, 21): v = pi.n(i) assert rel_check(Rational(v), pi) assert rel_check(v, pi) assert rel_check(pi.n(20), pi.n(21)) # Float vs Rational # the rational form is less than the floating representation # at the same precision assert [i for i in range(15, 50) if Rational(pi.n(i)) > pi.n(i)] == [] # this should be the same if we reverse the relational assert [i for i in range(15, 50) if pi.n(i) < Rational(pi.n(i))] == [] def test_binary_symbols(): ans = set([x]) for f in Eq, Ne: for t in S.true, S.false: eq = f(x, S.true) assert eq.binary_symbols == ans assert eq.reversed.binary_symbols == ans assert f(x, 1).binary_symbols == set() def test_rel_args(): # can't have Boolean args; this is automatic with Python 3 # so this test and the __lt__, etc..., definitions in # relational.py and boolalg.py which are marked with /// # can be removed. for op in ['<', '<=', '>', '>=']: for b in (S.true, x < 1, And(x, y)): for v in (0.1, 1, 2**32, t, S.One): raises(TypeError, lambda: Relational(b, v, op)) def test_Equality_rewrite_as_Add(): eq = Eq(x + y, y - x) assert eq.rewrite(Add) == 2*x assert eq.rewrite(Add, evaluate=None).args == (x, x, y, -y) assert eq.rewrite(Add, evaluate=False).args == (x, y, x, -y) def test_issue_15847(): a = Ne(x*(x+y), x**2 + x*y) assert simplify(a) == False def test_negated_property(): eq = Eq(x, y) assert eq.negated == Ne(x, y) eq = Ne(x, y) assert eq.negated == Eq(x, y) eq = Ge(x + y, y - x) assert eq.negated == Lt(x + y, y - x) for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(x, y).negated.negated == f(x, y) def test_reversedsign_property(): eq = Eq(x, y) assert eq.reversedsign == Eq(-x, -y) eq = Ne(x, y) assert eq.reversedsign == Ne(-x, -y) eq = Ge(x + y, y - x) assert eq.reversedsign == Le(-x - y, x - y) for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(x, y).reversedsign.reversedsign == f(x, y) for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(-x, y).reversedsign.reversedsign == f(-x, y) for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(x, -y).reversedsign.reversedsign == f(x, -y) for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(-x, -y).reversedsign.reversedsign == f(-x, -y) def test_reversed_reversedsign_property(): for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(x, y).reversed.reversedsign == f(x, y).reversedsign.reversed for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(-x, y).reversed.reversedsign == f(-x, y).reversedsign.reversed for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(x, -y).reversed.reversedsign == f(x, -y).reversedsign.reversed for f in (Eq, Ne, Ge, Gt, Le, Lt): assert f(-x, -y).reversed.reversedsign == \ f(-x, -y).reversedsign.reversed def test_improved_canonical(): def test_different_forms(listofforms): for form1, form2 in combinations(listofforms, 2): assert form1.canonical == form2.canonical def generate_forms(expr): return [expr, expr.reversed, expr.reversedsign, expr.reversed.reversedsign] test_different_forms(generate_forms(x > -y)) test_different_forms(generate_forms(x >= -y)) test_different_forms(generate_forms(Eq(x, -y))) test_different_forms(generate_forms(Ne(x, -y))) test_different_forms(generate_forms(pi < x)) test_different_forms(generate_forms(pi - 5*y < -x + 2*y**2 - 7)) assert (pi >= x).canonical == (x <= pi) def test_set_equality_canonical(): a, b, c = symbols('a b c') A = Eq(FiniteSet(a, b, c), FiniteSet(1, 2, 3)) B = Ne(FiniteSet(a, b, c), FiniteSet(4, 5, 6)) assert A.canonical == A.reversed assert B.canonical == B.reversed def test_trigsimp(): # issue 16736 s, c = sin(2*x), cos(2*x) eq = Eq(s, c) assert trigsimp(eq) == eq # no 
rearrangement of sides # simplification of sides might result in # an unevaluated Eq changed = trigsimp(Eq(s + c, sqrt(2))) assert isinstance(changed, Eq) assert changed.subs(x, pi/8) is S.true # or an evaluated one assert trigsimp(Eq(cos(x)**2 + sin(x)**2, 1)) is S.true def test_polynomial_relation_simplification(): assert Ge(3*x*(x + 1) + 4, 3*x).simplify() in [Ge(x**2, -Rational(4,3)), Le(-x**2, Rational(4, 3))] assert Le(-(3*x*(x + 1) + 4), -3*x).simplify() in [Ge(x**2, -Rational(4,3)), Le(-x**2, Rational(4, 3))] assert ((x**2+3)*(x**2-1)+3*x >= 2*x**2).simplify() in [(x**4 + 3*x >= 3), (-x**4 - 3*x <= -3)] def test_multivariate_linear_function_simplification(): assert Ge(x + y, x - y).simplify() == Ge(y, 0) assert Le(-x + y, -x - y).simplify() == Le(y, 0) assert Eq(2*x + y, 2*x + y - 3).simplify() == False assert (2*x + y > 2*x + y - 3).simplify() == True assert (2*x + y < 2*x + y - 3).simplify() == False assert (2*x + y < 2*x + y + 3).simplify() == True a, b, c, d, e, f, g = symbols('a b c d e f g') assert Lt(a + b + c + 2*d, 3*d - f + g). simplify() == Lt(a, -b - c + d - f + g) def test_nonpolymonial_relations(): assert Eq(cos(x), 0).simplify() == Eq(cos(x), 0)
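# --- Illustrative sketch, not part of the original test suite ---
# A short, hedged demonstration of the Relational conveniences exercised in
# this module (.reversed, .negated, .canonical); each claim mirrors an assert
# from the tests above (test_reversed, test_negated_property,
# test_improved_canonical). The helper name is an editorial assumption.
def _example_relational_properties():
    from sympy import Symbol, Gt, Lt, Le, pi
    x = Symbol('x')
    r = Gt(x, 1)                    # x > 1, unevaluated for a plain Symbol
    assert r.reversed == Lt(1, x)   # sides swapped, operator mirrored
    assert r.negated == Le(x, 1)    # logical negation of the inequality
    assert (pi >= x).canonical == (x <= pi)  # canonical form moves the Number to the rhs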
from sympy import (Eq, Rational, Float, S, Symbol, cos, oo, pi, simplify, sin, sqrt, symbols, acos) from sympy.core.compatibility import range from sympy.functions.elementary.trigonometric import tan from sympy.geometry import (Circle, GeometryError, Line, Point, Ray, Segment, Triangle, intersection, Point3D, Line3D, Ray3D, Segment3D, Point2D, Line2D) from sympy.geometry.line import Undecidable from sympy.geometry.polygon import _asa as asa from sympy.utilities.iterables import cartes from sympy.utilities.pytest import raises, warns x = Symbol('x', real=True) y = Symbol('y', real=True) z = Symbol('z', real=True) k = Symbol('k', real=True) x1 = Symbol('x1', real=True) y1 = Symbol('y1', real=True) t = Symbol('t', real=True) a, b = symbols('a,b', real=True) m = symbols('m', real=True) def test_object_from_equation(): from sympy.abc import x, y, a, b assert Line(3*x + y + 18) == Line2D(Point2D(0, -18), Point2D(1, -21)) assert Line(3*x + 5 * y + 1) == Line2D(Point2D(0, Rational(-1, 5)), Point2D(1, Rational(-4, 5))) assert Line(3*a + b + 18, x='a', y='b') == Line2D(Point2D(0, -18), Point2D(1, -21)) assert Line(3*x + y) == Line2D(Point2D(0, 0), Point2D(1, -3)) assert Line(x + y) == Line2D(Point2D(0, 0), Point2D(1, -1)) assert Line(Eq(3*a + b, -18), x='a', y=b) == Line2D(Point2D(0, -18), Point2D(1, -21)) raises(ValueError, lambda: Line(x)) raises(ValueError, lambda: Line(y)) raises(ValueError, lambda: Line(x/y)) raises(ValueError, lambda: Line(a/b, x='a', y='b')) raises(ValueError, lambda: Line(y/x)) raises(ValueError, lambda: Line(b/a, x='a', y='b')) raises(ValueError, lambda: Line((x + 1)**2 + y)) def feq(a, b): """Test if two floating point values are 'equal'.""" t_float = Float("1.0E-10") return -t_float < a - b < t_float def test_angle_between(): a = Point(1, 2, 3, 4) b = a.orthogonal_direction o = a.origin assert feq(Line.angle_between(Line(Point(0, 0), Point(1, 1)), Line(Point(0, 0), Point(5, 0))).evalf(), pi.evalf() / 4) assert Line(a, o).angle_between(Line(b, o)) == pi / 2 assert Line3D.angle_between(Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1)), Line3D(Point3D(0, 0, 0), Point3D(5, 0, 0))) == acos(sqrt(3) / 3) def test_closing_angle(): a = Ray((0, 0), angle=0) b = Ray((1, 2), angle=pi/2) assert a.closing_angle(b) == -pi/2 assert b.closing_angle(a) == pi/2 assert a.closing_angle(a) == 0 def test_arbitrary_point(): l1 = Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1)) l2 = Line(Point(x1, x1), Point(y1, y1)) assert l2.arbitrary_point() in l2 assert Ray((1, 1), angle=pi / 4).arbitrary_point() == \ Point(t + 1, t + 1) assert Segment((1, 1), (2, 3)).arbitrary_point() == Point(1 + t, 1 + 2 * t) assert l1.perpendicular_segment(l1.arbitrary_point()) == l1.arbitrary_point() assert Ray3D((1, 1, 1), direction_ratio=[1, 2, 3]).arbitrary_point() == \ Point3D(t + 1, 2 * t + 1, 3 * t + 1) assert Segment3D(Point3D(0, 0, 0), Point3D(1, 1, 1)).midpoint == \ Point3D(S.Half, S.Half, S.Half) assert Segment3D(Point3D(x1, x1, x1), Point3D(y1, y1, y1)).length == sqrt(3) * sqrt((x1 - y1) ** 2) assert Segment3D((1, 1, 1), (2, 3, 4)).arbitrary_point() == \ Point3D(t + 1, 2 * t + 1, 3 * t + 1) raises(ValueError, (lambda: Line((x, 1), (2, 3)).arbitrary_point(x))) def test_are_concurrent_2d(): l1 = Line(Point(0, 0), Point(1, 1)) l2 = Line(Point(x1, x1), Point(x1, 1 + x1)) assert Line.are_concurrent(l1) is False assert Line.are_concurrent(l1, l2) assert Line.are_concurrent(l1, l1, l1, l2) assert Line.are_concurrent(l1, l2, Line(Point(5, x1), Point(Rational(-3, 5), x1))) assert Line.are_concurrent(l1, Line(Point(0, 0), Point(-x1, 
x1)), l2) is False def test_are_concurrent_3d(): p1 = Point3D(0, 0, 0) l1 = Line(p1, Point3D(1, 1, 1)) parallel_1 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)) parallel_2 = Line3D(Point3D(0, 1, 0), Point3D(1, 1, 0)) assert Line3D.are_concurrent(l1) is False assert Line3D.are_concurrent(l1, Line(Point3D(x1, x1, x1), Point3D(y1, y1, y1))) is False assert Line3D.are_concurrent(l1, Line3D(p1, Point3D(x1, x1, x1)), Line(Point3D(x1, x1, x1), Point3D(x1, 1 + x1, 1))) is True assert Line3D.are_concurrent(parallel_1, parallel_2) is False def test_arguments(): """Functions accepting `Point` objects in `geometry` should also accept tuples, lists, and generators and automatically convert them to points.""" from sympy import subsets singles2d = ((1, 2), [1, 3], Point(1, 5)) doubles2d = subsets(singles2d, 2) l2d = Line(Point2D(1, 2), Point2D(2, 3)) singles3d = ((1, 2, 3), [1, 2, 4], Point(1, 2, 6)) doubles3d = subsets(singles3d, 2) l3d = Line(Point3D(1, 2, 3), Point3D(1, 1, 2)) singles4d = ((1, 2, 3, 4), [1, 2, 3, 5], Point(1, 2, 3, 7)) doubles4d = subsets(singles4d, 2) l4d = Line(Point(1, 2, 3, 4), Point(2, 2, 2, 2)) # test 2D test_single = ['contains', 'distance', 'equals', 'parallel_line', 'perpendicular_line', 'perpendicular_segment', 'projection', 'intersection'] for p in doubles2d: Line2D(*p) for func in test_single: for p in singles2d: getattr(l2d, func)(p) # test 3D for p in doubles3d: Line3D(*p) for func in test_single: for p in singles3d: getattr(l3d, func)(p) # test 4D for p in doubles4d: Line(*p) for func in test_single: for p in singles4d: getattr(l4d, func)(p) def test_basic_properties_2d(): p1 = Point(0, 0) p2 = Point(1, 1) p10 = Point(2000, 2000) p_r3 = Ray(p1, p2).random_point() p_r4 = Ray(p2, p1).random_point() l1 = Line(p1, p2) l3 = Line(Point(x1, x1), Point(x1, 1 + x1)) l4 = Line(p1, Point(1, 0)) r1 = Ray(p1, Point(0, 1)) r2 = Ray(Point(0, 1), p1) s1 = Segment(p1, p10) p_s1 = s1.random_point() assert Line((1, 1), slope=1) == Line((1, 1), (2, 2)) assert Line((1, 1), slope=oo) == Line((1, 1), (1, 2)) assert Line((1, 1), slope=-oo) == Line((1, 1), (1, 2)) assert Line(p1, p2).scale(2, 1) == Line(p1, Point(2, 1)) assert Line(p1, p2) == Line(p1, p2) assert Line(p1, p2) != Line(p2, p1) assert l1 != Line(Point(x1, x1), Point(y1, y1)) assert l1 != l3 assert Line(p1, p10) != Line(p10, p1) assert Line(p1, p10) != p1 assert p1 in l1 # is p1 on the line l1? 
assert p1 not in l3 assert s1 in Line(p1, p10) assert Ray(Point(0, 0), Point(0, 1)) in Ray(Point(0, 0), Point(0, 2)) assert Ray(Point(0, 0), Point(0, 2)) in Ray(Point(0, 0), Point(0, 1)) assert (r1 in s1) is False assert Segment(p1, p2) in s1 assert Ray(Point(x1, x1), Point(x1, 1 + x1)) != Ray(p1, Point(-1, 5)) assert Segment(p1, p2).midpoint == Point(S.Half, S.Half) assert Segment(p1, Point(-x1, x1)).length == sqrt(2 * (x1 ** 2)) assert l1.slope == 1 assert l3.slope is oo assert l4.slope == 0 assert Line(p1, Point(0, 1)).slope is oo assert Line(r1.source, r1.random_point()).slope == r1.slope assert Line(r2.source, r2.random_point()).slope == r2.slope assert Segment(Point(0, -1), Segment(p1, Point(0, 1)).random_point()).slope == Segment(p1, Point(0, 1)).slope assert l4.coefficients == (0, 1, 0) assert Line((-x, x), (-x + 1, x - 1)).coefficients == (1, 1, 0) assert Line(p1, Point(0, 1)).coefficients == (1, 0, 0) # issue 7963 r = Ray((0, 0), angle=x) assert r.subs(x, 3 * pi / 4) == Ray((0, 0), (-1, 1)) assert r.subs(x, 5 * pi / 4) == Ray((0, 0), (-1, -1)) assert r.subs(x, -pi / 4) == Ray((0, 0), (1, -1)) assert r.subs(x, pi / 2) == Ray((0, 0), (0, 1)) assert r.subs(x, -pi / 2) == Ray((0, 0), (0, -1)) for ind in range(0, 5): assert l3.random_point() in l3 assert p_r3.x >= p1.x and p_r3.y >= p1.y assert p_r4.x <= p2.x and p_r4.y <= p2.y assert p1.x <= p_s1.x <= p10.x and p1.y <= p_s1.y <= p10.y assert hash(s1) != hash(Segment(p10, p1)) assert s1.plot_interval() == [t, 0, 1] assert Line(p1, p10).plot_interval() == [t, -5, 5] assert Ray((0, 0), angle=pi / 4).plot_interval() == [t, 0, 10] def test_basic_properties_3d(): p1 = Point3D(0, 0, 0) p2 = Point3D(1, 1, 1) p3 = Point3D(x1, x1, x1) p5 = Point3D(x1, 1 + x1, 1) l1 = Line3D(p1, p2) l3 = Line3D(p3, p5) r1 = Ray3D(p1, Point3D(-1, 5, 0)) r3 = Ray3D(p1, p2) s1 = Segment3D(p1, p2) assert Line3D((1, 1, 1), direction_ratio=[2, 3, 4]) == Line3D(Point3D(1, 1, 1), Point3D(3, 4, 5)) assert Line3D((1, 1, 1), direction_ratio=[1, 5, 7]) == Line3D(Point3D(1, 1, 1), Point3D(2, 6, 8)) assert Line3D((1, 1, 1), direction_ratio=[1, 2, 3]) == Line3D(Point3D(1, 1, 1), Point3D(2, 3, 4)) assert Line3D(Line3D(p1, Point3D(0, 1, 0))) == Line3D(p1, Point3D(0, 1, 0)) assert Ray3D(Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))) == Ray3D(p1, Point3D(1, 0, 0)) assert Line3D(p1, p2) != Line3D(p2, p1) assert l1 != l3 assert l1 != Line3D(p3, Point3D(y1, y1, y1)) assert r3 != r1 assert Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 1)) in Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2)) assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2)) in Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 1)) assert p1 in l1 assert p1 not in l3 assert l1.direction_ratio == [1, 1, 1] assert s1.midpoint == Point3D(S.Half, S.Half, S.Half) # Test zdirection assert Ray3D(p1, Point3D(0, 0, -1)).zdirection is S.NegativeInfinity def test_contains(): p1 = Point(0, 0) r = Ray(p1, Point(4, 4)) r1 = Ray3D(p1, Point3D(0, 0, -1)) r2 = Ray3D(p1, Point3D(0, 1, 0)) r3 = Ray3D(p1, Point3D(0, 0, 1)) l = Line(Point(0, 1), Point(3, 4)) # Segment contains assert Point(0, (a + b) / 2) in Segment((0, a), (0, b)) assert Point((a + b) / 2, 0) in Segment((a, 0), (b, 0)) assert Point3D(0, 1, 0) in Segment3D((0, 1, 0), (0, 1, 0)) assert Point3D(1, 0, 0) in Segment3D((1, 0, 0), (1, 0, 0)) assert Segment3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).contains([]) is True assert Segment3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).contains( Segment3D(Point3D(2, 2, 2), Point3D(3, 2, 2))) is False # Line contains assert l.contains(Point(0, 1)) is True assert l.contains((0, 1)) 
is True assert l.contains((0, 0)) is False # Ray contains assert r.contains(p1) is True assert r.contains((1, 1)) is True assert r.contains((1, 3)) is False assert r.contains(Segment((1, 1), (2, 2))) is True assert r.contains(Segment((1, 2), (2, 5))) is False assert r.contains(Ray((2, 2), (3, 3))) is True assert r.contains(Ray((2, 2), (3, 5))) is False assert r1.contains(Segment3D(p1, Point3D(0, 0, -10))) is True assert r1.contains(Segment3D(Point3D(1, 1, 1), Point3D(2, 2, 2))) is False assert r2.contains(Point3D(0, 0, 0)) is True assert r3.contains(Point3D(0, 0, 0)) is True assert Ray3D(Point3D(1, 1, 1), Point3D(1, 0, 0)).contains([]) is False assert Line3D((0, 0, 0), (x, y, z)).contains((2 * x, 2 * y, 2 * z)) with warns(UserWarning): assert Line3D(p1, Point3D(0, 1, 0)).contains(Point(1.0, 1.0)) is False with warns(UserWarning): assert r3.contains(Point(1.0, 1.0)) is False def test_contains_nonreal_symbols(): u, v, w, z = symbols('u, v, w, z') l = Segment(Point(u, w), Point(v, z)) p = Point(u*Rational(2, 3) + v/3, w*Rational(2, 3) + z/3) assert l.contains(p) def test_distance_2d(): p1 = Point(0, 0) p2 = Point(1, 1) half = S.Half s1 = Segment(Point(0, 0), Point(1, 1)) s2 = Segment(Point(half, half), Point(1, 0)) r = Ray(p1, p2) assert s1.distance(Point(0, 0)) == 0 assert s1.distance((0, 0)) == 0 assert s2.distance(Point(0, 0)) == 2 ** half / 2 assert s2.distance(Point(Rational(3) / 2, Rational(3) / 2)) == 2 ** half assert Line(p1, p2).distance(Point(-1, 1)) == sqrt(2) assert Line(p1, p2).distance(Point(1, -1)) == sqrt(2) assert Line(p1, p2).distance(Point(2, 2)) == 0 assert Line(p1, p2).distance((-1, 1)) == sqrt(2) assert Line((0, 0), (0, 1)).distance(p1) == 0 assert Line((0, 0), (0, 1)).distance(p2) == 1 assert Line((0, 0), (1, 0)).distance(p1) == 0 assert Line((0, 0), (1, 0)).distance(p2) == 1 assert r.distance(Point(-1, -1)) == sqrt(2) assert r.distance(Point(1, 1)) == 0 assert r.distance(Point(-1, 1)) == sqrt(2) assert Ray((1, 1), (2, 2)).distance(Point(1.5, 3)) == 3 * sqrt(2) / 4 assert r.distance((1, 1)) == 0 def test_dimension_normalization(): with warns(UserWarning): assert Ray((1, 1), (2, 1, 2)) == Ray((1, 1, 0), (2, 1, 2)) def test_distance_3d(): p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1) p3 = Point3D(Rational(3) / 2, Rational(3) / 2, Rational(3) / 2) s1 = Segment3D(Point3D(0, 0, 0), Point3D(1, 1, 1)) s2 = Segment3D(Point3D(S.Half, S.Half, S.Half), Point3D(1, 0, 1)) r = Ray3D(p1, p2) assert s1.distance(p1) == 0 assert s2.distance(p1) == sqrt(3) / 2 assert s2.distance(p3) == 2 * sqrt(6) / 3 assert s1.distance((0, 0, 0)) == 0 assert s2.distance((0, 0, 0)) == sqrt(3) / 2 assert s1.distance(p1) == 0 assert s2.distance(p1) == sqrt(3) / 2 assert s2.distance(p3) == 2 * sqrt(6) / 3 assert s1.distance((0, 0, 0)) == 0 assert s2.distance((0, 0, 0)) == sqrt(3) / 2 # Line to point assert Line3D(p1, p2).distance(Point3D(-1, 1, 1)) == 2 * sqrt(6) / 3 assert Line3D(p1, p2).distance(Point3D(1, -1, 1)) == 2 * sqrt(6) / 3 assert Line3D(p1, p2).distance(Point3D(2, 2, 2)) == 0 assert Line3D(p1, p2).distance((2, 2, 2)) == 0 assert Line3D(p1, p2).distance((1, -1, 1)) == 2 * sqrt(6) / 3 assert Line3D((0, 0, 0), (0, 1, 0)).distance(p1) == 0 assert Line3D((0, 0, 0), (0, 1, 0)).distance(p2) == sqrt(2) assert Line3D((0, 0, 0), (1, 0, 0)).distance(p1) == 0 assert Line3D((0, 0, 0), (1, 0, 0)).distance(p2) == sqrt(2) # Ray to point assert r.distance(Point3D(-1, -1, -1)) == sqrt(3) assert r.distance(Point3D(1, 1, 1)) == 0 assert r.distance((-1, -1, -1)) == sqrt(3) assert r.distance((1, 1, 1)) == 0 assert 
Ray3D((0, 0, 0), (1, 1, 2)).distance((-1, -1, 2)) == 4 * sqrt(3) / 3 assert Ray3D((1, 1, 1), (2, 2, 2)).distance(Point3D(1.5, -3, -1)) == Rational(9) / 2 assert Ray3D((1, 1, 1), (2, 2, 2)).distance(Point3D(1.5, 3, 1)) == sqrt(78) / 6 def test_equals(): p1 = Point(0, 0) p2 = Point(1, 1) l1 = Line(p1, p2) l2 = Line((0, 5), slope=m) l3 = Line(Point(x1, x1), Point(x1, 1 + x1)) assert l1.perpendicular_line(p1.args).equals(Line(Point(0, 0), Point(1, -1))) assert l1.perpendicular_line(p1).equals(Line(Point(0, 0), Point(1, -1))) assert Line(Point(x1, x1), Point(y1, y1)).parallel_line(Point(-x1, x1)). \ equals(Line(Point(-x1, x1), Point(-y1, 2 * x1 - y1))) assert l3.parallel_line(p1.args).equals(Line(Point(0, 0), Point(0, -1))) assert l3.parallel_line(p1).equals(Line(Point(0, 0), Point(0, -1))) assert (l2.distance(Point(2, 3)) - 2 * abs(m + 1) / sqrt(m ** 2 + 1)).equals(0) assert Line3D(p1, Point3D(0, 1, 0)).equals(Point(1.0, 1.0)) is False assert Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).equals(Line3D(Point3D(-5, 0, 0), Point3D(-1, 0, 0))) is True assert Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).equals(Line3D(p1, Point3D(0, 1, 0))) is False assert Ray3D(p1, Point3D(0, 0, -1)).equals(Point(1.0, 1.0)) is False assert Ray3D(p1, Point3D(0, 0, -1)).equals(Ray3D(p1, Point3D(0, 0, -1))) is True assert Line3D((0, 0), (t, t)).perpendicular_line(Point(0, 1, 0)).equals( Line3D(Point3D(0, 1, 0), Point3D(S.Half, S.Half, 0))) assert Line3D((0, 0), (t, t)).perpendicular_segment(Point(0, 1, 0)).equals(Segment3D((0, 1), (S.Half, S.Half))) assert Line3D(p1, Point3D(0, 1, 0)).equals(Point(1.0, 1.0)) is False def test_equation(): p1 = Point(0, 0) p2 = Point(1, 1) l1 = Line(p1, p2) l3 = Line(Point(x1, x1), Point(x1, 1 + x1)) assert simplify(l1.equation()) in (x - y, y - x) assert simplify(l3.equation()) in (x - x1, x1 - x) assert simplify(l1.equation()) in (x - y, y - x) assert simplify(l3.equation()) in (x - x1, x1 - x) assert Line(p1, Point(1, 0)).equation(x=x, y=y) == y assert Line(p1, Point(0, 1)).equation() == x assert Line(Point(2, 0), Point(2, 1)).equation() == x - 2 assert Line(p2, Point(2, 1)).equation() == y - 1 assert Line3D(Point(x1, x1, x1), Point(y1, y1, y1) ).equation() == (-x + y, -x + z) assert Line3D(Point(1, 2, 3), Point(2, 3, 4) ).equation() == (-x + y - 1, -x + z - 2) assert Line3D(Point(1, 2, 3), Point(1, 3, 4) ).equation() == (x - 1, -y + z - 1) assert Line3D(Point(1, 2, 3), Point(2, 2, 4) ).equation() == (y - 2, -x + z - 2) assert Line3D(Point(1, 2, 3), Point(2, 3, 3) ).equation() == (-x + y - 1, z - 3) assert Line3D(Point(1, 2, 3), Point(1, 2, 4) ).equation() == (x - 1, y - 2) assert Line3D(Point(1, 2, 3), Point(1, 3, 3) ).equation() == (x - 1, z - 3) assert Line3D(Point(1, 2, 3), Point(2, 2, 3) ).equation() == (y - 2, z - 3) def test_intersection_2d(): p1 = Point(0, 0) p2 = Point(1, 1) p3 = Point(x1, x1) p4 = Point(y1, y1) l1 = Line(p1, p2) l3 = Line(Point(0, 0), Point(3, 4)) r1 = Ray(Point(1, 1), Point(2, 2)) r2 = Ray(Point(0, 0), Point(3, 4)) r4 = Ray(p1, p2) r6 = Ray(Point(0, 1), Point(1, 2)) r7 = Ray(Point(0.5, 0.5), Point(1, 1)) s1 = Segment(p1, p2) s2 = Segment(Point(0.25, 0.25), Point(0.5, 0.5)) s3 = Segment(Point(0, 0), Point(3, 4)) assert intersection(l1, p1) == [p1] assert intersection(l1, Point(x1, 1 + x1)) == [] assert intersection(l1, Line(p3, p4)) in [[l1], [Line(p3, p4)]] assert intersection(l1, l1.parallel_line(Point(x1, 1 + x1))) == [] assert intersection(l3, l3) == [l3] assert intersection(l3, r2) == [r2] assert intersection(l3, s3) == [s3] assert intersection(s3, l3) == 
[s3] assert intersection(Segment(Point(-10, 10), Point(10, 10)), Segment(Point(-5, -5), Point(-5, 5))) == [] assert intersection(r2, l3) == [r2] assert intersection(r1, Ray(Point(2, 2), Point(0, 0))) == [Segment(Point(1, 1), Point(2, 2))] assert intersection(r1, Ray(Point(1, 1), Point(-1, -1))) == [Point(1, 1)] assert intersection(r1, Segment(Point(0, 0), Point(2, 2))) == [Segment(Point(1, 1), Point(2, 2))] assert r4.intersection(s2) == [s2] assert r4.intersection(Segment(Point(2, 3), Point(3, 4))) == [] assert r4.intersection(Segment(Point(-1, -1), Point(0.5, 0.5))) == [Segment(p1, Point(0.5, 0.5))] assert r4.intersection(Ray(p2, p1)) == [s1] assert Ray(p2, p1).intersection(r6) == [] assert r4.intersection(r7) == r7.intersection(r4) == [r7] assert Ray3D((0, 0), (3, 0)).intersection(Ray3D((1, 0), (3, 0))) == [Ray3D((1, 0), (3, 0))] assert Ray3D((1, 0), (3, 0)).intersection(Ray3D((0, 0), (3, 0))) == [Ray3D((1, 0), (3, 0))] assert Ray(Point(0, 0), Point(0, 4)).intersection(Ray(Point(0, 1), Point(0, -1))) == \ [Segment(Point(0, 0), Point(0, 1))] assert Segment3D((0, 0), (3, 0)).intersection( Segment3D((1, 0), (2, 0))) == [Segment3D((1, 0), (2, 0))] assert Segment3D((1, 0), (2, 0)).intersection( Segment3D((0, 0), (3, 0))) == [Segment3D((1, 0), (2, 0))] assert Segment3D((0, 0), (3, 0)).intersection( Segment3D((3, 0), (4, 0))) == [Point3D((3, 0))] assert Segment3D((0, 0), (3, 0)).intersection( Segment3D((2, 0), (5, 0))) == [Segment3D((2, 0), (3, 0))] assert Segment3D((0, 0), (3, 0)).intersection( Segment3D((-2, 0), (1, 0))) == [Segment3D((0, 0), (1, 0))] assert Segment3D((0, 0), (3, 0)).intersection( Segment3D((-2, 0), (0, 0))) == [Point3D(0, 0)] assert s1.intersection(Segment(Point(1, 1), Point(2, 2))) == [Point(1, 1)] assert s1.intersection(Segment(Point(0.5, 0.5), Point(1.5, 1.5))) == [Segment(Point(0.5, 0.5), p2)] assert s1.intersection(Segment(Point(4, 4), Point(5, 5))) == [] assert s1.intersection(Segment(Point(-1, -1), p1)) == [p1] assert s1.intersection(Segment(Point(-1, -1), Point(0.5, 0.5))) == [Segment(p1, Point(0.5, 0.5))] assert s1.intersection(Line(Point(1, 0), Point(2, 1))) == [] assert s1.intersection(s2) == [s2] assert s2.intersection(s1) == [s2] assert asa(120, 8, 52) == \ Triangle( Point(0, 0), Point(8, 0), Point(-4 * cos(19 * pi / 90) / sin(2 * pi / 45), 4 * sqrt(3) * cos(19 * pi / 90) / sin(2 * pi / 45))) assert Line((0, 0), (1, 1)).intersection(Ray((1, 0), (1, 2))) == [Point(1, 1)] assert Line((0, 0), (1, 1)).intersection(Segment((1, 0), (1, 2))) == [Point(1, 1)] assert Ray((0, 0), (1, 1)).intersection(Ray((1, 0), (1, 2))) == [Point(1, 1)] assert Ray((0, 0), (1, 1)).intersection(Segment((1, 0), (1, 2))) == [Point(1, 1)] assert Ray((0, 0), (10, 10)).contains(Segment((1, 1), (2, 2))) is True assert Segment((1, 1), (2, 2)) in Line((0, 0), (10, 10)) assert s1.intersection(Ray((1, 1), (4, 4))) == [Point(1, 1)] # 16628 - this should be fast p0 = Point2D(Rational(249, 5), Rational(497999, 10000)) p1 = Point2D((-58977084786*sqrt(405639795226) + 2030690077184193 + 20112207807*sqrt(630547164901) + 99600*sqrt(255775022850776494562626)) /(2000*sqrt(255775022850776494562626) + 1991998000*sqrt(405639795226) + 1991998000*sqrt(630547164901) + 1622561172902000), (-498000*sqrt(255775022850776494562626) - 995999*sqrt(630547164901) + 90004251917891999 + 496005510002*sqrt(405639795226))/(10000*sqrt(255775022850776494562626) + 9959990000*sqrt(405639795226) + 9959990000*sqrt(630547164901) + 8112805864510000)) p2 = Point2D(Rational(497, 10), Rational(-497, 10)) p3 = Point2D(Rational(-497, 10), 
Rational(-497, 10)) l = Line(p0, p1) s = Segment(p2, p3) n = (-52673223862*sqrt(405639795226) - 15764156209307469 - 9803028531*sqrt(630547164901) + 33200*sqrt(255775022850776494562626)) d = sqrt(405639795226) + 315274080450 + 498000*sqrt( 630547164901) + sqrt(255775022850776494562626) assert intersection(l, s) == [ Point2D(n/d*Rational(3, 2000), Rational(-497, 10))] def test_line_intersection(): # see also test_issue_11238 in test_matrices.py x0 = tan(pi*Rational(13, 45)) x1 = sqrt(3) x2 = x0**2 x, y = [8*x0/(x0 + x1), (24*x0 - 8*x1*x2)/(x2 - 3)] assert Line(Point(0, 0), Point(1, -sqrt(3))).contains(Point(x, y)) is True def test_intersection_3d(): p1 = Point3D(0, 0, 0) p2 = Point3D(1, 1, 1) l1 = Line3D(p1, p2) l2 = Line3D(Point3D(0, 0, 0), Point3D(3, 4, 0)) r1 = Ray3D(Point3D(1, 1, 1), Point3D(2, 2, 2)) r2 = Ray3D(Point3D(0, 0, 0), Point3D(3, 4, 0)) s1 = Segment3D(Point3D(0, 0, 0), Point3D(3, 4, 0)) assert intersection(l1, p1) == [p1] assert intersection(l1, Point3D(x1, 1 + x1, 1)) == [] assert intersection(l1, l1.parallel_line(p1)) == [Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1))] assert intersection(l2, r2) == [r2] assert intersection(l2, s1) == [s1] assert intersection(r2, l2) == [r2] assert intersection(r1, Ray3D(Point3D(1, 1, 1), Point3D(-1, -1, -1))) == [Point3D(1, 1, 1)] assert intersection(r1, Segment3D(Point3D(0, 0, 0), Point3D(2, 2, 2))) == [ Segment3D(Point3D(1, 1, 1), Point3D(2, 2, 2))] assert intersection(Ray3D(Point3D(1, 0, 0), Point3D(-1, 0, 0)), Ray3D(Point3D(0, 1, 0), Point3D(0, -1, 0))) \ == [Point3D(0, 0, 0)] assert intersection(r1, Ray3D(Point3D(2, 2, 2), Point3D(0, 0, 0))) == \ [Segment3D(Point3D(1, 1, 1), Point3D(2, 2, 2))] assert intersection(s1, r2) == [s1] assert Line3D(Point3D(4, 0, 1), Point3D(0, 4, 1)).intersection(Line3D(Point3D(0, 0, 1), Point3D(4, 4, 1))) == \ [Point3D(2, 2, 1)] assert Line3D((0, 1, 2), (0, 2, 3)).intersection(Line3D((0, 1, 2), (0, 1, 1))) == [Point3D(0, 1, 2)] assert Line3D((0, 0), (t, t)).intersection(Line3D((0, 1), (t, t))) == \ [Point3D(t, t)] assert Ray3D(Point3D(0, 0, 0), Point3D(0, 4, 0)).intersection(Ray3D(Point3D(0, 1, 1), Point3D(0, -1, 1))) == [] def test_is_parallel(): p1 = Point3D(0, 0, 0) p2 = Point3D(1, 1, 1) p3 = Point3D(x1, x1, x1) l2 = Line(Point(x1, x1), Point(y1, y1)) l2_1 = Line(Point(x1, x1), Point(x1, 1 + x1)) assert Line.is_parallel(Line(Point(0, 0), Point(1, 1)), l2) assert Line.is_parallel(l2, Line(Point(x1, x1), Point(x1, 1 + x1))) is False assert Line.is_parallel(l2, l2.parallel_line(Point(-x1, x1))) assert Line.is_parallel(l2_1, l2_1.parallel_line(Point(0, 0))) assert Line3D(p1, p2).is_parallel(Line3D(p1, p2)) # same as in 2D assert Line3D(Point3D(4, 0, 1), Point3D(0, 4, 1)).is_parallel(Line3D(Point3D(0, 0, 1), Point3D(4, 4, 1))) is False assert Line3D(p1, p2).parallel_line(p3) == Line3D(Point3D(x1, x1, x1), Point3D(x1 + 1, x1 + 1, x1 + 1)) assert Line3D(p1, p2).parallel_line(p3.args) == \ Line3D(Point3D(x1, x1, x1), Point3D(x1 + 1, x1 + 1, x1 + 1)) assert Line3D(Point3D(4, 0, 1), Point3D(0, 4, 1)).is_parallel(Line3D(Point3D(0, 0, 1), Point3D(4, 4, 1))) is False def test_is_perpendicular(): p1 = Point(0, 0) p2 = Point(1, 1) l1 = Line(p1, p2) l2 = Line(Point(x1, x1), Point(y1, y1)) l1_1 = Line(p1, Point(-x1, x1)) # 2D assert Line.is_perpendicular(l1, l1_1) assert Line.is_perpendicular(l1, l2) is False p = l1.random_point() assert l1.perpendicular_segment(p) == p # 3D assert Line3D.is_perpendicular(Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)), Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))) is True assert 
Line3D.is_perpendicular(Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)), Line3D(Point3D(0, 1, 0), Point3D(1, 1, 0))) is False assert Line3D.is_perpendicular(Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1)), Line3D(Point3D(x1, x1, x1), Point3D(y1, y1, y1))) is False def test_is_similar(): p1 = Point(2000, 2000) p2 = p1.scale(2, 2) r1 = Ray3D(Point3D(1, 1, 1), Point3D(1, 0, 0)) r2 = Ray(Point(0, 0), Point(0, 1)) s1 = Segment(Point(0, 0), p1) assert s1.is_similar(Segment(p1, p2)) assert s1.is_similar(r2) is False assert r1.is_similar(Line3D(Point3D(1, 1, 1), Point3D(1, 0, 0))) is True assert r1.is_similar(Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))) is False def test_length(): s2 = Segment3D(Point3D(x1, x1, x1), Point3D(y1, y1, y1)) assert Line(Point(0, 0), Point(1, 1)).length is oo assert s2.length == sqrt(3) * sqrt((x1 - y1) ** 2) assert Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1)).length is oo def test_projection(): p1 = Point(0, 0) p2 = Point3D(0, 0, 0) p3 = Point(-x1, x1) l1 = Line(p1, Point(1, 1)) l2 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)) l3 = Line3D(p2, Point3D(1, 1, 1)) r1 = Ray(Point(1, 1), Point(2, 2)) assert Line(Point(x1, x1), Point(y1, y1)).projection(Point(y1, y1)) == Point(y1, y1) assert Line(Point(x1, x1), Point(x1, 1 + x1)).projection(Point(1, 1)) == Point(x1, 1) assert Segment(Point(-2, 2), Point(0, 4)).projection(r1) == Segment(Point(-1, 3), Point(0, 4)) assert Segment(Point(0, 4), Point(-2, 2)).projection(r1) == Segment(Point(0, 4), Point(-1, 3)) assert l1.projection(p3) == p1 assert l1.projection(Ray(p1, Point(-1, 5))) == Ray(Point(0, 0), Point(2, 2)) assert l1.projection(Ray(p1, Point(-1, 1))) == p1 assert r1.projection(Ray(Point(1, 1), Point(-1, -1))) == Point(1, 1) assert r1.projection(Ray(Point(0, 4), Point(-1, -5))) == Segment(Point(1, 1), Point(2, 2)) assert r1.projection(Segment(Point(-1, 5), Point(-5, -10))) == Segment(Point(1, 1), Point(2, 2)) assert r1.projection(Ray(Point(1, 1), Point(-1, -1))) == Point(1, 1) assert r1.projection(Ray(Point(0, 4), Point(-1, -5))) == Segment(Point(1, 1), Point(2, 2)) assert r1.projection(Segment(Point(-1, 5), Point(-5, -10))) == Segment(Point(1, 1), Point(2, 2)) assert l3.projection(Ray3D(p2, Point3D(-1, 5, 0))) == Ray3D(Point3D(0, 0, 0), Point3D(Rational(4, 3), Rational(4, 3), Rational(4, 3))) assert l3.projection(Ray3D(p2, Point3D(-1, 1, 1))) == Ray3D(Point3D(0, 0, 0), Point3D(Rational(1, 3), Rational(1, 3), Rational(1, 3))) assert l2.projection(Point3D(5, 5, 0)) == Point3D(5, 0) assert l2.projection(Line3D(Point3D(0, 1, 0), Point3D(1, 1, 0))).equals(l2) def test_perpendicular_bisector(): s1 = Segment(Point(0, 0), Point(1, 1)) aline = Line(Point(S.Half, S.Half), Point(Rational(3, 2), Rational(-1, 2))) on_line = Segment(Point(S.Half, S.Half), Point(Rational(3, 2), Rational(-1, 2))).midpoint assert s1.perpendicular_bisector().equals(aline) assert s1.perpendicular_bisector(on_line).equals(Segment(s1.midpoint, on_line)) assert s1.perpendicular_bisector(on_line + (1, 0)).equals(aline) def test_raises(): d, e = symbols('a,b', real=True) s = Segment((d, 0), (e, 0)) raises(TypeError, lambda: Line((1, 1), 1)) raises(ValueError, lambda: Line(Point(0, 0), Point(0, 0))) raises(Undecidable, lambda: Point(2 * d, 0) in s) raises(ValueError, lambda: Ray3D(Point(1.0, 1.0))) raises(ValueError, lambda: Line3D(Point3D(0, 0, 0), Point3D(0, 0, 0))) raises(TypeError, lambda: Line3D((1, 1), 1)) raises(ValueError, lambda: Line3D(Point3D(0, 0, 0))) raises(TypeError, lambda: Ray((1, 1), 1)) raises(GeometryError, lambda: Line(Point(0, 0), Point(1, 0)) 
.projection(Circle(Point(0, 0), 1))) def test_ray_generation(): assert Ray((1, 1), angle=pi / 4) == Ray((1, 1), (2, 2)) assert Ray((1, 1), angle=pi / 2) == Ray((1, 1), (1, 2)) assert Ray((1, 1), angle=-pi / 2) == Ray((1, 1), (1, 0)) assert Ray((1, 1), angle=-3 * pi / 2) == Ray((1, 1), (1, 2)) assert Ray((1, 1), angle=5 * pi / 2) == Ray((1, 1), (1, 2)) assert Ray((1, 1), angle=5.0 * pi / 2) == Ray((1, 1), (1, 2)) assert Ray((1, 1), angle=pi) == Ray((1, 1), (0, 1)) assert Ray((1, 1), angle=3.0 * pi) == Ray((1, 1), (0, 1)) assert Ray((1, 1), angle=4.0 * pi) == Ray((1, 1), (2, 1)) assert Ray((1, 1), angle=0) == Ray((1, 1), (2, 1)) assert Ray((1, 1), angle=4.05 * pi) == Ray(Point(1, 1), Point(2, -sqrt(5) * sqrt(2 * sqrt(5) + 10) / 4 - sqrt( 2 * sqrt(5) + 10) / 4 + 2 + sqrt(5))) assert Ray((1, 1), angle=4.02 * pi) == Ray(Point(1, 1), Point(2, 1 + tan(4.02 * pi))) assert Ray((1, 1), angle=5) == Ray((1, 1), (2, 1 + tan(5))) assert Ray3D((1, 1, 1), direction_ratio=[4, 4, 4]) == Ray3D(Point3D(1, 1, 1), Point3D(5, 5, 5)) assert Ray3D((1, 1, 1), direction_ratio=[1, 2, 3]) == Ray3D(Point3D(1, 1, 1), Point3D(2, 3, 4)) assert Ray3D((1, 1, 1), direction_ratio=[1, 1, 1]) == Ray3D(Point3D(1, 1, 1), Point3D(2, 2, 2)) def test_symbolic_intersect(): # Issue 7814. circle = Circle(Point(x, 0), y) line = Line(Point(k, z), slope=0) assert line.intersection(circle) == [Point(x + sqrt((y - z) * (y + z)), z), Point(x - sqrt((y - z) * (y + z)), z)] def test_issue_2941(): def _check(): for f, g in cartes(*[(Line, Ray, Segment)] * 2): l1 = f(a, b) l2 = g(c, d) assert l1.intersection(l2) == l2.intersection(l1) # intersect at end point c, d = (-2, -2), (-2, 0) a, b = (0, 0), (1, 1) _check() # midline intersection c, d = (-2, -3), (-2, 0) _check() def test_parameter_value(): t = Symbol('t') p1, p2 = Point(0, 1), Point(5, 6) l = Line(p1, p2) assert l.parameter_value((5, 6), t) == {t: 1} raises(ValueError, lambda: l.parameter_value((0, 0), t))
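# --- Illustrative sketch (not part of the original test module above) ---
# A minimal, self-contained example of the intersection semantics exercised by
# the tests above: overlapping linear entities intersect in a Segment, entities
# that merely touch intersect in a Point, and disjoint ones give []. All names
# come from sympy.geometry; the specific coordinates are example data.
from sympy import Point, Ray, Segment, intersection

_sa = Segment(Point(0, 0), Point(3, 0))
assert intersection(_sa, Segment(Point(2, 0), Point(5, 0))) == [Segment(Point(2, 0), Point(3, 0))]
assert intersection(_sa, Segment(Point(3, 0), Point(4, 0))) == [Point(3, 0)]
assert intersection(_sa, Segment(Point(4, 0), Point(5, 0))) == []
assert Ray(Point(0, 0), Point(1, 1)).intersection(Segment(Point(1, 0), Point(1, 2))) == [Point(1, 1)]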
#!/usr/bin/env python """Distutils based setup script for SymPy. This uses Distutils (https://python.org/sigs/distutils-sig/) the standard python mechanism for installing packages. Optionally, you can use Setuptools (https://setuptools.readthedocs.io/en/latest/) to automatically handle dependencies. For the easiest installation just type the command (you'll probably need root privileges for that): python setup.py install This will install the library in the default location. For instructions on how to customize the install procedure read the output of: python setup.py --help install In addition, there are some other commands: python setup.py clean -> will clean all trash (*.pyc and stuff) python setup.py test -> will run the complete test suite python setup.py bench -> will run the complete benchmark suite python setup.py audit -> will run pyflakes checker on source code To get a full list of available commands, read the output of: python setup.py --help-commands Or, if all else fails, feel free to write to the sympy list at [email protected] and ask for help. """ import sys import os import shutil import glob import subprocess from distutils.command.sdist import sdist min_mpmath_version = '0.19' # This directory dir_setup = os.path.dirname(os.path.realpath(__file__)) extra_kwargs = {} try: from setuptools import setup, Command extra_kwargs['zip_safe'] = False extra_kwargs['entry_points'] = { 'console_scripts': [ 'isympy = isympy:main', ] } except ImportError: from distutils.core import setup, Command extra_kwargs['scripts'] = ['bin/isympy'] # handle mpmath deps in the hard way: from distutils.version import LooseVersion try: import mpmath if mpmath.__version__ < LooseVersion(min_mpmath_version): raise ImportError except ImportError: print("Please install the mpmath package with a version >= %s" % min_mpmath_version) sys.exit(-1) PY3 = sys.version_info[0] > 2 # Make sure I have the right Python version. if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 5)): print("SymPy requires Python 2.7 or 3.5 or newer. 
Python %d.%d detected" % sys.version_info[:2]) sys.exit(-1) # Check that this list is uptodate against the result of the command: # python bin/generate_module_list.py modules = [ 'sympy.algebras', 'sympy.assumptions', 'sympy.assumptions.handlers', 'sympy.benchmarks', 'sympy.calculus', 'sympy.categories', 'sympy.codegen', 'sympy.combinatorics', 'sympy.concrete', 'sympy.core', 'sympy.core.benchmarks', 'sympy.crypto', 'sympy.deprecated', 'sympy.diffgeom', 'sympy.discrete', 'sympy.external', 'sympy.functions', 'sympy.functions.combinatorial', 'sympy.functions.elementary', 'sympy.functions.elementary.benchmarks', 'sympy.functions.special', 'sympy.functions.special.benchmarks', 'sympy.geometry', 'sympy.holonomic', 'sympy.integrals', 'sympy.integrals.benchmarks', 'sympy.integrals.rubi', 'sympy.integrals.rubi.parsetools', 'sympy.integrals.rubi.rubi_tests', 'sympy.integrals.rubi.rules', 'sympy.interactive', 'sympy.liealgebras', 'sympy.logic', 'sympy.logic.algorithms', 'sympy.logic.utilities', 'sympy.matrices', 'sympy.matrices.benchmarks', 'sympy.matrices.expressions', 'sympy.multipledispatch', 'sympy.ntheory', 'sympy.parsing', 'sympy.parsing.autolev', 'sympy.parsing.autolev._antlr', 'sympy.parsing.c', 'sympy.parsing.fortran', 'sympy.parsing.latex', 'sympy.parsing.latex._antlr', 'sympy.physics', 'sympy.physics.continuum_mechanics', 'sympy.physics.hep', 'sympy.physics.mechanics', 'sympy.physics.optics', 'sympy.physics.quantum', 'sympy.physics.units', 'sympy.physics.units.definitions', 'sympy.physics.units.systems', 'sympy.physics.vector', 'sympy.plotting', 'sympy.plotting.intervalmath', 'sympy.plotting.pygletplot', 'sympy.polys', 'sympy.polys.agca', 'sympy.polys.benchmarks', 'sympy.polys.domains', 'sympy.printing', 'sympy.printing.pretty', 'sympy.sandbox', 'sympy.series', 'sympy.series.benchmarks', 'sympy.sets', 'sympy.sets.handlers', 'sympy.simplify', 'sympy.solvers', 'sympy.solvers.benchmarks', 'sympy.solvers.diophantine', 'sympy.solvers.ode', 'sympy.stats', 'sympy.strategies', 'sympy.strategies.branch', 'sympy.tensor', 'sympy.tensor.array', 'sympy.testing', 'sympy.unify', 'sympy.utilities', 'sympy.utilities._compilation', 'sympy.utilities.mathml', 'sympy.vector', ] class audit(Command): """Audits SymPy's source code for following issues: - Names which are used but not defined or used before they are defined. - Names which are redefined without having been used. """ description = "Audit SymPy source with PyFlakes" user_options = [] def initialize_options(self): self.all = None def finalize_options(self): pass def run(self): import os try: import pyflakes.scripts.pyflakes as flakes except ImportError: print("In order to run the audit, you need to have PyFlakes installed.") sys.exit(-1) dirs = (os.path.join(*d) for d in (m.split('.') for m in modules)) warns = 0 for dir in dirs: for filename in os.listdir(dir): if filename.endswith('.py') and filename != '__init__.py': warns += flakes.checkPath(os.path.join(dir, filename)) if warns > 0: print("Audit finished with total %d warnings" % warns) class clean(Command): """Cleans *.pyc and debian trashs, so you should get the same copy as is in the VCS. 
""" description = "remove build files" user_options = [("all", "a", "the same")] def initialize_options(self): self.all = None def finalize_options(self): pass def run(self): curr_dir = os.getcwd() for root, dirs, files in os.walk(dir_setup): for file in files: if file.endswith('.pyc') and os.path.isfile: os.remove(os.path.join(root, file)) os.chdir(dir_setup) names = ["python-build-stamp-2.4", "MANIFEST", "build", "dist", "doc/_build", "sample.tex"] for f in names: if os.path.isfile(f): os.remove(f) elif os.path.isdir(f): shutil.rmtree(f) for name in glob.glob(os.path.join(dir_setup, "doc", "src", "modules", "physics", "vector", "*.pdf")): if os.path.isfile(name): os.remove(name) os.chdir(curr_dir) class test_sympy(Command): """Runs all tests under the sympy/ folder """ description = "run all tests and doctests; also see bin/test and bin/doctest" user_options = [] # distutils complains if this is not here. def __init__(self, *args): self.args = args[0] # so we can pass it to other classes Command.__init__(self, *args) def initialize_options(self): # distutils wants this pass def finalize_options(self): # this too pass def run(self): from sympy.utilities import runtests runtests.run_all_tests() class run_benchmarks(Command): """Runs all SymPy benchmarks""" description = "run all benchmarks" user_options = [] # distutils complains if this is not here. def __init__(self, *args): self.args = args[0] # so we can pass it to other classes Command.__init__(self, *args) def initialize_options(self): # distutils wants this pass def finalize_options(self): # this too pass # we use py.test like architecture: # # o collector -- collects benchmarks # o runner -- executes benchmarks # o presenter -- displays benchmarks results # # this is done in sympy.utilities.benchmarking on top of py.test def run(self): from sympy.utilities import benchmarking benchmarking.main(['sympy']) class antlr(Command): """Generate code with antlr4""" description = "generate parser code from antlr grammars" user_options = [] # distutils complains if this is not here. def __init__(self, *args): self.args = args[0] # so we can pass it to other classes Command.__init__(self, *args) def initialize_options(self): # distutils wants this pass def finalize_options(self): # this too pass def run(self): from sympy.parsing.latex._build_latex_antlr import build_parser if not build_parser(): sys.exit(-1) class sdist_sympy(sdist): def run(self): # Fetch git commit hash and write down to commit_hash.txt before # shipped in tarball. commit_hash = None commit_hash_filepath = 'doc/commit_hash.txt' try: commit_hash = \ subprocess.check_output(['git', 'rev-parse', 'HEAD']) commit_hash = commit_hash.decode('ascii') commit_hash = commit_hash.rstrip() print('Commit hash found : {}.'.format(commit_hash)) print('Writing it to {}.'.format(commit_hash_filepath)) except: pass if commit_hash: with open(commit_hash_filepath, 'w') as f: f.write(commit_hash) super(sdist_sympy, self).run() try: os.remove(commit_hash_filepath) print( 'Successfully removed temporary file {}.' .format(commit_hash_filepath)) except OSError as e: print("Error deleting %s - %s." 
% (e.filename, e.strerror)) # Check that this list is uptodate against the result of the command: # python bin/generate_test_list.py tests = [ 'sympy.algebras.tests', 'sympy.assumptions.tests', 'sympy.calculus.tests', 'sympy.categories.tests', 'sympy.codegen.tests', 'sympy.combinatorics.tests', 'sympy.concrete.tests', 'sympy.core.tests', 'sympy.crypto.tests', 'sympy.deprecated.tests', 'sympy.diffgeom.tests', 'sympy.discrete.tests', 'sympy.external.tests', 'sympy.functions.combinatorial.tests', 'sympy.functions.elementary.tests', 'sympy.functions.special.tests', 'sympy.geometry.tests', 'sympy.holonomic.tests', 'sympy.integrals.rubi.parsetools.tests', 'sympy.integrals.rubi.rubi_tests.tests', 'sympy.integrals.rubi.tests', 'sympy.integrals.tests', 'sympy.interactive.tests', 'sympy.liealgebras.tests', 'sympy.logic.tests', 'sympy.matrices.expressions.tests', 'sympy.matrices.tests', 'sympy.multipledispatch.tests', 'sympy.ntheory.tests', 'sympy.parsing.tests', 'sympy.physics.continuum_mechanics.tests', 'sympy.physics.hep.tests', 'sympy.physics.mechanics.tests', 'sympy.physics.optics.tests', 'sympy.physics.quantum.tests', 'sympy.physics.tests', 'sympy.physics.units.tests', 'sympy.physics.vector.tests', 'sympy.plotting.intervalmath.tests', 'sympy.plotting.pygletplot.tests', 'sympy.plotting.tests', 'sympy.polys.agca.tests', 'sympy.polys.domains.tests', 'sympy.polys.tests', 'sympy.printing.pretty.tests', 'sympy.printing.tests', 'sympy.sandbox.tests', 'sympy.series.tests', 'sympy.sets.tests', 'sympy.simplify.tests', 'sympy.solvers.diophantine.tests', 'sympy.solvers.ode.tests', 'sympy.solvers.tests', 'sympy.stats.tests', 'sympy.strategies.branch.tests', 'sympy.strategies.tests', 'sympy.tensor.array.tests', 'sympy.tensor.tests', 'sympy.testing.tests', 'sympy.unify.tests', 'sympy.utilities._compilation.tests', 'sympy.utilities.tests', 'sympy.vector.tests', ] long_description = '''SymPy is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) while keeping the code as simple as possible in order to be comprehensible and easily extensible. 
SymPy is written entirely in Python.''' with open(os.path.join(dir_setup, 'sympy', 'release.py')) as f: # Defines __version__ exec(f.read()) with open(os.path.join(dir_setup, 'sympy', '__init__.py')) as f: long_description = f.read().split('"""')[1] if __name__ == '__main__': setup(name='sympy', version=__version__, description='Computer algebra system (CAS) in Python', long_description=long_description, author='SymPy development team', author_email='[email protected]', license='BSD', keywords="Math CAS", url='https://sympy.org', py_modules=['isympy'], packages=['sympy'] + modules + tests, ext_modules=[], package_data={ 'sympy.utilities.mathml': ['data/*.xsl'], 'sympy.logic.benchmarks': ['input/*.cnf'], 'sympy.parsing.autolev': [ '*.g4', 'test-examples/*.al', 'test-examples/*.py', 'test-examples/pydy-example-repo/*.al', 'test-examples/pydy-example-repo/*.py', ], 'sympy.parsing.latex': ['*.txt', '*.g4'], 'sympy.integrals.rubi.parsetools': ['header.py.txt'], 'sympy.plotting.tests': ['test_region_*.png'], }, data_files=[('share/man/man1', ['doc/man/isympy.1'])], cmdclass={'test': test_sympy, 'bench': run_benchmarks, 'clean': clean, 'audit': audit, 'antlr': antlr, 'sdist': sdist_sympy, }, python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*', classifiers=[ 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Scientific/Engineering :: Physics', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', ], install_requires=[ 'mpmath>=%s' % min_mpmath_version, ], **extra_kwargs )
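# --- Illustrative sketch (not executed by setup.py) ---
# The mpmath gate near the top of this script compares version strings with
# distutils' LooseVersion, which splits dotted versions into components and
# compares them numerically. The version strings below are example data.
from distutils.version import LooseVersion

assert LooseVersion('0.18') < LooseVersion('0.19')
assert not (LooseVersion('1.1.0') < LooseVersion('0.19'))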
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import

import os
from itertools import chain
import json
import sys
import warnings
import pytest

from sympy.testing.runtests import setup_pprint, _get_doctest_blacklist

durations_path = os.path.join(os.path.dirname(__file__), '.ci', 'durations.json')
blacklist_path = os.path.join(os.path.dirname(__file__), '.ci', 'blacklisted.json')

# Collecting tests from rubi_tests under pytest leads to errors even if the
# tests will be skipped.
collect_ignore = ["sympy/integrals/rubi"] + _get_doctest_blacklist()

if sys.version_info < (3,):
    collect_ignore.append('doc/src/gotchas.rst')

# Set up printing for doctests
setup_pprint()
sys.__displayhook__ = sys.displayhook
#from sympy import pprint_use_unicode
#pprint_use_unicode(False)


def _mk_group(group_dict):
    return list(chain(*[[k+'::'+v for v in files] for k, files in group_dict.items()]))

if os.path.exists(durations_path):
    veryslow_group, slow_group = [_mk_group(group_dict) for group_dict
                                  in json.loads(open(durations_path, 'rt').read())]
else:
    # warnings in conftest has issues: https://github.com/pytest-dev/pytest/issues/2891
    warnings.warn("conftest.py:22: Could not find %s, --quickcheck and --veryquickcheck will have no effect.\n" % durations_path)
    veryslow_group, slow_group = [], []

if os.path.exists(blacklist_path):
    blacklist_group = _mk_group(json.loads(open(blacklist_path, 'rt').read()))
else:
    warnings.warn("conftest.py:28: Could not find %s, no tests will be skipped due to blacklisting\n" % blacklist_path)
    blacklist_group = []


def pytest_addoption(parser):
    parser.addoption("--quickcheck", dest="runquick", action="store_true",
                     help="Skip very slow tests (see ./ci/parse_durations_log.py)")
    parser.addoption("--veryquickcheck", dest="runveryquick", action="store_true",
                     help="Skip slow & very slow (see ./ci/parse_durations_log.py)")


def pytest_configure(config):
    # register an additional marker
    config.addinivalue_line("markers", "slow: manually marked test as slow (use .ci/durations.json instead)")
    config.addinivalue_line("markers", "quickcheck: skip very slow tests")
    config.addinivalue_line("markers", "veryquickcheck: skip slow & very slow tests")


def pytest_runtest_setup(item):
    if isinstance(item, pytest.Function):
        if item.nodeid in veryslow_group and (item.config.getvalue("runquick") or
                                              item.config.getvalue("runveryquick")):
            pytest.skip("very slow test, skipping since --quickcheck or --veryquickcheck was passed.")
            return
        if item.nodeid in slow_group and item.config.getvalue("runveryquick"):
            pytest.skip("slow test, skipping since --veryquickcheck was passed.")
            return

    if item.nodeid in blacklist_group:
        pytest.skip("blacklisted test, see %s" % blacklist_path)
        return
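# --- Illustrative sketch (not part of conftest.py; example data only) ---
# _mk_group above flattens the {test_file: [test_name, ...]} dictionaries stored
# in .ci/durations.json into pytest node ids; pytest_runtest_setup then matches
# item.nodeid against those groups to decide what to skip under
# --quickcheck/--veryquickcheck.
from itertools import chain

def _mk_group_sketch(group_dict):
    return list(chain(*[[k + '::' + v for v in files] for k, files in group_dict.items()]))

_example = {'sympy/core/tests/test_basic.py': ['test_a', 'test_b']}
assert _mk_group_sketch(_example) == ['sympy/core/tests/test_basic.py::test_a',
                                      'sympy/core/tests/test_basic.py::test_b']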
#!/usr/bin/env python
#
# Tests that a useful message is given in the ImportError when trying to import
# sympy from Python 2. This is tested on Travis to ensure that we don't get a
# Py2 SyntaxError from sympy/__init__.py

import sys
assert sys.version_info[:2] == (2, 7), "This test is for Python 2.7 only"

import os
thisdir = os.path.dirname(__file__)
parentdir = os.path.normpath(os.path.join(thisdir, '..'))

# Append the SymPy root directory to path
sys.path.append(parentdir)

try:
    import sympy
except ImportError as exc:
    message = str(exc)
    # "Python version 3.5 or above is required for SymPy."
    assert message.startswith("Python version")
    assert message.endswith(" or above is required for SymPy.")
else:
    raise AssertionError("import sympy should give ImportError on Python 2.7")
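# --- Illustrative sketch (example string only, not part of the test above) ---
# The checks above target the message format raised by sympy/__init__.py on an
# unsupported Python version.
_message = "Python version 3.5 or above is required for SymPy."
assert _message.startswith("Python version")
assert _message.endswith(" or above is required for SymPy.")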
#!/usr/bin/env python """ Program to test that all methods/functions have at least one example doctest. Also checks if docstrings are imported into Sphinx. For this to work, the Sphinx docs need to be built first. Use "cd doc; make html" to build the Sphinx docs. Usage: ./bin/coverage_doctest.py sympy/core or ./bin/coverage_doctest.py sympy/core/basic.py If no arguments are given, all files in sympy/ are checked. """ from __future__ import print_function import os import sys import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter try: from HTMLParser import HTMLParser except ImportError: # It's html.parser in Python 3 from html.parser import HTMLParser from sympy.utilities.misc import filldedent # Load color templates, duplicated from sympy/testing/runtests.py color_templates = ( ("Black", "0;30"), ("Red", "0;31"), ("Green", "0;32"), ("Brown", "0;33"), ("Blue", "0;34"), ("Purple", "0;35"), ("Cyan", "0;36"), ("LightGray", "0;37"), ("DarkGray", "1;30"), ("LightRed", "1;31"), ("LightGreen", "1;32"), ("Yellow", "1;33"), ("LightBlue", "1;34"), ("LightPurple", "1;35"), ("LightCyan", "1;36"), ("White", "1;37"), ) colors = {} for name, value in color_templates: colors[name] = value c_normal = '\033[0m' c_color = '\033[%sm' def print_header(name, underline=None, color=None): print() if color: print("%s%s%s" % (c_color % colors[color], name, c_normal)) else: print(name) if underline and not color: print(underline*len(name)) def print_coverage(module_path, c, c_md, c_mdt, c_idt, c_sph, f, f_md, f_mdt, f_idt, f_sph, score, total_doctests, total_members, sphinx_score, total_sphinx, verbose=False, no_color=False, sphinx=True): """ Prints details (depending on verbose) of a module """ doctest_color = "Brown" sphinx_color = "DarkGray" less_100_color = "Red" less_50_color = "LightRed" equal_100_color = "Green" big_header_color = "LightPurple" small_header_color = "Purple" if no_color: score_string = "Doctests: %s%% (%s of %s)" % (score, total_doctests, total_members) elif score < 100: if score < 50: score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[doctest_color], c_normal, c_color % colors[less_50_color], score, total_doctests, total_members, c_normal) else: score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[doctest_color], c_normal, c_color % colors[less_100_color], score, total_doctests, total_members, c_normal) else: score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[doctest_color], c_normal, c_color % colors[equal_100_color], score, total_doctests, total_members, c_normal) if sphinx: if no_color: sphinx_score_string = "Sphinx: %s%% (%s of %s)" % (sphinx_score, total_members - total_sphinx, total_members) elif sphinx_score < 100: if sphinx_score < 50: sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[sphinx_color], c_normal, c_color % colors[less_50_color], sphinx_score, total_members - total_sphinx, total_members, c_normal) else: sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[sphinx_color], c_normal, c_color % colors[less_100_color], sphinx_score, total_members - total_sphinx, total_members, c_normal) else: sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[sphinx_color], c_normal, c_color % colors[equal_100_color], sphinx_score, total_members - total_sphinx, total_members, c_normal) if verbose: print('\n' + '-'*70) print(module_path) print('-'*70) else: if sphinx: print("%s: %s %s" % (module_path, score_string, 
sphinx_score_string)) else: print("%s: %s" % (module_path, score_string)) if verbose: print_header('CLASSES', '*', not no_color and big_header_color) if not c: print_header('No classes found!') else: if c_md: print_header('Missing docstrings', '-', not no_color and small_header_color) for md in c_md: print(' * ' + md) if c_mdt: print_header('Missing doctests', '-', not no_color and small_header_color) for md in c_mdt: print(' * ' + md) if c_idt: # Use "# indirect doctest" in the docstring to # suppress this warning. print_header('Indirect doctests', '-', not no_color and small_header_color) for md in c_idt: print(' * ' + md) print('\n Use \"# indirect doctest\" in the docstring to suppress this warning') if c_sph: print_header('Not imported into Sphinx', '-', not no_color and small_header_color) for md in c_sph: print(' * ' + md) print_header('FUNCTIONS', '*', not no_color and big_header_color) if not f: print_header('No functions found!') else: if f_md: print_header('Missing docstrings', '-', not no_color and small_header_color) for md in f_md: print(' * ' + md) if f_mdt: print_header('Missing doctests', '-', not no_color and small_header_color) for md in f_mdt: print(' * ' + md) if f_idt: print_header('Indirect doctests', '-', not no_color and small_header_color) for md in f_idt: print(' * ' + md) print('\n Use \"# indirect doctest\" in the docstring to suppress this warning') if f_sph: print_header('Not imported into Sphinx', '-', not no_color and small_header_color) for md in f_sph: print(' * ' + md) if verbose: print('\n' + '-'*70) print(score_string) if sphinx: print(sphinx_score_string) print('-'*70) def _is_indirect(member, doc): """ Given string repr of doc and member checks if the member contains indirect documentation """ d = member in doc e = 'indirect doctest' in doc if not d and not e: return True else: return False def _get_arg_list(name, fobj): """ Given a function object, constructs a list of arguments and their defaults. Takes care of varargs and kwargs """ trunc = 20 # Sometimes argument length can be huge argspec = inspect.getargspec(fobj) arg_list = [] if argspec.args: for arg in argspec.args: arg_list.append(str(arg)) arg_list.reverse() # Now add the defaults if argspec.defaults: for i in range(len(argspec.defaults)): arg_list[i] = str(arg_list[i]) + '=' + str(argspec.defaults[-i]) # Get the list in right order arg_list.reverse() # Add var args if argspec.varargs: arg_list.append(argspec.varargs) if argspec.keywords: arg_list.append(argspec.keywords) # Truncate long arguments arg_list = [x[:trunc] for x in arg_list] # Construct the parameter string (enclosed in brackets) str_param = "%s(%s)" % (name, ', '.join(arg_list)) return str_param def get_mod_name(path, base): """ Gets a module name, given the path of file/dir and base dir of sympy """ rel_path = os.path.relpath(path, base) # Remove the file extension rel_path, ign = os.path.splitext(rel_path) # Replace separators by . for module path file_module = "" h, t = os.path.split(rel_path) while h or t: if t: file_module = t + '.' 
+ file_module h, t = os.path.split(h) return file_module[:-1] class FindInSphinx(HTMLParser): is_imported = [] def handle_starttag(self, tag, attr): a = dict(attr) if tag == "div" and a.get('class', None) == "viewcode-block": self.is_imported.append(a['id']) def find_sphinx(name, mod_path, found={}): if mod_path in found: # Cache results return name in found[mod_path] doc_path = mod_path.split('.') doc_path[-1] += '.html' sphinx_path = os.path.join(sympy_top, 'doc', '_build', 'html', '_modules', *doc_path) if not os.path.exists(sphinx_path): return False with open(sphinx_path) as f: html_txt = f.read() p = FindInSphinx() p.feed(html_txt) found[mod_path] = p.is_imported return name in p.is_imported def process_function(name, c_name, b_obj, mod_path, f_sk, f_md, f_mdt, f_idt, f_has_doctest, sk_list, sph, sphinx=True): """ Processes a function to get information regarding documentation. It is assume that the function calling this subrouting has already verified that it is a valid module function. """ if name in sk_list: return False, False # We add in the end, as inspect.getsourcelines is slow add_md = False add_mdt = False add_idt = False in_sphinx = True f_doctest = False function = False if inspect.isclass(b_obj): obj = getattr(b_obj, name) obj_name = c_name + '.' + name else: obj = b_obj obj_name = name full_name = _get_arg_list(name, obj) if name.startswith('_'): f_sk.append(full_name) else: doc = obj.__doc__ if type(doc) is str: if not doc: add_md = True elif not '>>>' in doc: add_mdt = True elif _is_indirect(name, doc): add_idt = True else: f_doctest = True elif doc is None: # this was a function defined in the docstring f_doctest = True else: assert None, type(doc) function = True if sphinx: in_sphinx = find_sphinx(obj_name, mod_path) if add_md or add_mdt or add_idt or not in_sphinx: try: line_no = inspect.getsourcelines(obj)[1] except IOError: # Raised when source does not exist # which means the function is not there. return False, False full_name = "LINE %d: %s" % (line_no, full_name) if add_md: f_md.append(full_name) elif add_mdt: f_mdt.append(full_name) elif add_idt: f_idt.append(full_name) if not in_sphinx: sph.append(full_name) return f_doctest, function def process_class(c_name, obj, c_sk, c_md, c_mdt, c_idt, c_has_doctest, mod_path, sph, sphinx=True): """ Extracts information about the class regarding documentation. It is assumed that the function calling this subroutine has already checked that the class is valid. """ # Skip class case if c_name.startswith('_'): c_sk.append(c_name) return False, False, None c = False c_dt = False # Get the line number of class try: source, line_no = inspect.getsourcelines(obj) except IOError: # Raised when source does not exist # which means the class is not there. return False, False, None c = True full_name = "LINE %d: %s" % (line_no, c_name) doc = obj.__doc__ if type(doc) is str: if not doc: c_md.append(full_name) elif not '>>>' in doc: c_mdt.append(full_name) elif _is_indirect(c_name, doc): c_idt.append(full_name) else: c_dt = True c_has_doctest.append(full_name) elif doc is None: # this was a class defined in the docstring c_dt = True c_has_doctest.append(full_name) else: assert None, type(doc) in_sphinx = False if sphinx: in_sphinx = find_sphinx(c_name, mod_path) if not in_sphinx: sph.append(full_name) return c_dt, c, source def coverage(module_path, verbose=False, no_color=False, sphinx=True): """ Given a module path, builds an index of all classes and functions contained. 
It then goes through each of the classes/functions to get the docstring and doctest coverage of the module. """ # Import the package and find members m = None try: __import__(module_path) m = sys.modules[module_path] except Exception as a: # Most likely cause, absence of __init__ print("%s could not be loaded due to %s." % (module_path, repr(a))) return 0, 0, 0 c_skipped = [] c_md = [] c_mdt = [] c_has_doctest = [] c_idt = [] classes = 0 c_doctests = 0 c_sph = [] f_skipped = [] f_md = [] f_mdt = [] f_has_doctest = [] f_idt = [] functions = 0 f_doctests = 0 f_sph = [] skip_members = ['__abstractmethods__'] # Get the list of members m_members = dir(m) for member in m_members: # Check for skipped functions first, they throw nasty errors # when combined with getattr if member in skip_members: continue # Identify if the member (class/def) is a part of this module obj = getattr(m, member) obj_mod = inspect.getmodule(obj) # Function not a part of this module if not obj_mod or not obj_mod.__name__ == module_path: continue # If it's a function if inspect.isfunction(obj) or inspect.ismethod(obj): f_dt, f = process_function(member, '', obj, module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members, f_sph, sphinx=sphinx) if f: functions += 1 if f_dt: f_doctests += 1 # If it's a class, look at it's methods too elif inspect.isclass(obj): # Process the class first c_dt, c, source = process_class(member, obj, c_skipped, c_md, c_mdt, c_idt, c_has_doctest, module_path, c_sph, sphinx=sphinx) if not c: continue else: classes += 1 if c_dt: c_doctests += 1 # Iterate through it's members for f_name in obj.__dict__: if f_name in skip_members or f_name.startswith('_'): continue # Check if def funcname appears in source if not ("def " + f_name) in ' '.join(source): continue # Identify the module of the current class member f_obj = getattr(obj, f_name) obj_mod = inspect.getmodule(f_obj) # Function not a part of this module if not obj_mod or not obj_mod.__name__ == module_path: continue # If it's a function if inspect.isfunction(f_obj) or inspect.ismethod(f_obj): f_dt, f = process_function(f_name, member, obj, module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members, f_sph, sphinx=sphinx) if f: functions += 1 if f_dt: f_doctests += 1 # Evaluate the percent coverage total_doctests = c_doctests + f_doctests total_members = classes + functions if total_members: score = 100 * float(total_doctests) / (total_members) else: score = 100 score = int(score) if sphinx: total_sphinx = len(c_sph) + len(f_sph) if total_members: sphinx_score = 100 - 100 * float(total_sphinx) / total_members else: sphinx_score = 100 sphinx_score = int(sphinx_score) else: total_sphinx = 0 sphinx_score = 0 # Sort functions/classes by line number c_md = sorted(c_md, key=lambda x: int(x.split()[1][:-1])) c_mdt = sorted(c_mdt, key=lambda x: int(x.split()[1][:-1])) c_idt = sorted(c_idt, key=lambda x: int(x.split()[1][:-1])) f_md = sorted(f_md, key=lambda x: int(x.split()[1][:-1])) f_mdt = sorted(f_mdt, key=lambda x: int(x.split()[1][:-1])) f_idt = sorted(f_idt, key=lambda x: int(x.split()[1][:-1])) print_coverage(module_path, classes, c_md, c_mdt, c_idt, c_sph, functions, f_md, f_mdt, f_idt, f_sph, score, total_doctests, total_members, sphinx_score, total_sphinx, verbose=verbose, no_color=no_color, sphinx=sphinx) return total_doctests, total_sphinx, total_members def go(sympy_top, file, verbose=False, no_color=False, exact=True, sphinx=True): # file names containing any string in skip_paths will be skipped, skip_paths = [] if 
os.path.isdir(file): doctests, total_sphinx, num_functions = 0, 0, 0 for F in os.listdir(file): _doctests, _total_sphinx, _num_functions = go(sympy_top, '%s/%s' % (file, F), verbose=verbose, no_color=no_color, exact=False, sphinx=sphinx) doctests += _doctests total_sphinx += _total_sphinx num_functions += _num_functions return doctests, total_sphinx, num_functions if (not (file.endswith('.py') or file.endswith('.pyx')) or file.endswith('__init__.py') or not exact and ('test_' in file or 'bench_' in file or any(name in file for name in skip_paths))): return 0, 0, 0 if not os.path.exists(file): print("File(%s does not exist." % file) sys.exit(1) # Relpath for constructing the module name return coverage(get_mod_name(file, sympy_top), verbose=verbose, no_color=no_color, sphinx=sphinx) if __name__ == "__main__": bintest_dir = os.path.abspath(os.path.dirname(__file__)) # bin/cover... sympy_top = os.path.split(bintest_dir)[0] # ../ sympy_dir = os.path.join(sympy_top, 'sympy') # ../sympy/ if os.path.isdir(sympy_dir): sys.path.insert(0, sympy_top) usage = "usage: ./bin/doctest_coverage.py PATHS" parser = ArgumentParser( description=__doc__, usage=usage, formatter_class=RawDescriptionHelpFormatter, ) parser.add_argument("path", nargs='*', default=[os.path.join(sympy_top, 'sympy')]) parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False) parser.add_argument("--no-colors", action="store_true", dest="no_color", help="use no colors", default=False) parser.add_argument("--no-sphinx", action="store_false", dest="sphinx", help="don't report Sphinx coverage", default=True) args = parser.parse_args() if args.sphinx and not os.path.exists(os.path.join(sympy_top, 'doc', '_build', 'html')): print(filldedent(""" Cannot check Sphinx coverage without a documentation build. To build the docs, run "cd doc; make html". To skip checking Sphinx coverage, pass --no-sphinx. 
""")) sys.exit(1) full_coverage = True for file in args.path: file = os.path.normpath(file) print('DOCTEST COVERAGE for %s' % (file)) print('='*70) print() doctests, total_sphinx, num_functions = go(sympy_top, file, verbose=args.verbose, no_color=args.no_color, sphinx=args.sphinx) if num_functions == 0: score = 100 sphinx_score = 100 else: score = 100 * float(doctests) / num_functions score = int(score) if doctests < num_functions: full_coverage = False if args.sphinx: sphinx_score = 100 - 100 * float(total_sphinx) / num_functions sphinx_score = int(sphinx_score) if total_sphinx > 0: full_coverage = False print() print('='*70) if args.no_color: print("TOTAL DOCTEST SCORE for %s: %s%% (%s of %s)" % \ (get_mod_name(file, sympy_top), score, doctests, num_functions)) elif score < 100: print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Red"]), score, doctests, num_functions, c_normal)) else: print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Green"]), score, doctests, num_functions, c_normal)) if args.sphinx: if args.no_color: print("TOTAL SPHINX SCORE for %s: %s%% (%s of %s)" % \ (get_mod_name(file, sympy_top), sphinx_score, num_functions - total_sphinx, num_functions)) elif sphinx_score < 100: print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Red"]), sphinx_score, num_functions - total_sphinx, num_functions, c_normal)) else: print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Green"]), sphinx_score, num_functions - total_sphinx, num_functions, c_normal)) print() sys.exit(not full_coverage)
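# --- Illustrative sketch (example numbers only, not part of bin/coverage_doctest.py) ---
# The scores reported by coverage() above are plain integer percentages: the
# doctest score is the fraction of counted members that have a doctest, and the
# Sphinx score is the fraction that are imported into the Sphinx docs.
total_doctests, total_members, total_sphinx = 7, 10, 2
score = int(100 * float(total_doctests) / total_members)
sphinx_score = int(100 - 100 * float(total_sphinx) / total_members)
assert (score, sphinx_score) == (70, 80)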
#!/usr/bin/env python from __future__ import print_function DESCRIPTION = """ Runs all the examples for testing purposes and reports successes and failures to stderr. An example is marked successful if the running thread does not throw an exception, for threaded examples, such as plotting, one needs to check the stderr messages as well. """ EPILOG = """ Example Usage: When no examples fail: $ ./all.py > out SUCCESSFUL: - beginner.basic [...] NO FAILED EXAMPLES $ When examples fail: $ ./all.py -w > out Traceback (most recent call last): File "./all.py", line 111, in run_examples [...] SUCCESSFUL: - beginner.basic [...] FAILED: - intermediate.mplot2D [...] $ Obviously, we want to achieve the first result. """ import imp import optparse import os import sys import traceback # add local sympy to the module path this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "..") sympy_dir = os.path.normpath(sympy_dir) sys.path.insert(0, sympy_dir) import sympy TERMINAL_EXAMPLES = [ "beginner.basic", "beginner.differentiation", "beginner.expansion", "beginner.functions", "beginner.limits_examples", "beginner.precision", "beginner.print_pretty", "beginner.series", "beginner.substitution", "intermediate.coupled_cluster", "intermediate.differential_equations", "intermediate.infinite_1d_box", "intermediate.partial_differential_eqs", "intermediate.trees", "intermediate.vandermonde", "advanced.curvilinear_coordinates", "advanced.dense_coding_example", "advanced.fem", "advanced.gibbs_phenomenon", "advanced.grover_example", "advanced.hydrogen", "advanced.pidigits", "advanced.qft", "advanced.relativity", ] WINDOWED_EXAMPLES = [ "beginner.plotting_nice_plot", "intermediate.mplot2d", "intermediate.mplot3d", "intermediate.print_gtk", "advanced.autowrap_integrators", "advanced.autowrap_ufuncify", "advanced.pyglet_plotting", ] EXAMPLE_DIR = os.path.dirname(__file__) def __import__(name, globals=None, locals=None, fromlist=None): """An alternative to the import function so that we can import modules defined as strings. This code was taken from: http://docs.python.org/lib/examples-imp.html """ # Fast path: see if the module has already been imported. try: return sys.modules[name] except KeyError: pass # If any of the following calls raises an exception, # there's a problem we can't handle -- let the caller handle it. module_name = name.split('.')[-1] module_path = os.path.join(EXAMPLE_DIR, *name.split('.')[:-1]) fp, pathname, description = imp.find_module(module_name, [module_path]) try: return imp.load_module(module_name, fp, pathname, description) finally: # Since we may exit via an exception, close fp explicitly. if fp: fp.close() def load_example_module(example): """Loads modules based upon the given package name""" mod = __import__(example) return mod def run_examples(windowed=False, quiet=False, summary=True): """Run all examples in the list of modules. Returns a boolean value indicating whether all the examples were successful. 
""" successes = [] failures = [] examples = TERMINAL_EXAMPLES if windowed: examples += WINDOWED_EXAMPLES if quiet: from sympy.testing.runtests import PyTestReporter reporter = PyTestReporter() reporter.write("Testing Examples\n") reporter.write("-" * reporter.terminal_width) else: reporter = None for example in examples: if run_example(example, reporter=reporter): successes.append(example) else: failures.append(example) if summary: show_summary(successes, failures, reporter=reporter) return len(failures) == 0 def run_example(example, reporter=None): """Run a specific example. Returns a boolean value indicating whether the example was successful. """ if reporter: reporter.write(example) else: print("=" * 79) print("Running: ", example) try: mod = load_example_module(example) if reporter: suppress_output(mod.main) reporter.write("[PASS]", "Green", align="right") else: mod.main() return True except KeyboardInterrupt as e: raise e except: if reporter: reporter.write("[FAIL]", "Red", align="right") traceback.print_exc() return False class DummyFile(object): def write(self, x): pass def suppress_output(fn): """Suppresses the output of fn on sys.stdout.""" save_stdout = sys.stdout try: sys.stdout = DummyFile() fn() finally: sys.stdout = save_stdout def show_summary(successes, failures, reporter=None): """Shows a summary detailing which examples were successful and which failed.""" if reporter: reporter.write("-" * reporter.terminal_width) if failures: reporter.write("FAILED:\n", "Red") for example in failures: reporter.write(" %s\n" % example) else: reporter.write("ALL EXAMPLES PASSED\n", "Green") else: if successes: print("SUCCESSFUL: ", file=sys.stderr) for example in successes: print(" -", example, file=sys.stderr) else: print("NO SUCCESSFUL EXAMPLES", file=sys.stderr) if failures: print("FAILED: ", file=sys.stderr) for example in failures: print(" -", example, file=sys.stderr) else: print("NO FAILED EXAMPLES", file=sys.stderr) def main(*args, **kws): """Main script runner""" parser = optparse.OptionParser() parser.add_option('-w', '--windowed', action="store_true", dest="windowed", help="also run examples requiring windowed environment") parser.add_option('-q', '--quiet', action="store_true", dest="quiet", help="runs examples in 'quiet mode' suppressing example output and \ showing simple status messages.") parser.add_option('--no-summary', action="store_true", dest="no_summary", help="hides the summary at the end of testing the examples") (options, _) = parser.parse_args() return 0 if run_examples(windowed=options.windowed, quiet=options.quiet, summary=not options.no_summary) else 1 if __name__ == "__main__": sys.exit(main(*sys.argv[1:]))
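# --- Illustrative sketch (not part of examples/all.py) ---
# The custom __import__ above turns a dotted example name into a module name
# plus the directory that imp.find_module searches; 'examples' below stands in
# for EXAMPLE_DIR and the example name is arbitrary.
import os

_name = 'intermediate.vandermonde'
_module_name = _name.split('.')[-1]
_module_path = os.path.join('examples', *_name.split('.')[:-1])
assert _module_name == 'vandermonde'
assert _module_path == os.path.join('examples', 'intermediate')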
""" SymPy is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) while keeping the code as simple as possible in order to be comprehensible and easily extensible. SymPy is written entirely in Python. It depends on mpmath, and other external libraries may be optionally for things like plotting support. See the webpage for more information and documentation: https://sympy.org """ import sys if sys.version_info < (3, 5): raise ImportError("Python version 3.5 or above is required for SymPy.") del sys try: import mpmath except ImportError: raise ImportError("SymPy now depends on mpmath as an external library. " "See https://docs.sympy.org/latest/install.html#mpmath for more information.") del mpmath from sympy.release import __version__ if 'dev' in __version__: def enable_warnings(): import warnings warnings.filterwarnings('default', '.*', DeprecationWarning, module='sympy.*') del warnings enable_warnings() del enable_warnings def __sympy_debug(): # helper function so we don't import os globally import os debug_str = os.getenv('SYMPY_DEBUG', 'False') if debug_str in ('True', 'False'): return eval(debug_str) else: raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" % debug_str) SYMPY_DEBUG = __sympy_debug() # type: bool from .core import (sympify, SympifyError, cacheit, Basic, Atom, preorder_traversal, S, Expr, AtomicExpr, UnevaluatedExpr, Symbol, Wild, Dummy, symbols, var, Number, Float, Rational, Integer, NumberSymbol, RealNumber, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo, AlgebraicNumber, comp, mod_inverse, Pow, integer_nthroot, integer_log, Mul, prod, Add, Mod, Rel, Eq, Ne, Lt, Le, Gt, Ge, Equality, GreaterThan, LessThan, Unequality, StrictGreaterThan, StrictLessThan, vectorize, Lambda, WildFunction, Derivative, diff, FunctionClass, Function, Subs, expand, PoleError, count_ops, expand_mul, expand_log, expand_func, expand_trig, expand_complex, expand_multinomial, nfloat, expand_power_base, expand_power_exp, arity, PrecisionExhausted, N, evalf, Tuple, Dict, gcd_terms, factor_terms, factor_nc, evaluate, Catalan, EulerGamma, GoldenRatio, TribonacciConstant) from .logic import (to_cnf, to_dnf, to_nnf, And, Or, Not, Xor, Nand, Nor, Implies, Equivalent, ITE, POSform, SOPform, simplify_logic, bool_map, true, false, satisfiable) from .assumptions import (AppliedPredicate, Predicate, AssumptionsContext, assuming, Q, ask, register_handler, remove_handler, refine) from .polys import (Poly, PurePoly, poly_from_expr, parallel_poly_from_expr, degree, total_degree, degree_list, LC, LM, LT, pdiv, prem, pquo, pexquo, div, rem, quo, exquo, half_gcdex, gcdex, invert, subresultants, resultant, discriminant, cofactors, gcd_list, gcd, lcm_list, lcm, terms_gcd, trunc, monic, content, primitive, compose, decompose, sturm, gff_list, gff, sqf_norm, sqf_part, sqf_list, sqf, factor_list, factor, intervals, refine_root, count_roots, real_roots, nroots, ground_roots, nth_power_roots_poly, cancel, reduced, groebner, is_zero_dimensional, GroebnerBasis, poly, symmetrize, horner, interpolate, rational_interpolate, viete, together, BasePolynomialError, ExactQuotientFailed, PolynomialDivisionFailed, OperationNotSupported, HeuristicGCDFailed, HomomorphismFailed, IsomorphismFailed, ExtraneousFactors, EvaluationFailed, RefinementFailed, CoercionFailed, NotInvertible, NotReversible, NotAlgebraic, DomainError, PolynomialError, UnificationFailed, GeneratorsError, GeneratorsNeeded, ComputationFailed, UnivariatePolynomialError, MultivariatePolynomialError, 
PolificationFailed, OptionError, FlagError, minpoly, minimal_polynomial, primitive_element, field_isomorphism, to_number_field, isolate, itermonomials, Monomial, lex, grlex, grevlex, ilex, igrlex, igrevlex, CRootOf, rootof, RootOf, ComplexRootOf, RootSum, roots, Domain, FiniteField, IntegerRing, RationalField, RealField, ComplexField, PythonFiniteField, GMPYFiniteField, PythonIntegerRing, GMPYIntegerRing, PythonRational, GMPYRationalField, AlgebraicField, PolynomialRing, FractionField, ExpressionDomain, FF_python, FF_gmpy, ZZ_python, ZZ_gmpy, QQ_python, QQ_gmpy, GF, FF, ZZ, QQ, RR, CC, EX, construct_domain, swinnerton_dyer_poly, cyclotomic_poly, symmetric_poly, random_poly, interpolating_poly, jacobi_poly, chebyshevt_poly, chebyshevu_poly, hermite_poly, legendre_poly, laguerre_poly, apart, apart_list, assemble_partfrac_list, Options, ring, xring, vring, sring, field, xfield, vfield, sfield) from .series import (Order, O, limit, Limit, gruntz, series, approximants, residue, EmptySequence, SeqPer, SeqFormula, sequence, SeqAdd, SeqMul, fourier_series, fps, difference_delta, limit_seq) from .functions import (factorial, factorial2, rf, ff, binomial, RisingFactorial, FallingFactorial, subfactorial, carmichael, fibonacci, lucas, tribonacci, harmonic, bernoulli, bell, euler, catalan, genocchi, partition, sqrt, root, Min, Max, Id, real_root, cbrt, re, im, sign, Abs, conjugate, arg, polar_lift, periodic_argument, unbranched_argument, principal_branch, transpose, adjoint, polarify, unpolarify, sin, cos, tan, sec, csc, cot, sinc, asin, acos, atan, asec, acsc, acot, atan2, exp_polar, exp, ln, log, LambertW, sinh, cosh, tanh, coth, sech, csch, asinh, acosh, atanh, acoth, asech, acsch, floor, ceiling, frac, Piecewise, piecewise_fold, erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv, Ei, expint, E1, li, Li, Si, Ci, Shi, Chi, fresnels, fresnelc, gamma, lowergamma, uppergamma, polygamma, loggamma, digamma, trigamma, multigamma, dirichlet_eta, zeta, lerchphi, polylog, stieltjes, Eijk, LeviCivita, KroneckerDelta, SingularityFunction, DiracDelta, Heaviside, bspline_basis, bspline_basis_set, interpolating_spline, besselj, bessely, besseli, besselk, hankel1, hankel2, jn, yn, jn_zeros, hn1, hn2, airyai, airybi, airyaiprime, airybiprime, marcumq, hyper, meijerg, appellf1, legendre, assoc_legendre, hermite, chebyshevt, chebyshevu, chebyshevu_root, chebyshevt_root, laguerre, assoc_laguerre, gegenbauer, jacobi, jacobi_normalized, Ynm, Ynm_c, Znm, elliptic_k, elliptic_f, elliptic_e, elliptic_pi, beta, mathieus, mathieuc, mathieusprime, mathieucprime) from .ntheory import (nextprime, prevprime, prime, primepi, primerange, randprime, Sieve, sieve, primorial, cycle_length, composite, compositepi, isprime, divisors, proper_divisors, factorint, multiplicity, perfect_power, pollard_pm1, pollard_rho, primefactors, totient, trailing, divisor_count, proper_divisor_count, divisor_sigma, factorrat, reduced_totient, primenu, primeomega, mersenne_prime_exponent, is_perfect, is_mersenne_prime, is_abundant, is_deficient, is_amicable, abundance, npartitions, is_primitive_root, is_quad_residue, legendre_symbol, jacobi_symbol, n_order, sqrt_mod, quadratic_residues, primitive_root, nthroot_mod, is_nthpow_residue, sqrt_mod_iter, mobius, discrete_log, quadratic_congruence, binomial_coefficients, binomial_coefficients_list, multinomial_coefficients, continued_fraction_periodic, continued_fraction_iterator, continued_fraction_reduce, continued_fraction_convergents, continued_fraction, egyptian_fraction) from .concrete import product, 
Product, summation, Sum from .discrete import (fft, ifft, ntt, intt, fwht, ifwht, mobius_transform, inverse_mobius_transform, convolution, covering_product, intersecting_product) from .simplify import (simplify, hypersimp, hypersimilar, logcombine, separatevars, posify, besselsimp, kroneckersimp, signsimp, bottom_up, nsimplify, FU, fu, sqrtdenest, cse, use, epath, EPath, hyperexpand, collect, rcollect, radsimp, collect_const, fraction, numer, denom, trigsimp, exptrigsimp, powsimp, powdenest, combsimp, gammasimp, ratsimp, ratsimpmodprime) from .sets import (Set, Interval, Union, EmptySet, FiniteSet, ProductSet, Intersection, imageset, Complement, SymmetricDifference, ImageSet, Range, ComplexRegion, Reals, Contains, ConditionSet, Ordinal, OmegaPower, ord0, PowerSet, Naturals, Naturals0, UniversalSet, Integers, Rationals) from .solvers import (solve, solve_linear_system, solve_linear_system_LU, solve_undetermined_coeffs, nsolve, solve_linear, checksol, det_quick, inv_quick, check_assumptions, failing_assumptions, diophantine, rsolve, rsolve_poly, rsolve_ratio, rsolve_hyper, checkodesol, classify_ode, dsolve, homogeneous_order, solve_poly_system, solve_triangulated, pde_separate, pde_separate_add, pde_separate_mul, pdsolve, classify_pde, checkpdesol, ode_order, reduce_inequalities, reduce_abs_inequality, reduce_abs_inequalities, solve_poly_inequality, solve_rational_inequalities, solve_univariate_inequality, decompogen, solveset, linsolve, linear_eq_to_matrix, nonlinsolve, substitution, Complexes) from .matrices import (ShapeError, NonSquareMatrixError, GramSchmidt, casoratian, diag, eye, hessian, jordan_cell, list2numpy, matrix2numpy, matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2, rot_axis3, symarray, wronskian, zeros, MutableDenseMatrix, DeferredVector, MatrixBase, Matrix, MutableMatrix, MutableSparseMatrix, banded, ImmutableDenseMatrix, ImmutableSparseMatrix, ImmutableMatrix, SparseMatrix, MatrixSlice, BlockDiagMatrix, BlockMatrix, FunctionMatrix, Identity, Inverse, MatAdd, MatMul, MatPow, MatrixExpr, MatrixSymbol, Trace, Transpose, ZeroMatrix, OneMatrix, blockcut, block_collapse, matrix_symbols, Adjoint, hadamard_product, HadamardProduct, HadamardPower, Determinant, det, diagonalize_vector, DiagMatrix, DiagonalMatrix, DiagonalOf, trace, DotProduct, kronecker_product, KroneckerProduct, PermutationMatrix, MatrixPermute) from .geometry import (Point, Point2D, Point3D, Line, Ray, Segment, Line2D, Segment2D, Ray2D, Line3D, Segment3D, Ray3D, Plane, Ellipse, Circle, Polygon, RegularPolygon, Triangle, rad, deg, are_similar, centroid, convex_hull, idiff, intersection, closest_points, farthest_points, GeometryError, Curve, Parabola) from .utilities import (flatten, group, take, subsets, variations, numbered_symbols, cartes, capture, dict_merge, postorder_traversal, interactive_traversal, prefixes, postfixes, sift, topological_sort, unflatten, has_dups, has_variety, reshape, default_sort_key, ordered, rotations, filldedent, lambdify, source, threaded, xthreaded, public, memoize_property, timed) from .integrals import (integrate, Integral, line_integrate, mellin_transform, inverse_mellin_transform, MellinTransform, InverseMellinTransform, laplace_transform, inverse_laplace_transform, LaplaceTransform, InverseLaplaceTransform, fourier_transform, inverse_fourier_transform, FourierTransform, InverseFourierTransform, sine_transform, inverse_sine_transform, SineTransform, InverseSineTransform, cosine_transform, inverse_cosine_transform, CosineTransform, InverseCosineTransform, 
hankel_transform, inverse_hankel_transform, HankelTransform, InverseHankelTransform, singularityintegrate) from .tensor import (IndexedBase, Idx, Indexed, get_contraction_structure, get_indices, MutableDenseNDimArray, ImmutableDenseNDimArray, MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct, tensorcontraction, derive_by_array, permutedims, Array, DenseNDimArray, SparseNDimArray) from .parsing import parse_expr from .calculus import (euler_equations, singularities, is_increasing, is_strictly_increasing, is_decreasing, is_strictly_decreasing, is_monotonic, finite_diff_weights, apply_finite_diff, as_finite_diff, differentiate_finite, periodicity, not_empty_in, AccumBounds, is_convex, stationary_points, minimum, maximum) from .algebras import Quaternion from .printing import (pager_print, pretty, pretty_print, pprint, pprint_use_unicode, pprint_try_use_unicode, latex, print_latex, multiline_latex, mathml, print_mathml, python, print_python, pycode, ccode, print_ccode, glsl_code, print_glsl, cxxcode, fcode, print_fcode, rcode, print_rcode, jscode, print_jscode, julia_code, mathematica_code, octave_code, rust_code, print_gtk, preview, srepr, print_tree, StrPrinter, sstr, sstrrepr, TableForm, dotprint, maple_code, print_maple_code) from .testing import test, doctest # This module causes conflicts with other modules: # from .stats import * # Adds about .04-.05 seconds of import time # from combinatorics import * # This module is slow to import: #from physics import units from .plotting import plot, textplot, plot_backends, plot_implicit, plot_parametric from .interactive import init_session, init_printing evalf._create_evalf_table() # This is slow to import: #import abc from .deprecated import C, ClassRegistry, class_registry __all__ = [ # sympy.core 'sympify', 'SympifyError', 'cacheit', 'Basic', 'Atom', 'preorder_traversal', 'S', 'Expr', 'AtomicExpr', 'UnevaluatedExpr', 'Symbol', 'Wild', 'Dummy', 'symbols', 'var', 'Number', 'Float', 'Rational', 'Integer', 'NumberSymbol', 'RealNumber', 'igcd', 'ilcm', 'seterr', 'E', 'I', 'nan', 'oo', 'pi', 'zoo', 'AlgebraicNumber', 'comp', 'mod_inverse', 'Pow', 'integer_nthroot', 'integer_log', 'Mul', 'prod', 'Add', 'Mod', 'Rel', 'Eq', 'Ne', 'Lt', 'Le', 'Gt', 'Ge', 'Equality', 'GreaterThan', 'LessThan', 'Unequality', 'StrictGreaterThan', 'StrictLessThan', 'vectorize', 'Lambda', 'WildFunction', 'Derivative', 'diff', 'FunctionClass', 'Function', 'Subs', 'expand', 'PoleError', 'count_ops', 'expand_mul', 'expand_log', 'expand_func', 'expand_trig', 'expand_complex', 'expand_multinomial', 'nfloat', 'expand_power_base', 'expand_power_exp', 'arity', 'PrecisionExhausted', 'N', 'evalf', 'Tuple', 'Dict', 'gcd_terms', 'factor_terms', 'factor_nc', 'evaluate', 'Catalan', 'EulerGamma', 'GoldenRatio', 'TribonacciConstant', # sympy.logic 'to_cnf', 'to_dnf', 'to_nnf', 'And', 'Or', 'Not', 'Xor', 'Nand', 'Nor', 'Implies', 'Equivalent', 'ITE', 'POSform', 'SOPform', 'simplify_logic', 'bool_map', 'true', 'false', 'satisfiable', # sympy.assumptions 'AppliedPredicate', 'Predicate', 'AssumptionsContext', 'assuming', 'Q', 'ask', 'register_handler', 'remove_handler', 'refine', # sympy.polys 'Poly', 'PurePoly', 'poly_from_expr', 'parallel_poly_from_expr', 'degree', 'total_degree', 'degree_list', 'LC', 'LM', 'LT', 'pdiv', 'prem', 'pquo', 'pexquo', 'div', 'rem', 'quo', 'exquo', 'half_gcdex', 'gcdex', 'invert', 'subresultants', 'resultant', 'discriminant', 'cofactors', 'gcd_list', 'gcd', 'lcm_list', 'lcm', 'terms_gcd', 'trunc', 'monic', 'content', 'primitive', 
'compose', 'decompose', 'sturm', 'gff_list', 'gff', 'sqf_norm', 'sqf_part', 'sqf_list', 'sqf', 'factor_list', 'factor', 'intervals', 'refine_root', 'count_roots', 'real_roots', 'nroots', 'ground_roots', 'nth_power_roots_poly', 'cancel', 'reduced', 'groebner', 'is_zero_dimensional', 'GroebnerBasis', 'poly', 'symmetrize', 'horner', 'interpolate', 'rational_interpolate', 'viete', 'together', 'BasePolynomialError', 'ExactQuotientFailed', 'PolynomialDivisionFailed', 'OperationNotSupported', 'HeuristicGCDFailed', 'HomomorphismFailed', 'IsomorphismFailed', 'ExtraneousFactors', 'EvaluationFailed', 'RefinementFailed', 'CoercionFailed', 'NotInvertible', 'NotReversible', 'NotAlgebraic', 'DomainError', 'PolynomialError', 'UnificationFailed', 'GeneratorsError', 'GeneratorsNeeded', 'ComputationFailed', 'UnivariatePolynomialError', 'MultivariatePolynomialError', 'PolificationFailed', 'OptionError', 'FlagError', 'minpoly', 'minimal_polynomial', 'primitive_element', 'field_isomorphism', 'to_number_field', 'isolate', 'itermonomials', 'Monomial', 'lex', 'grlex', 'grevlex', 'ilex', 'igrlex', 'igrevlex', 'CRootOf', 'rootof', 'RootOf', 'ComplexRootOf', 'RootSum', 'roots', 'Domain', 'FiniteField', 'IntegerRing', 'RationalField', 'RealField', 'ComplexField', 'PythonFiniteField', 'GMPYFiniteField', 'PythonIntegerRing', 'GMPYIntegerRing', 'PythonRational', 'GMPYRationalField', 'AlgebraicField', 'PolynomialRing', 'FractionField', 'ExpressionDomain', 'FF_python', 'FF_gmpy', 'ZZ_python', 'ZZ_gmpy', 'QQ_python', 'QQ_gmpy', 'GF', 'FF', 'ZZ', 'QQ', 'RR', 'CC', 'EX', 'construct_domain', 'swinnerton_dyer_poly', 'cyclotomic_poly', 'symmetric_poly', 'random_poly', 'interpolating_poly', 'jacobi_poly', 'chebyshevt_poly', 'chebyshevu_poly', 'hermite_poly', 'legendre_poly', 'laguerre_poly', 'apart', 'apart_list', 'assemble_partfrac_list', 'Options', 'ring', 'xring', 'vring', 'sring', 'field', 'xfield', 'vfield', 'sfield', # sympy.series 'Order', 'O', 'limit', 'Limit', 'gruntz', 'series', 'approximants', 'residue', 'EmptySequence', 'SeqPer', 'SeqFormula', 'sequence', 'SeqAdd', 'SeqMul', 'fourier_series', 'fps', 'difference_delta', 'limit_seq', # sympy.functions 'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial', 'FallingFactorial', 'subfactorial', 'carmichael', 'fibonacci', 'lucas', 'tribonacci', 'harmonic', 'bernoulli', 'bell', 'euler', 'catalan', 'genocchi', 'partition', 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'cbrt', 're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift', 'periodic_argument', 'unbranched_argument', 'principal_branch', 'transpose', 'adjoint', 'polarify', 'unpolarify', 'sin', 'cos', 'tan', 'sec', 'csc', 'cot', 'sinc', 'asin', 'acos', 'atan', 'asec', 'acsc', 'acot', 'atan2', 'exp_polar', 'exp', 'ln', 'log', 'LambertW', 'sinh', 'cosh', 'tanh', 'coth', 'sech', 'csch', 'asinh', 'acosh', 'atanh', 'acoth', 'asech', 'acsch', 'floor', 'ceiling', 'frac', 'Piecewise', 'piecewise_fold', 'erf', 'erfc', 'erfi', 'erf2', 'erfinv', 'erfcinv', 'erf2inv', 'Ei', 'expint', 'E1', 'li', 'Li', 'Si', 'Ci', 'Shi', 'Chi', 'fresnels', 'fresnelc', 'gamma', 'lowergamma', 'uppergamma', 'polygamma', 'loggamma', 'digamma', 'trigamma', 'multigamma', 'dirichlet_eta', 'zeta', 'lerchphi', 'polylog', 'stieltjes', 'Eijk', 'LeviCivita', 'KroneckerDelta', 'SingularityFunction', 'DiracDelta', 'Heaviside', 'bspline_basis', 'bspline_basis_set', 'interpolating_spline', 'besselj', 'bessely', 'besseli', 'besselk', 'hankel1', 'hankel2', 'jn', 'yn', 'jn_zeros', 'hn1', 'hn2', 'airyai', 'airybi', 'airyaiprime', 'airybiprime', 
'marcumq', 'hyper', 'meijerg', 'appellf1', 'legendre', 'assoc_legendre', 'hermite', 'chebyshevt', 'chebyshevu', 'chebyshevu_root', 'chebyshevt_root', 'laguerre', 'assoc_laguerre', 'gegenbauer', 'jacobi', 'jacobi_normalized', 'Ynm', 'Ynm_c', 'Znm', 'elliptic_k', 'elliptic_f', 'elliptic_e', 'elliptic_pi', 'beta', 'mathieus', 'mathieuc', 'mathieusprime', 'mathieucprime', # sympy.ntheory 'nextprime', 'prevprime', 'prime', 'primepi', 'primerange', 'randprime', 'Sieve', 'sieve', 'primorial', 'cycle_length', 'composite', 'compositepi', 'isprime', 'divisors', 'proper_divisors', 'factorint', 'multiplicity', 'perfect_power', 'pollard_pm1', 'pollard_rho', 'primefactors', 'totient', 'trailing', 'divisor_count', 'proper_divisor_count', 'divisor_sigma', 'factorrat', 'reduced_totient', 'primenu', 'primeomega', 'mersenne_prime_exponent', 'is_perfect', 'is_mersenne_prime', 'is_abundant', 'is_deficient', 'is_amicable', 'abundance', 'npartitions', 'is_primitive_root', 'is_quad_residue', 'legendre_symbol', 'jacobi_symbol', 'n_order', 'sqrt_mod', 'quadratic_residues', 'primitive_root', 'nthroot_mod', 'is_nthpow_residue', 'sqrt_mod_iter', 'mobius', 'discrete_log', 'quadratic_congruence', 'binomial_coefficients', 'binomial_coefficients_list', 'multinomial_coefficients', 'continued_fraction_periodic', 'continued_fraction_iterator', 'continued_fraction_reduce', 'continued_fraction_convergents', 'continued_fraction', 'egyptian_fraction', # sympy.concrete 'product', 'Product', 'summation', 'Sum', # sympy.discrete 'fft', 'ifft', 'ntt', 'intt', 'fwht', 'ifwht', 'mobius_transform', 'inverse_mobius_transform', 'convolution', 'covering_product', 'intersecting_product', # sympy.simplify 'simplify', 'hypersimp', 'hypersimilar', 'logcombine', 'separatevars', 'posify', 'besselsimp', 'kroneckersimp', 'signsimp', 'bottom_up', 'nsimplify', 'FU', 'fu', 'sqrtdenest', 'cse', 'use', 'epath', 'EPath', 'hyperexpand', 'collect', 'rcollect', 'radsimp', 'collect_const', 'fraction', 'numer', 'denom', 'trigsimp', 'exptrigsimp', 'powsimp', 'powdenest', 'combsimp', 'gammasimp', 'ratsimp', 'ratsimpmodprime', # sympy.sets 'Set', 'Interval', 'Union', 'EmptySet', 'FiniteSet', 'ProductSet', 'Intersection', 'imageset', 'Complement', 'SymmetricDifference', 'ImageSet', 'Range', 'ComplexRegion', 'Reals', 'Contains', 'ConditionSet', 'Ordinal', 'OmegaPower', 'ord0', 'PowerSet', 'Reals', 'Naturals', 'Naturals0', 'UniversalSet', 'Integers', 'Rationals', # sympy.solvers 'solve', 'solve_linear_system', 'solve_linear_system_LU', 'solve_undetermined_coeffs', 'nsolve', 'solve_linear', 'checksol', 'det_quick', 'inv_quick', 'check_assumptions', 'failing_assumptions', 'diophantine', 'rsolve', 'rsolve_poly', 'rsolve_ratio', 'rsolve_hyper', 'checkodesol', 'classify_ode', 'dsolve', 'homogeneous_order', 'solve_poly_system', 'solve_triangulated', 'pde_separate', 'pde_separate_add', 'pde_separate_mul', 'pdsolve', 'classify_pde', 'checkpdesol', 'ode_order', 'reduce_inequalities', 'reduce_abs_inequality', 'reduce_abs_inequalities', 'solve_poly_inequality', 'solve_rational_inequalities', 'solve_univariate_inequality', 'decompogen', 'solveset', 'linsolve', 'linear_eq_to_matrix', 'nonlinsolve', 'substitution', 'Complexes', # sympy.matrices 'ShapeError', 'NonSquareMatrixError', 'GramSchmidt', 'casoratian', 'diag', 'eye', 'hessian', 'jordan_cell', 'list2numpy', 'matrix2numpy', 'matrix_multiply_elementwise', 'ones', 'randMatrix', 'rot_axis1', 'rot_axis2', 'rot_axis3', 'symarray', 'wronskian', 'zeros', 'MutableDenseMatrix', 'DeferredVector', 'MatrixBase', 'Matrix', 
'MutableMatrix', 'MutableSparseMatrix', 'banded', 'ImmutableDenseMatrix', 'ImmutableSparseMatrix', 'ImmutableMatrix', 'SparseMatrix', 'MatrixSlice', 'BlockDiagMatrix', 'BlockMatrix', 'FunctionMatrix', 'Identity', 'Inverse', 'MatAdd', 'MatMul', 'MatPow', 'MatrixExpr', 'MatrixSymbol', 'Trace', 'Transpose', 'ZeroMatrix', 'OneMatrix', 'blockcut', 'block_collapse', 'matrix_symbols', 'Adjoint', 'hadamard_product', 'HadamardProduct', 'HadamardPower', 'Determinant', 'det', 'diagonalize_vector', 'DiagMatrix', 'DiagonalMatrix', 'DiagonalOf', 'trace', 'DotProduct', 'kronecker_product', 'KroneckerProduct', 'PermutationMatrix', 'MatrixPermute', # sympy.geometry 'Point', 'Point2D', 'Point3D', 'Line', 'Ray', 'Segment', 'Line2D', 'Segment2D', 'Ray2D', 'Line3D', 'Segment3D', 'Ray3D', 'Plane', 'Ellipse', 'Circle', 'Polygon', 'RegularPolygon', 'Triangle', 'rad', 'deg', 'are_similar', 'centroid', 'convex_hull', 'idiff', 'intersection', 'closest_points', 'farthest_points', 'GeometryError', 'Curve', 'Parabola', # sympy.utilities 'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols', 'cartes', 'capture', 'dict_merge', 'postorder_traversal', 'interactive_traversal', 'prefixes', 'postfixes', 'sift', 'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape', 'default_sort_key', 'ordered', 'rotations', 'filldedent', 'lambdify', 'source', 'threaded', 'xthreaded', 'public', 'memoize_property', 'test', 'doctest', 'timed', # sympy.integrals 'integrate', 'Integral', 'line_integrate', 'mellin_transform', 'inverse_mellin_transform', 'MellinTransform', 'InverseMellinTransform', 'laplace_transform', 'inverse_laplace_transform', 'LaplaceTransform', 'InverseLaplaceTransform', 'fourier_transform', 'inverse_fourier_transform', 'FourierTransform', 'InverseFourierTransform', 'sine_transform', 'inverse_sine_transform', 'SineTransform', 'InverseSineTransform', 'cosine_transform', 'inverse_cosine_transform', 'CosineTransform', 'InverseCosineTransform', 'hankel_transform', 'inverse_hankel_transform', 'HankelTransform', 'InverseHankelTransform', 'singularityintegrate', # sympy.tensor 'IndexedBase', 'Idx', 'Indexed', 'get_contraction_structure', 'get_indices', 'MutableDenseNDimArray', 'ImmutableDenseNDimArray', 'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray', 'tensorproduct', 'tensorcontraction', 'derive_by_array', 'permutedims', 'Array', 'DenseNDimArray', 'SparseNDimArray', # sympy.parsing 'parse_expr', # sympy.calculus 'euler_equations', 'singularities', 'is_increasing', 'is_strictly_increasing', 'is_decreasing', 'is_strictly_decreasing', 'is_monotonic', 'finite_diff_weights', 'apply_finite_diff', 'as_finite_diff', 'differentiate_finite', 'periodicity', 'not_empty_in', 'AccumBounds', 'is_convex', 'stationary_points', 'minimum', 'maximum', # sympy.algebras 'Quaternion', # sympy.printing 'pager_print', 'pretty', 'pretty_print', 'pprint', 'pprint_use_unicode', 'pprint_try_use_unicode', 'latex', 'print_latex', 'multiline_latex', 'mathml', 'print_mathml', 'python', 'print_python', 'pycode', 'ccode', 'print_ccode', 'glsl_code', 'print_glsl', 'cxxcode', 'fcode', 'print_fcode', 'rcode', 'print_rcode', 'jscode', 'print_jscode', 'julia_code', 'mathematica_code', 'octave_code', 'rust_code', 'print_gtk', 'preview', 'srepr', 'print_tree', 'StrPrinter', 'sstr', 'sstrrepr', 'TableForm', 'dotprint', 'maple_code', 'print_maple_code', # sympy.plotting 'plot', 'textplot', 'plot_backends', 'plot_implicit', 'plot_parametric', # sympy.interactive 'init_session', 'init_printing', # sympy.testing 'test', 
'doctest', # sympy.deprecated: 'C', 'ClassRegistry', 'class_registry', ]
5bb757a80bf28944a18808aef139f66c1c51b9325c3f52a5561b58f15dd51696
from __future__ import print_function, division

import sys
sys._running_pytest = True  # type: ignore
from distutils.version import LooseVersion as V

import pytest
from sympy.core.cache import clear_cache
import re

sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')


def process_split(config, items):
    split = config.getoption("--split")
    if not split:
        return
    m = sp.match(split)
    if not m:
        raise ValueError("split must be a string of the form a/b "
                         "where a and b are ints.")
    i, t = map(int, m.groups())
    start, end = (i-1)*len(items)//t, i*len(items)//t
    if i < t:
        # remove elements from end of list first
        del items[end:]
    del items[:start]


def pytest_report_header(config):
    from sympy.utilities.misc import ARCH
    s = "architecture: %s\n" % ARCH
    from sympy.core.cache import USE_CACHE
    s += "cache: %s\n" % USE_CACHE
    from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
    version = ''
    if GROUND_TYPES == 'gmpy':
        if HAS_GMPY == 1:
            import gmpy
        elif HAS_GMPY == 2:
            import gmpy2 as gmpy
        version = gmpy.version()
    s += "ground types: %s %s\n" % (GROUND_TYPES, version)
    return s


def pytest_terminal_summary(terminalreporter):
    if (terminalreporter.stats.get('error', None) or
            terminalreporter.stats.get('failed', None)):
        terminalreporter.write_sep(
            ' ', 'DO *NOT* COMMIT!', red=True, bold=True)


def pytest_addoption(parser):
    parser.addoption("--split", action="store", default="",
                     help="split tests")


def pytest_collection_modifyitems(config, items):
    """ pytest hook. """
    # handle splits
    process_split(config, items)


@pytest.fixture(autouse=True, scope='module')
def file_clear_cache():
    clear_cache()


@pytest.fixture(autouse=True, scope='module')
def check_disabled(request):
    if getattr(request.module, 'disabled', False):
        pytest.skip("test requirements not met.")
    elif getattr(request.module, 'ipython', False):
        # need to check version and options for ipython tests
        if (V(pytest.__version__) < '2.6.3' and
                pytest.config.getvalue('-s') != 'no'):
            pytest.skip("run py.test with -s or upgrade to newer version.")
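# Illustrative sketch (standalone, not one of the pytest hooks above): the
# "--split i/t" option handled by process_split() keeps roughly the i-th of t
# equal slices of the collected items, using the same floor-division arithmetic
# as above. The dummy list below only stands in for collected test items.
if __name__ == "__main__":
    demo_items = list(range(10))
    i, t = 2, 3                                    # as if run with --split 2/3
    start, end = (i - 1)*len(demo_items)//t, i*len(demo_items)//t
    print(demo_items[start:end])                   # -> [3, 4, 5]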
70ce5b48292a6a64d8d5eb6b024c2e63d43522bc0750b6c8fb54a2a73c6ecfd9
""" This module exports all latin and greek letters as Symbols, so you can conveniently do >>> from sympy.abc import x, y instead of the slightly more clunky-looking >>> from sympy import symbols >>> x, y = symbols('x y') Caveats ======= 1. As of the time of writing this, the names ``C``, ``O``, ``S``, ``I``, ``N``, ``E``, and ``Q`` are colliding with names defined in SymPy. If you import them from both ``sympy.abc`` and ``sympy``, the second import will "win". This is an issue only for * imports, which should only be used for short-lived code such as interactive sessions and throwaway scripts that do not survive until the next SymPy upgrade, where ``sympy`` may contain a different set of names. 2. This module does not define symbol names on demand, i.e. ``from sympy.abc import foo`` will be reported as an error because ``sympy.abc`` does not contain the name ``foo``. To get a symbol named ``foo``, you still need to use ``Symbol('foo')`` or ``symbols('foo')``. You can freely mix usage of ``sympy.abc`` and ``Symbol``/``symbols``, though sticking with one and only one way to get the symbols does tend to make the code more readable. The module also defines some special names to help detect which names clash with the default SymPy namespace. ``_clash1`` defines all the single letter variables that clash with SymPy objects; ``_clash2`` defines the multi-letter clashing symbols; and ``_clash`` is the union of both. These can be passed for ``locals`` during sympification if one desires Symbols rather than the non-Symbol objects for those names. Examples ======== >>> from sympy import S >>> from sympy.abc import _clash1, _clash2, _clash >>> S("Q & C", locals=_clash1) C & Q >>> S('pi(x)', locals=_clash2) pi(x) >>> S('pi(C, Q)', locals=_clash) pi(C, Q) """ from __future__ import print_function, division from typing import Any, Dict import string from .core import Symbol, symbols from .core.alphabets import greeks from .core.compatibility import exec_ ##### Symbol definitions ##### # Implementation note: The easiest way to avoid typos in the symbols() # parameter is to copy it from the left-hand side of the assignment. a, b, c, d, e, f, g, h, i, j = symbols('a, b, c, d, e, f, g, h, i, j') k, l, m, n, o, p, q, r, s, t = symbols('k, l, m, n, o, p, q, r, s, t') u, v, w, x, y, z = symbols('u, v, w, x, y, z') A, B, C, D, E, F, G, H, I, J = symbols('A, B, C, D, E, F, G, H, I, J') K, L, M, N, O, P, Q, R, S, T = symbols('K, L, M, N, O, P, Q, R, S, T') U, V, W, X, Y, Z = symbols('U, V, W, X, Y, Z') alpha, beta, gamma, delta = symbols('alpha, beta, gamma, delta') epsilon, zeta, eta, theta = symbols('epsilon, zeta, eta, theta') iota, kappa, lamda, mu = symbols('iota, kappa, lamda, mu') nu, xi, omicron, pi = symbols('nu, xi, omicron, pi') rho, sigma, tau, upsilon = symbols('rho, sigma, tau, upsilon') phi, chi, psi, omega = symbols('phi, chi, psi, omega') ##### Clashing-symbols diagnostics ##### # We want to know which names in SymPy collide with those in here. # This is mostly for diagnosing SymPy's namespace during SymPy development. 
_latin = list(string.ascii_letters) # OSINEQ should not be imported as they clash; gamma, pi and zeta clash, too _greek = list(greeks) # make a copy, so we can mutate it # Note: We import lamda since lambda is a reserved keyword in Python _greek.remove("lambda") _greek.append("lamda") ns = {} # type: Dict[str, Any] exec_('from sympy import *', ns) _clash1 = {} _clash2 = {} while ns: _k, _ = ns.popitem() if _k in _greek: _clash2[_k] = Symbol(_k) _greek.remove(_k) elif _k in _latin: _clash1[_k] = Symbol(_k) _latin.remove(_k) _clash = {} _clash.update(_clash1) _clash.update(_clash2) del _latin, _greek, Symbol, _k
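# Illustrative check (a sketch; the exact membership depends on which names the
# installed SymPy version exports): after the loop above, the single-letter
# clashes are the ones called out in the module docstring, e.g.
#
#     >>> from sympy.abc import _clash1
#     >>> sorted(_clash1)
#     ['C', 'E', 'I', 'N', 'O', 'Q', 'S']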
768c84106fad5fbdd9bc2e9cf03c0f483ab8794bdaa3ccbc049ba1853d5e95f3
#!/usr/bin/env python """ Plotting Examples Suggested Usage: python -i pyglet_plotting.py """ from sympy import symbols, sin, cos, pi, sqrt from sympy.core.compatibility import clock from sympy.plotting.pygletplot import PygletPlot from time import sleep def main(): x, y, z = symbols('x,y,z') # toggle axes visibility with F5, colors with F6 axes_options = 'visible=false; colored=true; label_ticks=true; label_axes=true; overlay=true; stride=0.5' # axes_options = 'colored=false; overlay=false; stride=(1.0, 0.5, 0.5)' p = PygletPlot( width=600, height=500, ortho=False, invert_mouse_zoom=False, axes=axes_options, antialiasing=True) examples = [] def example_wrapper(f): examples.append(f) return f @example_wrapper def mirrored_saddles(): p[5] = x**2 - y**2, [20], [20] p[6] = y**2 - x**2, [20], [20] @example_wrapper def mirrored_saddles_saveimage(): p[5] = x**2 - y**2, [20], [20] p[6] = y**2 - x**2, [20], [20] p.wait_for_calculations() # although the calculation is complete, # we still need to wait for it to be # rendered, so we'll sleep to be sure. sleep(1) p.saveimage("plot_example.png") @example_wrapper def mirrored_ellipsoids(): p[2] = x**2 + y**2, [40], [40], 'color=zfade' p[3] = -x**2 - y**2, [40], [40], 'color=zfade' @example_wrapper def saddle_colored_by_derivative(): f = x**2 - y**2 p[1] = f, 'style=solid' p[1].color = abs(f.diff(x)), abs(f.diff(x) + f.diff(y)), abs(f.diff(y)) @example_wrapper def ding_dong_surface(): f = sqrt(1.0 - y)*y p[1] = f, [x, 0, 2*pi, 40], [y, - 1, 4, 100], 'mode=cylindrical; style=solid; color=zfade4' @example_wrapper def polar_circle(): p[7] = 1, 'mode=polar' @example_wrapper def polar_flower(): p[8] = 1.5*sin(4*x), [160], 'mode=polar' p[8].color = z, x, y, (0.5, 0.5, 0.5), ( 0.8, 0.8, 0.8), (x, y, None, z) # z is used for t @example_wrapper def simple_cylinder(): p[9] = 1, 'mode=cylindrical' @example_wrapper def cylindrical_hyperbola(): # (note that polar is an alias for cylindrical) p[10] = 1/y, 'mode=polar', [x], [y, -2, 2, 20] @example_wrapper def extruded_hyperbolas(): p[11] = 1/x, [x, -10, 10, 100], [1], 'style=solid' p[12] = -1/x, [x, -10, 10, 100], [1], 'style=solid' @example_wrapper def torus(): a, b = 1, 0.5 # radius, thickness p[13] = (a + b*cos(x))*cos(y), (a + b*cos(x)) *\ sin(y), b*sin(x), [x, 0, pi*2, 40], [y, 0, pi*2, 40] @example_wrapper def warped_torus(): a, b = 2, 1 # radius, thickness p[13] = (a + b*cos(x))*cos(y), (a + b*cos(x))*sin(y), b *\ sin(x) + 0.5*sin(4*y), [x, 0, pi*2, 40], [y, 0, pi*2, 40] @example_wrapper def parametric_spiral(): p[14] = cos(y), sin(y), y / 10.0, [y, -4*pi, 4*pi, 100] p[14].color = x, (0.1, 0.9), y, (0.1, 0.9), z, (0.1, 0.9) @example_wrapper def multistep_gradient(): p[1] = 1, 'mode=spherical', 'style=both' # p[1] = exp(-x**2-y**2+(x*y)/4), [-1.7,1.7,100], [-1.7,1.7,100], 'style=solid' # p[1] = 5*x*y*exp(-x**2-y**2), [-2,2,100], [-2,2,100] gradient = [0.0, (0.3, 0.3, 1.0), 0.30, (0.3, 1.0, 0.3), 0.55, (0.95, 1.0, 0.2), 0.65, (1.0, 0.95, 0.2), 0.85, (1.0, 0.7, 0.2), 1.0, (1.0, 0.3, 0.2)] p[1].color = z, [None, None, z], gradient # p[1].color = 'zfade' # p[1].color = 'zfade3' @example_wrapper def lambda_vs_sympy_evaluation(): start = clock() p[4] = x**2 + y**2, [100], [100], 'style=solid' p.wait_for_calculations() print("lambda-based calculation took %s seconds." % (clock() - start)) start = clock() p[4] = x**2 + y**2, [100], [100], 'style=solid; use_sympy_eval' p.wait_for_calculations() print( "sympy substitution-based calculation took %s seconds." 
% (clock() - start)) @example_wrapper def gradient_vectors(): def gradient_vectors_inner(f, i): from sympy import lambdify from sympy.plotting.plot_interval import PlotInterval from pyglet.gl import glBegin, glColor3f from pyglet.gl import glVertex3f, glEnd, GL_LINES def draw_gradient_vectors(f, iu, iv): """ Create a function which draws vectors representing the gradient of f. """ dx, dy, dz = f.diff(x), f.diff(y), 0 FF = lambdify([x, y], [x, y, f]) FG = lambdify([x, y], [dx, dy, dz]) iu.v_steps /= 5 iv.v_steps /= 5 Gvl = list(list([FF(u, v), FG(u, v)] for v in iv.frange()) for u in iu.frange()) def draw_arrow(p1, p2): """ Draw a single vector. """ glColor3f(0.4, 0.4, 0.9) glVertex3f(*p1) glColor3f(0.9, 0.4, 0.4) glVertex3f(*p2) def draw(): """ Iterate through the calculated vectors and draw them. """ glBegin(GL_LINES) for u in Gvl: for v in u: point = [[v[0][0], v[0][1], v[0][2]], [v[0][0] + v[1][0], v[0][1] + v[1][1], v[0][2] + v[1][2]]] draw_arrow(point[0], point[1]) glEnd() return draw p[i] = f, [-0.5, 0.5, 25], [-0.5, 0.5, 25], 'style=solid' iu = PlotInterval(p[i].intervals[0]) iv = PlotInterval(p[i].intervals[1]) p[i].postdraw.append(draw_gradient_vectors(f, iu, iv)) gradient_vectors_inner(x**2 + y**2, 1) gradient_vectors_inner(-x**2 - y**2, 2) def help_str(): s = ("\nPlot p has been created. Useful commands: \n" " help(p), p[1] = x**2, print p, p.clear() \n\n" "Available examples (see source in plotting.py):\n\n") for i in range(len(examples)): s += "(%i) %s\n" % (i, examples[i].__name__) s += "\n" s += "e.g. >>> example(2)\n" s += " >>> ding_dong_surface()\n" return s def example(i): if callable(i): p.clear() i() elif i >= 0 and i < len(examples): p.clear() examples[i]() else: print("Not a valid example.\n") print(p) example(0) # 0 - 15 are defined above print(help_str()) if __name__ == "__main__": main()
ecb93a180c4272899c981984edbba87aef6275f08f12315531916b6c5ce68f3c
#!/usr/bin/env python """Vandermonde matrix example Demonstrates matrix computations using the Vandermonde matrix. * https://en.wikipedia.org/wiki/Vandermonde_matrix """ from sympy import Matrix, pprint, Rational, symbols, Symbol, zeros def symbol_gen(sym_str): """Symbol generator Generates sym_str_n where n is the number of times the generator has been called. """ n = 0 while True: yield Symbol("%s_%d" % (sym_str, n)) n += 1 def comb_w_rep(n, k): """Combinations with repetition Returns the list of k combinations with repetition from n objects. """ if k == 0: return [[]] combs = [[i] for i in range(n)] for i in range(k - 1): curr = [] for p in combs: for m in range(p[-1], n): curr.append(p + [m]) combs = curr return combs def vandermonde(order, dim=1, syms='a b c d'): """Computes a Vandermonde matrix of given order and dimension. Define syms to give beginning strings for temporary variables. Returns the Matrix, the temporary variables, and the terms for the polynomials. """ syms = syms.split() n = len(syms) if n < dim: new_syms = [] for i in range(dim - n): j, rem = divmod(i, n) new_syms.append(syms[rem] + str(j)) syms.extend(new_syms) terms = [] for i in range(order + 1): terms.extend(comb_w_rep(dim, i)) rank = len(terms) V = zeros(rank) generators = [symbol_gen(syms[i]) for i in range(dim)] all_syms = [] for i in range(rank): row_syms = [next(g) for g in generators] all_syms.append(row_syms) for j, term in enumerate(terms): v_entry = 1 for k in term: v_entry *= row_syms[k] V[i*rank + j] = v_entry return V, all_syms, terms def gen_poly(points, order, syms): """Generates a polynomial using a Vandermonde system""" num_pts = len(points) if num_pts == 0: raise ValueError("Must provide points") dim = len(points[0]) - 1 if dim > len(syms): raise ValueError("Must provide at least %d symbols for the polynomial" % dim) V, tmp_syms, terms = vandermonde(order, dim) if num_pts < V.shape[0]: raise ValueError( "Must provide %d points for order %d, dimension " "%d polynomial, given %d points" % (V.shape[0], order, dim, num_pts)) elif num_pts > V.shape[0]: print("gen_poly given %d points but only requires %d, "\ "continuing using the first %d points" % \ (num_pts, V.shape[0], V.shape[0])) num_pts = V.shape[0] subs_dict = {} for j in range(dim): for i in range(num_pts): subs_dict[tmp_syms[i][j]] = points[i][j] V_pts = V.subs(subs_dict) V_inv = V_pts.inv() coeffs = V_inv.multiply(Matrix([points[i][-1] for i in range(num_pts)])) f = 0 for j, term in enumerate(terms): t = 1 for k in term: t *= syms[k] f += coeffs[j]*t return f def main(): order = 2 V, tmp_syms, _ = vandermonde(order) print("Vandermonde matrix of order 2 in 1 dimension") pprint(V) print('-'*79) print("Computing the determinant and comparing to \sum_{0<i<j<=3}(a_j - a_i)") det_sum = 1 for j in range(order + 1): for i in range(j): det_sum *= (tmp_syms[j][0] - tmp_syms[i][0]) print(""" det(V) = %(det)s \sum = %(sum)s = %(sum_expand)s """ % {"det": V.det(), "sum": det_sum, "sum_expand": det_sum.expand(), }) print('-'*79) print("Polynomial fitting with a Vandermonde Matrix:") x, y, z = symbols('x,y,z') points = [(0, 3), (1, 2), (2, 3)] print(""" Quadratic function, represented by 3 points: points = %(pts)s f = %(f)s """ % {"pts": points, "f": gen_poly(points, 2, [x]), }) points = [(0, 1, 1), (1, 0, 0), (1, 1, 0), (Rational(1, 2), 0, 0), (0, Rational(1, 2), 0), (Rational(1, 2), Rational(1, 2), 0)] print(""" 2D Quadratic function, represented by 6 points: points = %(pts)s f = %(f)s """ % {"pts": points, "f": gen_poly(points, 2, [x, y]), }) points = 
[(0, 1, 1, 1), (1, 1, 0, 0), (1, 0, 1, 0), (1, 1, 1, 1)] print(""" 3D linear function, represented by 4 points: points = %(pts)s f = %(f)s """ % {"pts": points, "f": gen_poly(points, 1, [x, y, z]), }) if __name__ == "__main__": main()
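# Worked micro-example (not executed by main() above): comb_w_rep() enumerates
# the index lists that define the monomial columns of the Vandermonde matrix.
# For 2 variables taken 2 at a time:
#
#     >>> comb_w_rep(2, 2)
#     [[0, 0], [0, 1], [1, 1]]
#
# which gen_poly() turns into the degree-2 terms x**2, x*y and y**2 when
# called with syms=[x, y].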
5653ce3524ac49aaac53e8a5c82948482c358702d81d83ca935f7059d24956d6
#!/usr/bin/env python """Matplotlib 3D plotting example Demonstrates plotting with matplotlib. """ import sys from sample import sample from sympy import Symbol from sympy.external import import_module def mplot3d(f, var1, var2, show=True): """ Plot a 3d function using matplotlib/Tk. """ import warnings warnings.filterwarnings("ignore", "Could not match \S") p = import_module('pylab') # Try newer version first p3 = import_module('mpl_toolkits.mplot3d', import_kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d') if not p or not p3: sys.exit("Matplotlib is required to use mplot3d.") x, y, z = sample(f, var1, var2) fig = p.figure() ax = p3.Axes3D(fig) # ax.plot_surface(x, y, z, rstride=2, cstride=2) ax.plot_wireframe(x, y, z) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') if show: p.show() def main(): x = Symbol('x') y = Symbol('y') mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20)) # mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20)) # mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10)) if __name__ == "__main__": main()
1a5bcbe59e873d289475a474e65e052a281efbd4dd619182e5f07955c1b1827f
""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. - Extract the signature from the docstring, if it can't be determined otherwise. .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ from __future__ import division, absolute_import, print_function import sys import re import pydoc import sphinx import inspect from collections.abc import Callable if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") from docscrape_sphinx import get_doc_object, SphinxDocString if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = {'use_plots': app.config.numpydoc_use_plots, 'show_class_members': app.config.numpydoc_show_class_members, 'show_inherited_class_members': app.config.numpydoc_show_inherited_class_members, 'class_members_toctree': app.config.numpydoc_class_members_toctree} u_NL = sixu('\n') if what == 'module': # Strip top title pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*' title_re = re.compile(sixu(pattern), re.I | re.S) lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) else: doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg) if sys.version_info[0] >= 3: doc = str(doc) else: doc = unicode(doc) lines[:] = doc.split(u_NL) if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and obj.__name__): if hasattr(obj, '__module__'): v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(sixu('^\\d+$'), r): new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: new_r = sixu("%s%d") % (r, reference_offset[0]) lines[i] = lines[i].replace(sixu('[%s]_') % r, sixu('[%s]_') % new_r) lines[i] = lines[i].replace(sixu('.. [%s]') % r, sixu('.. 
[%s]') % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (isinstance(obj, Callable) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): if not hasattr(app, 'add_config_value'): return # probably called by nose, better bail out global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) app.add_config_value('numpydoc_show_inherited_class_members', True, True) app.add_config_value('numpydoc_class_members_toctree', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) # ------------------------------------------------------------------------------ # Docstring-mangling domains # ------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive
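# Worked before/after illustration (hypothetical docstring lines, not output of
# a real Sphinx run): the renumbering loop in mangle_docstrings() rewrites
# numeric citation labels so that footnotes from different docstrings rendered
# on the same page do not collide. With reference_offset == [0], a docstring
# containing
#
#     See [1]_ for details.
#     .. [1] https://example.org
#
# is rewritten to
#
#     See [R1]_ for details.
#     .. [R1] https://example.org
#
# and reference_offset becomes [1], so a later docstring's "[1]" turns into "[R2]".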
6a9ccbeaa67cecfa77232f13ba029452631a6f05bb1390afc6f167d333bc6515
""" Extract reference documentation from the NumPy source tree. """ from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc from collections.abc import Mapping import sys class Reader(object): """ A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. """ if isinstance(data, list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l + 1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self, n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(Mapping): def __init__(self, docstring, config={}): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Yields': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], # 'Notes': [], 'Warnings': [], 'References': '', # 'Examples': '', 'index': {} } self._other_keys = [] self._parse() def __getitem__(self, key): return self._parsed_data[key] def __setitem__(self, key, val): if key not in self._parsed_data: self._other_keys.append(key) self._parsed_data[key] = val def __iter__(self): return iter(self._parsed_data) def __len__(self): return len(self._parsed_data) def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self, doc): i = 0 j = 0 for i, line in enumerate(doc): if line.strip(): break for j, line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc) - j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self, content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name, arg_type, desc)) return params _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|" r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not an item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) if '.' not in name: name = '~.' + name items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): if func.strip(): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return # If several signatures present, take the last one while True: summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): continue break if summary is not None: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() sections = list(self._read_sections()) section_names = set([section for section, content in sections]) has_returns = 'Returns' in section_names has_yields = 'Yields' in section_names # We could do more tests, but we are not. Arbitrarily. if has_returns and has_yields: msg = 'Docstring contains both a Returns and Yields section.' raise ValueError(msg) for (section, content) in sections: if not section.startswith('..'): section = (s.capitalize() for s in section.split(' ')) section = ' '.join(section) if section in ('Parameters', 'Returns', 'Yields', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*', '\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param, param_type, desc in self[name]: if param_type: out += ['%s : %s' % (param, param_type)] else: out += [param] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Yields', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes', 'References', 'Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str, indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: # try to read signature if sys.version_info[0] >= 3: argspec = inspect.getfullargspec(func) else: argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*', '\*') signature = '%s%s' % (func_name, argspec) except TypeError as e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if self._role not in roles: print("Warning: invalid role %s" % self._role) out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): extra_public_methods = ['__call__'] def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls self.show_inherited_members = config.get( 'show_inherited_class_members', True) if modulename and not modulename.endswith('.'): modulename += '.' 
self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config.get('show_class_members', True): def splitlines_x(s): if not s: return [] else: return s.splitlines() for field, items in [('Methods', self.methods), ('Attributes', self.properties)]: if not self[field]: doc_list = [] for name in sorted(items): clsname = getattr(self._cls, name, None) if clsname is not None: doc_item = pydoc.getdoc(clsname) doc_list.append((name, '', splitlines_x(doc_item))) self[field] = doc_list @property def methods(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if ((not name.startswith('_') or name in self.extra_public_methods) and callable(func))] @property def properties(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if not name.startswith('_') and func is None]
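if __name__ == "__main__":
    # Minimal usage sketch (made-up docstring, not taken from the NumPy sources):
    # parse a numpydoc-style docstring and read sections back through the
    # Mapping interface exposed by NumpyDocString.
    example_doc = """
    Add two numbers.

    Parameters
    ----------
    a : int
        First addend.
    b : int
        Second addend.

    Returns
    -------
    int
        The sum of ``a`` and ``b``.
    """
    parsed = NumpyDocString(example_doc)
    print(parsed['Summary'])     # ['Add two numbers.']
    print(parsed['Parameters'])  # [('a', 'int', ['First addend.']), ('b', 'int', ['Second addend.'])]
    print(parsed['Returns'])     # [('int', '', ['The sum of ``a`` and ``b``.'])]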
6752d5b0a29f8b83f002d414decedaa8ab09628c11be24a2d538cffee2f40b24
""" Continuous Random Variables - Prebuilt variables Contains ======== Arcsin Benini Beta BetaNoncentral BetaPrime Cauchy Chi ChiNoncentral ChiSquared Dagum Erlang ExGaussian Exponential ExponentialPower FDistribution FisherZ Frechet Gamma GammaInverse Gumbel Gompertz Kumaraswamy Laplace Levy Logistic LogLogistic LogNormal Maxwell Moyal Nakagami Normal Pareto PowerFunction QuadraticU RaisedCosine Rayleigh Reciprocal ShiftedGompertz StudentT Trapezoidal Triangular Uniform UniformSum VonMises Wald Weibull WignerSemicircle """ from __future__ import print_function, division import random from sympy import beta as beta_fn from sympy import cos, sin, tan, atan, exp, besseli, besselj, besselk from sympy import (log, sqrt, pi, S, Dummy, Interval, sympify, gamma, sign, Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs, Lambda, Basic, lowergamma, erf, erfc, erfi, erfinv, I, asin, hyper, uppergamma, sinh, Ne, expint, Rational) from sympy.external import import_module from sympy.matrices import MatrixBase, MatrixExpr from sympy.stats.crv import (SingleContinuousPSpace, SingleContinuousDistribution, ContinuousDistributionHandmade) from sympy.stats.joint_rv import JointPSpace, CompoundDistribution from sympy.stats.joint_rv_types import multivariate_rv from sympy.stats.rv import _value_check, RandomSymbol oo = S.Infinity __all__ = ['ContinuousRV', 'Arcsin', 'Benini', 'Beta', 'BetaNoncentral', 'BetaPrime', 'Cauchy', 'Chi', 'ChiNoncentral', 'ChiSquared', 'Dagum', 'Erlang', 'ExGaussian', 'Exponential', 'ExponentialPower', 'FDistribution', 'FisherZ', 'Frechet', 'Gamma', 'GammaInverse', 'Gompertz', 'Gumbel', 'Kumaraswamy', 'Laplace', 'Levy', 'Logistic', 'LogLogistic', 'LogNormal', 'Maxwell', 'Moyal', 'Nakagami', 'Normal', 'GaussianInverse', 'Pareto', 'PowerFunction', 'QuadraticU', 'RaisedCosine', 'Rayleigh', 'Reciprocal', 'StudentT', 'ShiftedGompertz', 'Trapezoidal', 'Triangular', 'Uniform', 'UniformSum', 'VonMises', 'Wald', 'Weibull', 'WignerSemicircle', ] def ContinuousRV(symbol, density, set=Interval(-oo, oo)): """ Create a Continuous Random Variable given the following: Parameters ========== symbol : Symbol Represents name of the random variable. density : Expression containing symbol Represents probability density function. set : set/Interval Represents the region where the pdf is valid, by default is real line. Returns ======= RandomSymbol Many common continuous random variable types are already implemented. This function should be necessary only very rarely. 
Examples ======== >>> from sympy import Symbol, sqrt, exp, pi >>> from sympy.stats import ContinuousRV, P, E >>> x = Symbol("x") >>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution >>> X = ContinuousRV(x, pdf) >>> E(X) 0 >>> P(X>0) 1/2 """ pdf = Piecewise((density, set.as_relational(symbol)), (0, True)) pdf = Lambda(symbol, pdf) dist = ContinuousDistributionHandmade(pdf, set) return SingleContinuousPSpace(symbol, dist).value def rv(symbol, cls, args): args = list(map(sympify, args)) dist = cls(*args) dist.check(*args) pspace = SingleContinuousPSpace(symbol, dist) if any(isinstance(arg, RandomSymbol) for arg in args): pspace = JointPSpace(symbol, CompoundDistribution(dist)) return pspace.value ######################################## # Continuous Probability Distributions # ######################################## #------------------------------------------------------------------------------- # Arcsin distribution ---------------------------------------------------------- class ArcsinDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') @property def set(self): return Interval(self.a, self.b) def pdf(self, x): a, b = self.a, self.b return 1/(pi*sqrt((x - a)*(b - x))) def _cdf(self, x): a, b = self.a, self.b return Piecewise( (S.Zero, x < a), (2*asin(sqrt((x - a)/(b - a)))/pi, x <= b), (S.One, True)) def Arcsin(name, a=0, b=1): r""" Create a Continuous Random Variable with an arcsin distribution. The density of the arcsin distribution is given by .. math:: f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}} with :math:`x \in (a,b)`. It must hold that :math:`-\infty < a < b < \infty`. Parameters ========== a : Real number, the left interval boundary b : Real number, the right interval boundary Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Arcsin, density, cdf >>> from sympy import Symbol, simplify >>> a = Symbol("a", real=True) >>> b = Symbol("b", real=True) >>> z = Symbol("z") >>> X = Arcsin("x", a, b) >>> density(X)(z) 1/(pi*sqrt((-a + z)*(b - z))) >>> cdf(X)(z) Piecewise((0, a > z), (2*asin(sqrt((-a + z)/(-a + b)))/pi, b >= z), (1, True)) References ========== .. [1] https://en.wikipedia.org/wiki/Arcsine_distribution """ return rv(name, ArcsinDistribution, (a, b)) #------------------------------------------------------------------------------- # Benini distribution ---------------------------------------------------------- class BeniniDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta', 'sigma') @staticmethod def check(alpha, beta, sigma): _value_check(alpha > 0, "Shape parameter Alpha must be positive.") _value_check(beta > 0, "Shape parameter Beta must be positive.") _value_check(sigma > 0, "Scale parameter Sigma must be positive.") @property def set(self): return Interval(self.sigma, oo) def pdf(self, x): alpha, beta, sigma = self.alpha, self.beta, self.sigma return (exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2) *(alpha/x + 2*beta*log(x/sigma)/x)) def _moment_generating_function(self, t): raise NotImplementedError('The moment generating function of the ' 'Benini distribution does not exist.') def Benini(name, alpha, beta, sigma): r""" Create a Continuous Random Variable with a Benini distribution. The density of the Benini distribution is given by .. math:: f(x) := e^{-\alpha\log{\frac{x}{\sigma}} -\beta\log^2\left[{\frac{x}{\sigma}}\right]} \left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right) This is a heavy-tailed distribution and is also known as the log-Rayleigh distribution. 
Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape sigma : Real number, `\sigma > 0`, a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Benini, density, cdf >>> from sympy import Symbol, simplify, pprint >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = Benini("x", alpha, beta, sigma) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) / / z \\ / z \ 2/ z \ | 2*beta*log|-----|| - alpha*log|-----| - beta*log |-----| |alpha \sigma/| \sigma/ \sigma/ |----- + -----------------|*e \ z z / >>> cdf(X)(z) Piecewise((1 - exp(-alpha*log(z/sigma) - beta*log(z/sigma)**2), sigma <= z), (0, True)) References ========== .. [1] https://en.wikipedia.org/wiki/Benini_distribution .. [2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html """ return rv(name, BeniniDistribution, (alpha, beta, sigma)) #------------------------------------------------------------------------------- # Beta distribution ------------------------------------------------------------ class BetaDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, 1) @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Shape parameter Alpha must be positive.") _value_check(beta > 0, "Shape parameter Beta must be positive.") def pdf(self, x): alpha, beta = self.alpha, self.beta return x**(alpha - 1) * (1 - x)**(beta - 1) / beta_fn(alpha, beta) def sample(self, size=()): if not size: return random.betavariate(self.alpha, self.beta) else: return [random.betavariate(self.alpha, self.beta)]*size def _characteristic_function(self, t): return hyper((self.alpha,), (self.alpha + self.beta,), I*t) def _moment_generating_function(self, t): return hyper((self.alpha,), (self.alpha + self.beta,), t) def Beta(name, alpha, beta): r""" Create a Continuous Random Variable with a Beta distribution. The density of the Beta distribution is given by .. math:: f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)} with :math:`x \in [0,1]`. Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Beta, density, E, variance >>> from sympy import Symbol, simplify, pprint, factor >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> z = Symbol("z") >>> X = Beta("x", alpha, beta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) alpha - 1 beta - 1 z *(1 - z) -------------------------- B(alpha, beta) >>> simplify(E(X)) alpha/(alpha + beta) >>> factor(simplify(variance(X))) alpha*beta/((alpha + beta)**2*(alpha + beta + 1)) References ========== .. [1] https://en.wikipedia.org/wiki/Beta_distribution .. 
[2] http://mathworld.wolfram.com/BetaDistribution.html """ return rv(name, BetaDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Noncentral Beta distribution ------------------------------------------------------------ class BetaNoncentralDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta', 'lamda') set = Interval(0, 1) @staticmethod def check(alpha, beta, lamda): _value_check(alpha > 0, "Shape parameter Alpha must be positive.") _value_check(beta > 0, "Shape parameter Beta must be positive.") _value_check(lamda >= 0, "Noncentrality parameter Lambda must be positive") def pdf(self, x): alpha, beta, lamda = self.alpha, self.beta, self.lamda k = Dummy("k") return Sum(exp(-lamda / 2) * (lamda / 2)**k * x**(alpha + k - 1) *( 1 - x)**(beta - 1) / (factorial(k) * beta_fn(alpha + k, beta)), (k, 0, oo)) def BetaNoncentral(name, alpha, beta, lamda): r""" Create a Continuous Random Variable with a Type I Noncentral Beta distribution. The density of the Noncentral Beta distribution is given by .. math:: f(x) := \sum_{k=0}^\infty e^{-\lambda/2}\frac{(\lambda/2)^k}{k!} \frac{x^{\alpha+k-1}(1-x)^{\beta-1}}{\mathrm{B}(\alpha+k,\beta)} with :math:`x \in [0,1]`. Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape lamda: Real number, `\lambda >= 0`, noncentrality parameter Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import BetaNoncentral, density, cdf >>> from sympy import Symbol, pprint >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> lamda = Symbol("lamda", nonnegative=True) >>> z = Symbol("z") >>> X = BetaNoncentral("x", alpha, beta, lamda) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) oo _____ \ ` \ -lamda \ k ------- \ k + alpha - 1 /lamda\ beta - 1 2 ) z *|-----| *(1 - z) *e / \ 2 / / ------------------------------------------------ / B(k + alpha, beta)*k! /____, k = 0 Compute cdf with specific 'x', 'alpha', 'beta' and 'lamda' values as follows : >>> cdf(BetaNoncentral("x", 1, 1, 1), evaluate=False)(2).doit() 2*exp(1/2) The argument evaluate=False prevents an attempt at evaluation of the sum for general x, before the argument 2 is passed. References ========== .. [1] https://en.wikipedia.org/wiki/Noncentral_beta_distribution .. [2] https://reference.wolfram.com/language/ref/NoncentralBetaDistribution.html """ return rv(name, BetaNoncentralDistribution, (alpha, beta, lamda)) #------------------------------------------------------------------------------- # Beta prime distribution ------------------------------------------------------ class BetaPrimeDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Shape parameter Alpha must be positive.") _value_check(beta > 0, "Shape parameter Beta must be positive.") set = Interval(0, oo) def pdf(self, x): alpha, beta = self.alpha, self.beta return x**(alpha - 1)*(1 + x)**(-alpha - beta)/beta_fn(alpha, beta) def BetaPrime(name, alpha, beta): r""" Create a continuous random variable with a Beta prime distribution. The density of the Beta prime distribution is given by .. math:: f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)} with :math:`x > 0`. 
Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import BetaPrime, density >>> from sympy import Symbol, pprint >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> z = Symbol("z") >>> X = BetaPrime("x", alpha, beta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) alpha - 1 -alpha - beta z *(z + 1) ------------------------------- B(alpha, beta) References ========== .. [1] https://en.wikipedia.org/wiki/Beta_prime_distribution .. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html """ return rv(name, BetaPrimeDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Cauchy distribution ---------------------------------------------------------- class CauchyDistribution(SingleContinuousDistribution): _argnames = ('x0', 'gamma') @staticmethod def check(x0, gamma): _value_check(gamma > 0, "Scale parameter Gamma must be positive.") _value_check(x0.is_real, "Location parameter must be real.") def pdf(self, x): return 1/(pi*self.gamma*(1 + ((x - self.x0)/self.gamma)**2)) def _cdf(self, x): x0, gamma = self.x0, self.gamma return (1/pi)*atan((x - x0)/gamma) + S.Half def _characteristic_function(self, t): return exp(self.x0 * I * t - self.gamma * Abs(t)) def _moment_generating_function(self, t): raise NotImplementedError("The moment generating function for the " "Cauchy distribution does not exist.") def _quantile(self, p): return self.x0 + self.gamma*tan(pi*(p - S.Half)) def Cauchy(name, x0, gamma): r""" Create a continuous random variable with a Cauchy distribution. The density of the Cauchy distribution is given by .. math:: f(x) := \frac{1}{\pi \gamma [1 + {(\frac{x-x_0}{\gamma})}^2]} Parameters ========== x0 : Real number, the location gamma : Real number, `\gamma > 0`, a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Cauchy, density >>> from sympy import Symbol >>> x0 = Symbol("x0") >>> gamma = Symbol("gamma", positive=True) >>> z = Symbol("z") >>> X = Cauchy("x", x0, gamma) >>> density(X)(z) 1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2)) References ========== .. [1] https://en.wikipedia.org/wiki/Cauchy_distribution .. [2] http://mathworld.wolfram.com/CauchyDistribution.html """ return rv(name, CauchyDistribution, (x0, gamma)) #------------------------------------------------------------------------------- # Chi distribution ------------------------------------------------------------- class ChiDistribution(SingleContinuousDistribution): _argnames = ('k',) @staticmethod def check(k): _value_check(k > 0, "Number of degrees of freedom (k) must be positive.") _value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.") set = Interval(0, oo) def pdf(self, x): return 2**(1 - self.k/2)*x**(self.k - 1)*exp(-x**2/2)/gamma(self.k/2) def _characteristic_function(self, t): k = self.k part_1 = hyper((k/2,), (S.Half,), -t**2/2) part_2 = I*t*sqrt(2)*gamma((k+1)/2)/gamma(k/2) part_3 = hyper(((k+1)/2,), (Rational(3, 2),), -t**2/2) return part_1 + part_2*part_3 def _moment_generating_function(self, t): k = self.k part_1 = hyper((k / 2,), (S.Half,), t ** 2 / 2) part_2 = t * sqrt(2) * gamma((k + 1) / 2) / gamma(k / 2) part_3 = hyper(((k + 1) / 2,), (S(3) / 2,), t ** 2 / 2) return part_1 + part_2 * part_3 def Chi(name, k): r""" Create a continuous random variable with a Chi distribution. The density of the Chi distribution is given by .. 
math:: f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)} with :math:`x \geq 0`. Parameters ========== k : Positive integer, The number of degrees of freedom Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Chi, density, E >>> from sympy import Symbol, simplify >>> k = Symbol("k", integer=True) >>> z = Symbol("z") >>> X = Chi("x", k) >>> density(X)(z) 2**(1 - k/2)*z**(k - 1)*exp(-z**2/2)/gamma(k/2) >>> simplify(E(X)) sqrt(2)*gamma(k/2 + 1/2)/gamma(k/2) References ========== .. [1] https://en.wikipedia.org/wiki/Chi_distribution .. [2] http://mathworld.wolfram.com/ChiDistribution.html """ return rv(name, ChiDistribution, (k,)) #------------------------------------------------------------------------------- # Non-central Chi distribution ------------------------------------------------- class ChiNoncentralDistribution(SingleContinuousDistribution): _argnames = ('k', 'l') @staticmethod def check(k, l): _value_check(k > 0, "Number of degrees of freedom (k) must be positive.") _value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.") _value_check(l > 0, "Shift parameter Lambda must be positive.") set = Interval(0, oo) def pdf(self, x): k, l = self.k, self.l return exp(-(x**2+l**2)/2)*x**k*l / (l*x)**(k/2) * besseli(k/2-1, l*x) def ChiNoncentral(name, k, l): r""" Create a continuous random variable with a non-central Chi distribution. The density of the non-central Chi distribution is given by .. math:: f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda} {(\lambda x)^{k/2}} I_{k/2-1}(\lambda x) with `x \geq 0`. Here, `I_\nu (x)` is the :ref:`modified Bessel function of the first kind <besseli>`. Parameters ========== k : A positive Integer, `k > 0`, the number of degrees of freedom lambda : Real number, `\lambda > 0`, Shift parameter Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ChiNoncentral, density >>> from sympy import Symbol >>> k = Symbol("k", integer=True) >>> l = Symbol("l") >>> z = Symbol("z") >>> X = ChiNoncentral("x", k, l) >>> density(X)(z) l*z**k*(l*z)**(-k/2)*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z) References ========== .. [1] https://en.wikipedia.org/wiki/Noncentral_chi_distribution """ return rv(name, ChiNoncentralDistribution, (k, l)) #------------------------------------------------------------------------------- # Chi squared distribution ----------------------------------------------------- class ChiSquaredDistribution(SingleContinuousDistribution): _argnames = ('k',) @staticmethod def check(k): _value_check(k > 0, "Number of degrees of freedom (k) must be positive.") _value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.") set = Interval(0, oo) def pdf(self, x): k = self.k return 1/(2**(k/2)*gamma(k/2))*x**(k/2 - 1)*exp(-x/2) def _cdf(self, x): k = self.k return Piecewise( (S.One/gamma(k/2)*lowergamma(k/2, x/2), x >= 0), (0, True) ) def _characteristic_function(self, t): return (1 - 2*I*t)**(-self.k/2) def _moment_generating_function(self, t): return (1 - 2*t)**(-self.k/2) def ChiSquared(name, k): r""" Create a continuous random variable with a Chi-squared distribution. The density of the Chi-squared distribution is given by .. math:: f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)} x^{\frac{k}{2}-1} e^{-\frac{x}{2}} with :math:`x \geq 0`. 
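The Chi-squared distribution with :math:`k` degrees of freedom describes the
sum of the squares of :math:`k` independent standard normal random variables,

.. math:: \sum_{i=1}^{k} Z_i^2 \sim \chi^2_k
          \quad \text{for independent } Z_i \sim N(0, 1),

and coincides with a Gamma distribution with shape :math:`k/2` and scale
:math:`\theta = 2`.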
Parameters ========== k : Positive integer, The number of degrees of freedom Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ChiSquared, density, E, variance, moment >>> from sympy import Symbol >>> k = Symbol("k", integer=True, positive=True) >>> z = Symbol("z") >>> X = ChiSquared("x", k) >>> density(X)(z) 2**(-k/2)*z**(k/2 - 1)*exp(-z/2)/gamma(k/2) >>> E(X) k >>> variance(X) 2*k >>> moment(X, 3) k**3 + 6*k**2 + 8*k References ========== .. [1] https://en.wikipedia.org/wiki/Chi_squared_distribution .. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html """ return rv(name, ChiSquaredDistribution, (k, )) #------------------------------------------------------------------------------- # Dagum distribution ----------------------------------------------------------- class DagumDistribution(SingleContinuousDistribution): _argnames = ('p', 'a', 'b') set = Interval(0, oo) @staticmethod def check(p, a, b): _value_check(p > 0, "Shape parameter p must be positive.") _value_check(a > 0, "Shape parameter a must be positive.") _value_check(b > 0, "Scale parameter b must be positive.") def pdf(self, x): p, a, b = self.p, self.a, self.b return a*p/x*((x/b)**(a*p)/(((x/b)**a + 1)**(p + 1))) def _cdf(self, x): p, a, b = self.p, self.a, self.b return Piecewise(((S.One + (S(x)/b)**-a)**-p, x>=0), (S.Zero, True)) def Dagum(name, p, a, b): r""" Create a continuous random variable with a Dagum distribution. The density of the Dagum distribution is given by .. math:: f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}} {\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right) with :math:`x > 0`. Parameters ========== p : Real number, `p > 0`, a shape a : Real number, `a > 0`, a shape b : Real number, `b > 0`, a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Dagum, density, cdf >>> from sympy import Symbol >>> p = Symbol("p", positive=True) >>> a = Symbol("a", positive=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Dagum("x", p, a, b) >>> density(X)(z) a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z >>> cdf(X)(z) Piecewise(((1 + (z/b)**(-a))**(-p), z >= 0), (0, True)) References ========== .. [1] https://en.wikipedia.org/wiki/Dagum_distribution """ return rv(name, DagumDistribution, (p, a, b)) #------------------------------------------------------------------------------- # Erlang distribution ---------------------------------------------------------- def Erlang(name, k, l): r""" Create a continuous random variable with an Erlang distribution. The density of the Erlang distribution is given by .. math:: f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!} with :math:`x \in [0,\infty]`. Parameters ========== k : Positive integer l : Real number, `\lambda > 0`, the rate Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Erlang, density, cdf, E, variance >>> from sympy import Symbol, simplify, pprint >>> k = Symbol("k", integer=True, positive=True) >>> l = Symbol("l", positive=True) >>> z = Symbol("z") >>> X = Erlang("x", k, l) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) k k - 1 -l*z l *z *e --------------- Gamma(k) >>> C = cdf(X)(z) >>> pprint(C, use_unicode=False) /lowergamma(k, l*z) |------------------ for z > 0 < Gamma(k) | \ 0 otherwise >>> E(X) k/l >>> simplify(variance(X)) k/l**2 References ========== .. [1] https://en.wikipedia.org/wiki/Erlang_distribution .. 
[2] http://mathworld.wolfram.com/ErlangDistribution.html """ return rv(name, GammaDistribution, (k, S.One/l)) # ------------------------------------------------------------------------------- # ExGaussian distribution ----------------------------------------------------- class ExGaussianDistribution(SingleContinuousDistribution): _argnames = ('mean', 'std', 'rate') set = Interval(-oo, oo) @staticmethod def check(mean, std, rate): _value_check( std > 0, "Standard deviation of ExGaussian must be positive.") _value_check(rate > 0, "Rate of ExGaussian must be positive.") def pdf(self, x): mean, std, rate = self.mean, self.std, self.rate term1 = rate/2 term2 = exp(rate * (2 * mean + rate * std**2 - 2*x)/2) term3 = erfc((mean + rate*std**2 - x)/(sqrt(2)*std)) return term1*term2*term3 def _cdf(self, x): from sympy.stats import cdf mean, std, rate = self.mean, self.std, self.rate u = rate*(x - mean) v = rate*std GaussianCDF1 = cdf(Normal('x', 0, v))(u) GaussianCDF2 = cdf(Normal('x', v**2, v))(u) return GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2)) def _characteristic_function(self, t): mean, std, rate = self.mean, self.std, self.rate term1 = (1 - I*t/rate)**(-1) term2 = exp(I*mean*t - std**2*t**2/2) return term1 * term2 def _moment_generating_function(self, t): mean, std, rate = self.mean, self.std, self.rate term1 = (1 - t/rate)**(-1) term2 = exp(mean*t + std**2*t**2/2) return term1*term2 def ExGaussian(name, mean, std, rate): r""" Create a continuous random variable with an Exponentially modified Gaussian (EMG) distribution. The density of the exponentially modified Gaussian distribution is given by .. math:: f(x) := \frac{\lambda}{2}e^{\frac{\lambda}{2}(2\mu+\lambda\sigma^2-2x)} \text{erfc}(\frac{\mu + \lambda\sigma^2 - x}{\sqrt{2}\sigma}) with `x > 0`. Note that the expected value is `1/\lambda`. Parameters ========== mu : A Real number, the mean of Gaussian component std: A positive Real number, :math: `\sigma^2 > 0` the variance of Gaussian component lambda: A positive Real number, :math: `\lambda > 0` the rate of Exponential component Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ExGaussian, density, cdf, E >>> from sympy.stats import variance, skewness >>> from sympy import Symbol, pprint, simplify >>> mean = Symbol("mu") >>> std = Symbol("sigma", positive=True) >>> rate = Symbol("lamda", positive=True) >>> z = Symbol("z") >>> X = ExGaussian("x", mean, std, rate) >>> pprint(density(X)(z), use_unicode=False) / 2 \ lamda*\lamda*sigma + 2*mu - 2*z/ --------------------------------- / ___ / 2 \\ 2 |\/ 2 *\lamda*sigma + mu - z/| lamda*e *erfc|-----------------------------| \ 2*sigma / ---------------------------------------------------------------------------- 2 >>> cdf(X)(z) -(erf(sqrt(2)*(-lamda**2*sigma**2 + lamda*(-mu + z))/(2*lamda*sigma))/2 + 1/2)*exp(lamda**2*sigma**2/2 - lamda*(-mu + z)) + erf(sqrt(2)*(-mu + z)/(2*sigma))/2 + 1/2 >>> E(X) (lamda*mu + 1)/lamda >>> simplify(variance(X)) sigma**2 + lamda**(-2) >>> simplify(skewness(X)) 2/(lamda**2*sigma**2 + 1)**(3/2) References ========== .. 
[1] https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution """ return rv(name, ExGaussianDistribution, (mean, std, rate)) #------------------------------------------------------------------------------- # Exponential distribution ----------------------------------------------------- class ExponentialDistribution(SingleContinuousDistribution): _argnames = ('rate',) set = Interval(0, oo) @staticmethod def check(rate): _value_check(rate > 0, "Rate must be positive.") def pdf(self, x): return self.rate * exp(-self.rate*x) def sample(self, size=()): if not size: return random.expovariate(self.rate) else: return [random.expovariate(self.rate)]*size def _cdf(self, x): return Piecewise( (S.One - exp(-self.rate*x), x >= 0), (0, True), ) def _characteristic_function(self, t): rate = self.rate return rate / (rate - I*t) def _moment_generating_function(self, t): rate = self.rate return rate / (rate - t) def _quantile(self, p): return -log(1-p)/self.rate def Exponential(name, rate): r""" Create a continuous random variable with an Exponential distribution. The density of the exponential distribution is given by .. math:: f(x) := \lambda \exp(-\lambda x) with `x > 0`. Note that the expected value is `1/\lambda`. Parameters ========== rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean) Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Exponential, density, cdf, E >>> from sympy.stats import variance, std, skewness, quantile >>> from sympy import Symbol >>> l = Symbol("lambda", positive=True) >>> z = Symbol("z") >>> p = Symbol("p") >>> X = Exponential("x", l) >>> density(X)(z) lambda*exp(-lambda*z) >>> cdf(X)(z) Piecewise((1 - exp(-lambda*z), z >= 0), (0, True)) >>> quantile(X)(p) -log(1 - p)/lambda >>> E(X) 1/lambda >>> variance(X) lambda**(-2) >>> skewness(X) 2 >>> X = Exponential('x', 10) >>> density(X)(z) 10*exp(-10*z) >>> E(X) 1/10 >>> std(X) 1/10 References ========== .. [1] https://en.wikipedia.org/wiki/Exponential_distribution .. [2] http://mathworld.wolfram.com/ExponentialDistribution.html """ return rv(name, ExponentialDistribution, (rate, )) # ------------------------------------------------------------------------------- # Exponential Power distribution ----------------------------------------------------- class ExponentialPowerDistribution(SingleContinuousDistribution): _argnames = ('mu', 'alpha', 'beta') set = Interval(-oo, oo) @staticmethod def check(mu, alpha, beta): _value_check(alpha > 0, "Scale parameter alpha must be positive.") _value_check(beta > 0, "Shape parameter beta must be positive.") def pdf(self, x): mu, alpha, beta = self.mu, self.alpha, self.beta num = beta*exp(-(Abs(x - mu)/alpha)**beta) den = 2*alpha*gamma(1/beta) return num/den def _cdf(self, x): mu, alpha, beta = self.mu, self.alpha, self.beta num = lowergamma(1/beta, (Abs(x - mu) / alpha)**beta) den = 2*gamma(1/beta) return sign(x - mu)*num/den + S.Half def ExponentialPower(name, mu, alpha, beta): r""" Create a Continuous Random Variable with Exponential Power distribution. This distribution is known also as Generalized Normal distribution version 1 The density of the Exponential Power distribution is given by .. math:: f(x) := \frac{\beta}{2\alpha\Gamma(\frac{1}{\beta})} e^{{-(\frac{|x - \mu|}{\alpha})^{\beta}}} with :math:`x \in [ - \infty, \infty ]`. 
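Two special cases recover familiar distributions: for :math:`\beta = 1` the
density reduces to a Laplace distribution with location :math:`\mu` and scale
:math:`\alpha`, while for :math:`\beta = 2` it reduces to a Normal
distribution with mean :math:`\mu` and standard deviation
:math:`\alpha/\sqrt{2}`.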
Parameters ========== mu : Real number, 'mu' is a location alpha : Real number, 'alpha > 0' is a scale beta : Real number, 'beta > 0' is a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ExponentialPower, density, E, variance, cdf >>> from sympy import Symbol, simplify, pprint >>> z = Symbol("z") >>> mu = Symbol("mu") >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> X = ExponentialPower("x", mu, alpha, beta) >>> pprint(density(X)(z), use_unicode=False) beta /|mu - z|\ -|--------| \ alpha / beta*e --------------------- / 1 \ 2*alpha*Gamma|----| \beta/ >>> cdf(X)(z) 1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta)) References ========== .. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html .. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 """ return rv(name, ExponentialPowerDistribution, (mu, alpha, beta)) #------------------------------------------------------------------------------- # F distribution --------------------------------------------------------------- class FDistributionDistribution(SingleContinuousDistribution): _argnames = ('d1', 'd2') set = Interval(0, oo) @staticmethod def check(d1, d2): _value_check((d1 > 0, d1.is_integer), "Degrees of freedom d1 must be positive integer.") _value_check((d2 > 0, d2.is_integer), "Degrees of freedom d2 must be positive integer.") def pdf(self, x): d1, d2 = self.d1, self.d2 return (sqrt((d1*x)**d1*d2**d2 / (d1*x+d2)**(d1+d2)) / (x * beta_fn(d1/2, d2/2))) def _moment_generating_function(self, t): raise NotImplementedError('The moment generating function for the ' 'F-distribution does not exist.') def FDistribution(name, d1, d2): r""" Create a continuous random variable with a F distribution. The density of the F distribution is given by .. math:: f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}} {(d_1 x + d_2)^{d_1 + d_2}}}} {x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)} with :math:`x > 0`. Parameters ========== d1 : `d_1 > 0`, where d_1 is the degrees of freedom (n_1 - 1) d2 : `d_2 > 0`, where d_2 is the degrees of freedom (n_2 - 1) Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import FDistribution, density >>> from sympy import Symbol, simplify, pprint >>> d1 = Symbol("d1", positive=True) >>> d2 = Symbol("d2", positive=True) >>> z = Symbol("z") >>> X = FDistribution("x", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d2 -- ______________________________ 2 / d1 -d1 - d2 d2 *\/ (d1*z) *(d1*z + d2) -------------------------------------- /d1 d2\ z*B|--, --| \2 2 / References ========== .. [1] https://en.wikipedia.org/wiki/F-distribution .. [2] http://mathworld.wolfram.com/F-Distribution.html """ return rv(name, FDistributionDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Fisher Z distribution -------------------------------------------------------- class FisherZDistribution(SingleContinuousDistribution): _argnames = ('d1', 'd2') set = Interval(-oo, oo) @staticmethod def check(d1, d2): _value_check(d1 > 0, "Degree of freedom d1 must be positive.") _value_check(d2 > 0, "Degree of freedom d2 must be positive.") def pdf(self, x): d1, d2 = self.d1, self.d2 return (2*d1**(d1/2)*d2**(d2/2) / beta_fn(d1/2, d2/2) * exp(d1*x) / (d1*exp(2*x)+d2)**((d1+d2)/2)) def FisherZ(name, d1, d2): r""" Create a Continuous Random Variable with an Fisher's Z distribution. 
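The Fisher's Z distribution is the distribution of half the natural logarithm
of an F-distributed random variable,

.. math:: z = \frac{1}{2} \log F_{d_1, d_2},

where :math:`F_{d_1, d_2}` follows an F distribution with :math:`d_1` and
:math:`d_2` degrees of freedom.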
The density of the Fisher's Z distribution is given by .. math:: f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)} \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}} .. TODO - What is the difference between these degrees of freedom? Parameters ========== d1 : `d_1 > 0`, degree of freedom d2 : `d_2 > 0`, degree of freedom Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import FisherZ, density >>> from sympy import Symbol, simplify, pprint >>> d1 = Symbol("d1", positive=True) >>> d2 = Symbol("d2", positive=True) >>> z = Symbol("z") >>> X = FisherZ("x", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d1 d2 d1 d2 - -- - -- -- -- 2 2 2 2 / 2*z \ d1*z 2*d1 *d2 *\d1*e + d2/ *e ----------------------------------------- /d1 d2\ B|--, --| \2 2 / References ========== .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html """ return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution --------------------------------------------------------- class FrechetDistribution(SingleContinuousDistribution): _argnames = ('a', 's', 'm') set = Interval(0, oo) @staticmethod def check(a, s, m): _value_check(a > 0, "Shape parameter alpha must be positive.") _value_check(s > 0, "Scale parameter s must be positive.") def __new__(cls, a, s=1, m=0): a, s, m = list(map(sympify, (a, s, m))) return Basic.__new__(cls, a, s, m) def pdf(self, x): a, s, m = self.a, self.s, self.m return a/s * ((x-m)/s)**(-1-a) * exp(-((x-m)/s)**(-a)) def _cdf(self, x): a, s, m = self.a, self.s, self.m return Piecewise((exp(-((x-m)/s)**(-a)), x >= m), (S.Zero, True)) def Frechet(name, a, s=1, m=0): r""" Create a continuous random variable with a Frechet distribution. The density of the Frechet distribution is given by .. math:: f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha} e^{-(\frac{x-m}{s})^{-\alpha}} with :math:`x \geq m`. Parameters ========== a : Real number, :math:`a \in \left(0, \infty\right)` the shape s : Real number, :math:`s \in \left(0, \infty\right)` the scale m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Frechet, density, E, std, cdf >>> from sympy import Symbol, simplify >>> a = Symbol("a", positive=True) >>> s = Symbol("s", positive=True) >>> m = Symbol("m", real=True) >>> z = Symbol("z") >>> X = Frechet("x", a, s, m) >>> density(X)(z) a*((-m + z)/s)**(-a - 1)*exp(-((-m + z)/s)**(-a))/s >>> cdf(X)(z) Piecewise((exp(-((-m + z)/s)**(-a)), m <= z), (0, True)) References ========== .. 
[1] https://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution """ return rv(name, FrechetDistribution, (a, s, m)) #------------------------------------------------------------------------------- # Gamma distribution ----------------------------------------------------------- class GammaDistribution(SingleContinuousDistribution): _argnames = ('k', 'theta') set = Interval(0, oo) @staticmethod def check(k, theta): _value_check(k > 0, "k must be positive") _value_check(theta > 0, "Theta must be positive") def pdf(self, x): k, theta = self.k, self.theta return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k) def sample(self, size=()): if not size: return random.gammavariate(self.k, self.theta) else: return [random.gammavariate(self.k, self.theta)]*size def _cdf(self, x): k, theta = self.k, self.theta return Piecewise( (lowergamma(k, S(x)/theta)/gamma(k), x > 0), (S.Zero, True)) def _characteristic_function(self, t): return (1 - self.theta*I*t)**(-self.k) def _moment_generating_function(self, t): return (1- self.theta*t)**(-self.k) def Gamma(name, k, theta): r""" Create a continuous random variable with a Gamma distribution. The density of the Gamma distribution is given by .. math:: f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}} with :math:`x \in [0,1]`. Parameters ========== k : Real number, `k > 0`, a shape theta : Real number, `\theta > 0`, a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Gamma, density, cdf, E, variance >>> from sympy import Symbol, pprint, simplify >>> k = Symbol("k", positive=True) >>> theta = Symbol("theta", positive=True) >>> z = Symbol("z") >>> X = Gamma("x", k, theta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) -z ----- -k k - 1 theta theta *z *e --------------------- Gamma(k) >>> C = cdf(X, meijerg=True)(z) >>> pprint(C, use_unicode=False) / / z \ |k*lowergamma|k, -----| | \ theta/ <---------------------- for z >= 0 | Gamma(k + 1) | \ 0 otherwise >>> E(X) k*theta >>> V = simplify(variance(X)) >>> pprint(V, use_unicode=False) 2 k*theta References ========== .. [1] https://en.wikipedia.org/wiki/Gamma_distribution .. [2] http://mathworld.wolfram.com/GammaDistribution.html """ return rv(name, GammaDistribution, (k, theta)) #------------------------------------------------------------------------------- # Inverse Gamma distribution --------------------------------------------------- class GammaInverseDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') set = Interval(0, oo) @staticmethod def check(a, b): _value_check(a > 0, "alpha must be positive") _value_check(b > 0, "beta must be positive") def pdf(self, x): a, b = self.a, self.b return b**a/gamma(a) * x**(-a-1) * exp(-b/x) def _cdf(self, x): a, b = self.a, self.b return Piecewise((uppergamma(a,b/x)/gamma(a), x > 0), (S.Zero, True)) def sample(self, size=()): scipy = import_module('scipy') if scipy: from scipy.stats import invgamma return invgamma.rvs(float(self.a), 0, float(self.b), size=size) else: raise NotImplementedError('Sampling the Inverse Gamma Distribution requires Scipy.') def _characteristic_function(self, t): a, b = self.a, self.b return 2 * (-I*b*t)**(a/2) * besselk(a, sqrt(-4*I*b*t)) / gamma(a) def _moment_generating_function(self, t): raise NotImplementedError('The moment generating function for the ' 'gamma inverse distribution does not exist.') def GammaInverse(name, a, b): r""" Create a continuous random variable with an inverse Gamma distribution. The density of the inverse Gamma distribution is given by .. 
math:: f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1} \exp\left(\frac{-\beta}{x}\right) with :math:`x > 0`. Parameters ========== a : Real number, `a > 0` a shape b : Real number, `b > 0` a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import GammaInverse, density, cdf, E, variance >>> from sympy import Symbol, pprint >>> a = Symbol("a", positive=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = GammaInverse("x", a, b) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) -b --- a -a - 1 z b *z *e --------------- Gamma(a) >>> cdf(X)(z) Piecewise((uppergamma(a, b/z)/gamma(a), z > 0), (0, True)) References ========== .. [1] https://en.wikipedia.org/wiki/Inverse-gamma_distribution """ return rv(name, GammaInverseDistribution, (a, b)) #------------------------------------------------------------------------------- # Gumbel distribution (Maximum and Minimum) -------------------------------------------------------- class GumbelDistribution(SingleContinuousDistribution): _argnames = ('beta', 'mu', 'minimum') set = Interval(-oo, oo) @staticmethod def check(beta, mu, minimum): _value_check(beta > 0, "Scale parameter beta must be positive.") def pdf(self, x): beta, mu = self.beta, self.mu z = (x - mu)/beta f_max = (1/beta)*exp(-z - exp(-z)) f_min = (1/beta)*exp(z - exp(z)) return Piecewise((f_min, self.minimum), (f_max, not self.minimum)) def _cdf(self, x): beta, mu = self.beta, self.mu z = (x - mu)/beta F_max = exp(-exp(-z)) F_min = 1 - exp(-exp(z)) return Piecewise((F_min, self.minimum), (F_max, not self.minimum)) def _characteristic_function(self, t): cf_max = gamma(1 - I*self.beta*t) * exp(I*self.mu*t) cf_min = gamma(1 + I*self.beta*t) * exp(I*self.mu*t) return Piecewise((cf_min, self.minimum), (cf_max, not self.minimum)) def _moment_generating_function(self, t): mgf_max = gamma(1 - self.beta*t) * exp(self.mu*t) mgf_min = gamma(1 + self.beta*t) * exp(self.mu*t) return Piecewise((mgf_min, self.minimum), (mgf_max, not self.minimum)) def Gumbel(name, beta, mu, minimum=False): r""" Create a Continuous Random Variable with Gumbel distribution. The density of the Gumbel distribution is given by For Maximum .. math:: f(x) := \dfrac{1}{\beta} \exp \left( -\dfrac{x-\mu}{\beta} - \exp \left( -\dfrac{x - \mu}{\beta} \right) \right) with :math:`x \in [ - \infty, \infty ]`. For Minimum .. math:: f(x) := \frac{e^{- e^{\frac{- \mu + x}{\beta}} + \frac{- \mu + x}{\beta}}}{\beta} with :math:`x \in [ - \infty, \infty ]`. Parameters ========== mu : Real number, 'mu' is a location beta : Real number, 'beta > 0' is a scale minimum : Boolean, by default, False, set to True for enabling minimum distribution Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Gumbel, density, E, variance, cdf >>> from sympy import Symbol, simplify, pprint >>> x = Symbol("x") >>> mu = Symbol("mu") >>> beta = Symbol("beta", positive=True) >>> X = Gumbel("x", beta, mu) >>> density(X)(x) exp(-exp(-(-mu + x)/beta) - (-mu + x)/beta)/beta >>> cdf(X)(x) exp(-exp(-(-mu + x)/beta)) References ========== .. [1] http://mathworld.wolfram.com/GumbelDistribution.html .. [2] https://en.wikipedia.org/wiki/Gumbel_distribution .. [3] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_max.html .. 
[4] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_min.html """ return rv(name, GumbelDistribution, (beta, mu, minimum)) #------------------------------------------------------------------------------- # Gompertz distribution -------------------------------------------------------- class GompertzDistribution(SingleContinuousDistribution): _argnames = ('b', 'eta') set = Interval(0, oo) @staticmethod def check(b, eta): _value_check(b > 0, "b must be positive") _value_check(eta > 0, "eta must be positive") def pdf(self, x): eta, b = self.eta, self.b return b*eta*exp(b*x)*exp(eta)*exp(-eta*exp(b*x)) def _cdf(self, x): eta, b = self.eta, self.b return 1 - exp(eta)*exp(-eta*exp(b*x)) def _moment_generating_function(self, t): eta, b = self.eta, self.b return eta * exp(eta) * expint(t/b, eta) def Gompertz(name, b, eta): r""" Create a Continuous Random Variable with Gompertz distribution. The density of the Gompertz distribution is given by .. math:: f(x) := b \eta e^{b x} e^{\eta} \exp \left(-\eta e^{bx} \right) with :math: 'x \in [0, \inf)'. Parameters ========== b: Real number, 'b > 0' a scale eta: Real number, 'eta > 0' a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Gompertz, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> b = Symbol("b", positive=True) >>> eta = Symbol("eta", positive=True) >>> z = Symbol("z") >>> X = Gompertz("x", b, eta) >>> density(X)(z) b*eta*exp(eta)*exp(b*z)*exp(-eta*exp(b*z)) References ========== .. [1] https://en.wikipedia.org/wiki/Gompertz_distribution """ return rv(name, GompertzDistribution, (b, eta)) #------------------------------------------------------------------------------- # Kumaraswamy distribution ----------------------------------------------------- class KumaraswamyDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') set = Interval(0, oo) @staticmethod def check(a, b): _value_check(a > 0, "a must be positive") _value_check(b > 0, "b must be positive") def pdf(self, x): a, b = self.a, self.b return a * b * x**(a-1) * (1-x**a)**(b-1) def _cdf(self, x): a, b = self.a, self.b return Piecewise( (S.Zero, x < S.Zero), (1 - (1 - x**a)**b, x <= S.One), (S.One, True)) def Kumaraswamy(name, a, b): r""" Create a Continuous Random Variable with a Kumaraswamy distribution. The density of the Kumaraswamy distribution is given by .. math:: f(x) := a b x^{a-1} (1-x^a)^{b-1} with :math:`x \in [0,1]`. Parameters ========== a : Real number, `a > 0` a shape b : Real number, `b > 0` a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Kumaraswamy, density, E, variance, cdf >>> from sympy import Symbol, simplify, pprint >>> a = Symbol("a", positive=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Kumaraswamy("x", a, b) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) b - 1 a - 1 / a\ a*b*z *\1 - z / >>> cdf(X)(z) Piecewise((0, z < 0), (1 - (1 - z**a)**b, z <= 1), (1, True)) References ========== .. 
[1] https://en.wikipedia.org/wiki/Kumaraswamy_distribution """ return rv(name, KumaraswamyDistribution, (a, b)) #------------------------------------------------------------------------------- # Laplace distribution --------------------------------------------------------- class LaplaceDistribution(SingleContinuousDistribution): _argnames = ('mu', 'b') set = Interval(-oo, oo) @staticmethod def check(mu, b): _value_check(b > 0, "Scale parameter b must be positive.") _value_check(mu.is_real, "Location parameter mu should be real") def pdf(self, x): mu, b = self.mu, self.b return 1/(2*b)*exp(-Abs(x - mu)/b) def _cdf(self, x): mu, b = self.mu, self.b return Piecewise( (S.Half*exp((x - mu)/b), x < mu), (S.One - S.Half*exp(-(x - mu)/b), x >= mu) ) def _characteristic_function(self, t): return exp(self.mu*I*t) / (1 + self.b**2*t**2) def _moment_generating_function(self, t): return exp(self.mu*t) / (1 - self.b**2*t**2) def Laplace(name, mu, b): r""" Create a continuous random variable with a Laplace distribution. The density of the Laplace distribution is given by .. math:: f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right) Parameters ========== mu : Real number or a list/matrix, the location (mean) or the location vector b : Real number or a positive definite matrix, representing a scale or the covariance matrix. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Laplace, density, cdf >>> from sympy import Symbol, pprint >>> mu = Symbol("mu") >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Laplace("x", mu, b) >>> density(X)(z) exp(-Abs(mu - z)/b)/(2*b) >>> cdf(X)(z) Piecewise((exp((-mu + z)/b)/2, mu > z), (1 - exp((mu - z)/b)/2, True)) >>> L = Laplace('L', [1, 2], [[1, 0], [0, 1]]) >>> pprint(density(L)(1, 2), use_unicode=False) 5 / ____\ e *besselk\0, \/ 35 / --------------------- pi References ========== .. [1] https://en.wikipedia.org/wiki/Laplace_distribution .. [2] http://mathworld.wolfram.com/LaplaceDistribution.html """ if isinstance(mu, (list, MatrixBase)) and\ isinstance(b, (list, MatrixBase)): from sympy.stats.joint_rv_types import MultivariateLaplaceDistribution return multivariate_rv( MultivariateLaplaceDistribution, name, mu, b) return rv(name, LaplaceDistribution, (mu, b)) #------------------------------------------------------------------------------- # Levy distribution --------------------------------------------------------- class LevyDistribution(SingleContinuousDistribution): _argnames = ('mu', 'c') @property def set(self): return Interval(self.mu, oo) @staticmethod def check(mu, c): _value_check(c > 0, "c (scale parameter) must be positive") _value_check(mu.is_real, "mu (location paramater) must be real") def pdf(self, x): mu, c = self.mu, self.c return sqrt(c/(2*pi))*exp(-c/(2*(x - mu)))/((x - mu)**(S.One + S.Half)) def _cdf(self, x): mu, c = self.mu, self.c return erfc(sqrt(c/(2*(x - mu)))) def _characteristic_function(self, t): mu, c = self.mu, self.c return exp(I * mu * t - sqrt(-2 * I * c * t)) def _moment_generating_function(self, t): raise NotImplementedError('The moment generating function of Levy distribution does not exist.') def Levy(name, mu, c): r""" Create a continuous random variable with a Levy distribution. The density of the Levy distribution is given by .. 
math:: f(x) := \sqrt(\frac{c}{2 \pi}) \frac{\exp -\frac{c}{2 (x - \mu)}}{(x - \mu)^{3/2}} Parameters ========== mu : Real number, the location parameter c : Real number, `c > 0`, a scale parameter Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Levy, density, cdf >>> from sympy import Symbol, pprint >>> mu = Symbol("mu", real=True) >>> c = Symbol("c", positive=True) >>> z = Symbol("z") >>> X = Levy("x", mu, c) >>> density(X)(z) sqrt(2)*sqrt(c)*exp(-c/(-2*mu + 2*z))/(2*sqrt(pi)*(-mu + z)**(3/2)) >>> cdf(X)(z) erfc(sqrt(c)*sqrt(1/(-2*mu + 2*z))) References ========== .. [1] https://en.wikipedia.org/wiki/L%C3%A9vy_distribution .. [2] http://mathworld.wolfram.com/LevyDistribution.html """ return rv(name, LevyDistribution, (mu, c)) #------------------------------------------------------------------------------- # Logistic distribution -------------------------------------------------------- class LogisticDistribution(SingleContinuousDistribution): _argnames = ('mu', 's') set = Interval(-oo, oo) @staticmethod def check(mu, s): _value_check(s > 0, "Scale parameter s must be positive.") def pdf(self, x): mu, s = self.mu, self.s return exp(-(x - mu)/s)/(s*(1 + exp(-(x - mu)/s))**2) def _cdf(self, x): mu, s = self.mu, self.s return S.One/(1 + exp(-(x - mu)/s)) def _characteristic_function(self, t): return Piecewise((exp(I*t*self.mu) * pi*self.s*t / sinh(pi*self.s*t), Ne(t, 0)), (S.One, True)) def _moment_generating_function(self, t): return exp(self.mu*t) * beta_fn(1 - self.s*t, 1 + self.s*t) def _quantile(self, p): return self.mu - self.s*log(-S.One + S.One/p) def Logistic(name, mu, s): r""" Create a continuous random variable with a logistic distribution. The density of the logistic distribution is given by .. math:: f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2} Parameters ========== mu : Real number, the location (mean) s : Real number, `s > 0` a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Logistic, density, cdf >>> from sympy import Symbol >>> mu = Symbol("mu", real=True) >>> s = Symbol("s", positive=True) >>> z = Symbol("z") >>> X = Logistic("x", mu, s) >>> density(X)(z) exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2) >>> cdf(X)(z) 1/(exp((mu - z)/s) + 1) References ========== .. [1] https://en.wikipedia.org/wiki/Logistic_distribution .. [2] http://mathworld.wolfram.com/LogisticDistribution.html """ return rv(name, LogisticDistribution, (mu, s)) #------------------------------------------------------------------------------- # Log-logistic distribution -------------------------------------------------------- class LogLogisticDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, oo) @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Scale parameter Alpha must be positive.") _value_check(beta > 0, "Shape parameter Beta must be positive.") def pdf(self, x): a, b = self.alpha, self.beta return ((b/a)*(x/a)**(b - 1))/(1 + (x/a)**b)**2 def _cdf(self, x): a, b = self.alpha, self.beta return 1/(1 + (x/a)**(-b)) def _quantile(self, p): a, b = self.alpha, self.beta return a*((p/(1 - p))**(1/b)) def expectation(self, expr, var, **kwargs): a, b = self.args return Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True)) def LogLogistic(name, alpha, beta): r""" Create a continuous random variable with a log-logistic distribution. The distribution is unimodal when `beta > 1`. The density of the log-logistic distribution is given by .. 
math:: f(x) := \frac{(\frac{\beta}{\alpha})(\frac{x}{\alpha})^{\beta - 1}} {(1 + (\frac{x}{\alpha})^{\beta})^2} Parameters ========== alpha : Real number, `\alpha > 0`, scale parameter and median of distribution beta : Real number, `\beta > 0` a shape parameter Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import LogLogistic, density, cdf, quantile >>> from sympy import Symbol, pprint >>> alpha = Symbol("alpha", real=True, positive=True) >>> beta = Symbol("beta", real=True, positive=True) >>> p = Symbol("p") >>> z = Symbol("z", positive=True) >>> X = LogLogistic("x", alpha, beta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) beta - 1 / z \ beta*|-----| \alpha/ ------------------------ 2 / beta \ |/ z \ | alpha*||-----| + 1| \\alpha/ / >>> cdf(X)(z) 1/(1 + (z/alpha)**(-beta)) >>> quantile(X)(p) alpha*(p/(1 - p))**(1/beta) References ========== .. [1] https://en.wikipedia.org/wiki/Log-logistic_distribution """ return rv(name, LogLogisticDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Log Normal distribution ------------------------------------------------------ class LogNormalDistribution(SingleContinuousDistribution): _argnames = ('mean', 'std') set = Interval(0, oo) @staticmethod def check(mean, std): _value_check(std > 0, "Parameter std must be positive.") def pdf(self, x): mean, std = self.mean, self.std return exp(-(log(x) - mean)**2 / (2*std**2)) / (x*sqrt(2*pi)*std) def sample(self, size=()): if not size: return random.lognormvariate(self.mean, self.std) else: return [random.lognormvariate(self.mean, self.std)]*size def _cdf(self, x): mean, std = self.mean, self.std return Piecewise( (S.Half + S.Half*erf((log(x) - mean)/sqrt(2)/std), x > 0), (S.Zero, True) ) def _moment_generating_function(self, t): raise NotImplementedError('Moment generating function of the log-normal distribution is not defined.') def LogNormal(name, mean, std): r""" Create a continuous random variable with a log-normal distribution. The density of the log-normal distribution is given by .. math:: f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}} e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}} with :math:`x \geq 0`. Parameters ========== mu : Real number, the log-scale sigma : Real number, :math:`\sigma^2 > 0` a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import LogNormal, density >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu", real=True) >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = LogNormal("x", mu, sigma) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) 2 -(-mu + log(z)) ----------------- 2 ___ 2*sigma \/ 2 *e ------------------------ ____ 2*\/ pi *sigma*z >>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1 >>> density(X)(z) sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z) References ========== .. [1] https://en.wikipedia.org/wiki/Lognormal .. 
[2] http://mathworld.wolfram.com/LogNormalDistribution.html """ return rv(name, LogNormalDistribution, (mean, std)) #------------------------------------------------------------------------------- # Maxwell distribution --------------------------------------------------------- class MaxwellDistribution(SingleContinuousDistribution): _argnames = ('a',) set = Interval(0, oo) @staticmethod def check(a): _value_check(a > 0, "Parameter a must be positive.") def pdf(self, x): a = self.a return sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3 def _cdf(self, x): a = self.a return erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a) def Maxwell(name, a): r""" Create a continuous random variable with a Maxwell distribution. The density of the Maxwell distribution is given by .. math:: f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3} with :math:`x \geq 0`. .. TODO - what does the parameter mean? Parameters ========== a : Real number, `a > 0` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Maxwell, density, E, variance >>> from sympy import Symbol, simplify >>> a = Symbol("a", positive=True) >>> z = Symbol("z") >>> X = Maxwell("x", a) >>> density(X)(z) sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3) >>> E(X) 2*sqrt(2)*a/sqrt(pi) >>> simplify(variance(X)) a**2*(-8 + 3*pi)/pi References ========== .. [1] https://en.wikipedia.org/wiki/Maxwell_distribution .. [2] http://mathworld.wolfram.com/MaxwellDistribution.html """ return rv(name, MaxwellDistribution, (a, )) #------------------------------------------------------------------------------- # Moyal Distribution ----------------------------------------------------------- class MoyalDistribution(SingleContinuousDistribution): _argnames = ('mu', 'sigma') @staticmethod def check(mu, sigma): _value_check(mu.is_real, "Location parameter must be real.") _value_check(sigma.is_real and sigma > 0, "Scale parameter must be real\ and positive.") def pdf(self, x): mu, sigma = self.mu, self.sigma num = exp(-(exp(-(x - mu)/sigma) + (x - mu)/(sigma))/2) den = (sqrt(2*pi) * sigma) return num/den def _characteristic_function(self, t): mu, sigma = self.mu, self.sigma term1 = exp(I*t*mu) term2 = (2**(-I*sigma*t) * gamma(Rational(1, 2) - I*t*sigma)) return (term1 * term2)/sqrt(pi) def _moment_generating_function(self, t): mu, sigma = self.mu, self.sigma term1 = exp(t*mu) term2 = (2**(-1*sigma*t) * gamma(Rational(1, 2) - t*sigma)) return (term1 * term2)/sqrt(pi) def Moyal(name, mu, sigma): r""" Create a continuous random variable with a Moyal distribution. The density of the Moyal distribution is given by .. math:: f(x) := \frac{\exp-\frac{1}{2}\exp-\frac{x-\mu}{\sigma}-\frac{x-\mu}{2\sigma}}{\sqrt{2\pi}\sigma} with :math:`x \in \mathbb{R}`. Parameters ========== mu : Real number Location parameter sigma : Real positive number Scale parameter Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Moyal, density, cdf >>> from sympy import Symbol, simplify >>> mu = Symbol("mu", real=True) >>> sigma = Symbol("sigma", positive=True, real=True) >>> z = Symbol("z") >>> X = Moyal("x", mu, sigma) >>> density(X)(z) sqrt(2)*exp(-exp((mu - z)/sigma)/2 - (-mu + z)/(2*sigma))/(2*sqrt(pi)*sigma) >>> simplify(cdf(X)(z)) 1 - erf(sqrt(2)*exp((mu - z)/(2*sigma))/2) References ========== .. [1] https://reference.wolfram.com/language/ref/MoyalDistribution.html .. 
[2] http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf """ return rv(name, MoyalDistribution, (mu, sigma)) #------------------------------------------------------------------------------- # Nakagami distribution -------------------------------------------------------- class NakagamiDistribution(SingleContinuousDistribution): _argnames = ('mu', 'omega') set = Interval(0, oo) @staticmethod def check(mu, omega): _value_check(mu >= S.Half, "Shape parameter mu must be greater than equal to 1/2.") _value_check(omega > 0, "Spread parameter omega must be positive.") def pdf(self, x): mu, omega = self.mu, self.omega return 2*mu**mu/(gamma(mu)*omega**mu)*x**(2*mu - 1)*exp(-mu/omega*x**2) def _cdf(self, x): mu, omega = self.mu, self.omega return Piecewise( (lowergamma(mu, (mu/omega)*x**2)/gamma(mu), x > 0), (S.Zero, True)) def Nakagami(name, mu, omega): r""" Create a continuous random variable with a Nakagami distribution. The density of the Nakagami distribution is given by .. math:: f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1} \exp\left(-\frac{\mu}{\omega}x^2 \right) with :math:`x > 0`. Parameters ========== mu : Real number, `\mu \geq \frac{1}{2}` a shape omega : Real number, `\omega > 0`, the spread Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Nakagami, density, E, variance, cdf >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu", positive=True) >>> omega = Symbol("omega", positive=True) >>> z = Symbol("z") >>> X = Nakagami("x", mu, omega) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) 2 -mu*z ------- mu -mu 2*mu - 1 omega 2*mu *omega *z *e ---------------------------------- Gamma(mu) >>> simplify(E(X)) sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1) >>> V = simplify(variance(X)) >>> pprint(V, use_unicode=False) 2 omega*Gamma (mu + 1/2) omega - ----------------------- Gamma(mu)*Gamma(mu + 1) >>> cdf(X)(z) Piecewise((lowergamma(mu, mu*z**2/omega)/gamma(mu), z > 0), (0, True)) References ========== .. [1] https://en.wikipedia.org/wiki/Nakagami_distribution """ return rv(name, NakagamiDistribution, (mu, omega)) #------------------------------------------------------------------------------- # Normal distribution ---------------------------------------------------------- class NormalDistribution(SingleContinuousDistribution): _argnames = ('mean', 'std') @staticmethod def check(mean, std): _value_check(std > 0, "Standard deviation must be positive") def pdf(self, x): return exp(-(x - self.mean)**2 / (2*self.std**2)) / (sqrt(2*pi)*self.std) def sample(self, size=()): if not size: return random.normalvariate(self.mean, self.std) else: return [random.normalvariate(self.mean, self.std)]*size def _cdf(self, x): mean, std = self.mean, self.std return erf(sqrt(2)*(-mean + x)/(2*std))/2 + S.Half def _characteristic_function(self, t): mean, std = self.mean, self.std return exp(I*mean*t - std**2*t**2/2) def _moment_generating_function(self, t): mean, std = self.mean, self.std return exp(mean*t + std**2*t**2/2) def _quantile(self, p): mean, std = self.mean, self.std return mean + std*sqrt(2)*erfinv(2*p - 1) def Normal(name, mean, std): r""" Create a continuous random variable with a Normal distribution. The density of the Normal distribution is given by .. 
math:: f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} } Parameters ========== mu : Real number or a list representing the mean or the mean vector sigma : Real number or a positive definite square matrix, :math:`\sigma^2 > 0` the variance Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Normal, density, E, std, cdf, skewness, quantile >>> from sympy import Symbol, simplify, pprint, factor, together, factor_terms >>> mu = Symbol("mu") >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> y = Symbol("y") >>> p = Symbol("p") >>> X = Normal("x", mu, sigma) >>> density(X)(z) sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma) >>> C = simplify(cdf(X))(z) # it needs a little more help... >>> pprint(C, use_unicode=False) / ___ \ |\/ 2 *(-mu + z)| erf|---------------| \ 2*sigma / 1 -------------------- + - 2 2 >>> quantile(X)(p) mu + sqrt(2)*sigma*erfinv(2*p - 1) >>> simplify(skewness(X)) 0 >>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1 >>> density(X)(z) sqrt(2)*exp(-z**2/2)/(2*sqrt(pi)) >>> E(2*X + 1) 1 >>> simplify(std(2*X + 1)) 2 >>> m = Normal('X', [1, 2], [[2, 1], [1, 2]]) >>> from sympy.stats.joint_rv import marginal_distribution >>> pprint(density(m)(y, z), use_unicode=False) /1 y\ /2*y z\ / z\ / y 2*z \ |- - -|*|--- - -| + |1 - -|*|- - + --- - 1| ___ \2 2/ \ 3 3/ \ 2/ \ 3 3 / \/ 3 *e -------------------------------------------------- 6*pi >>> marginal_distribution(m, m[0])(1) 1/(2*sqrt(pi)) References ========== .. [1] https://en.wikipedia.org/wiki/Normal_distribution .. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html """ if isinstance(mean, (list, MatrixBase, MatrixExpr)) and\ isinstance(std, (list, MatrixBase, MatrixExpr)): from sympy.stats.joint_rv_types import MultivariateNormalDistribution return multivariate_rv( MultivariateNormalDistribution, name, mean, std) return rv(name, NormalDistribution, (mean, std)) #------------------------------------------------------------------------------- # Inverse Gaussian distribution ---------------------------------------------------------- class GaussianInverseDistribution(SingleContinuousDistribution): _argnames = ('mean', 'shape') @property def set(self): return Interval(0, oo) @staticmethod def check(mean, shape): _value_check(shape > 0, "Shape parameter must be positive") _value_check(mean > 0, "Mean must be positive") def pdf(self, x): mu, s = self.mean, self.shape return exp(-s*(x - mu)**2 / (2*x*mu**2)) * sqrt(s/((2*pi*x**3))) def sample(self, size=()): scipy = import_module('scipy') if scipy: from scipy.stats import invgauss return invgauss.rvs(float(self.mean/self.shape), 0, float(self.shape), size=size) else: raise NotImplementedError( 'Sampling the Inverse Gaussian Distribution requires Scipy.') def _cdf(self, x): from sympy.stats import cdf mu, s = self.mean, self.shape stdNormalcdf = cdf(Normal('x', 0, 1)) first_term = stdNormalcdf(sqrt(s/x) * ((x/mu) - S.One)) second_term = exp(2*s/mu) * stdNormalcdf(-sqrt(s/x)*(x/mu + S.One)) return first_term + second_term def _characteristic_function(self, t): mu, s = self.mean, self.shape return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*I*t)/s))) def _moment_generating_function(self, t): mu, s = self.mean, self.shape return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*t)/s))) def GaussianInverse(name, mean, shape): r""" Create a continuous random variable with an Inverse Gaussian distribution. Inverse Gaussian distribution is also known as Wald distribution. The density of the Inverse Gaussian distribution is given by .. 
math:: f(x) := \sqrt{\frac{\lambda}{2\pi x^3}} e^{-\frac{\lambda(x-\mu)^2}{2x\mu^2}} Parameters ========== mu : Positive number representing the mean lambda : Positive number representing the shape parameter Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import GaussianInverse, density, cdf, E, std, skewness >>> from sympy import Symbol, pprint >>> mu = Symbol("mu", positive=True) >>> lamda = Symbol("lambda", positive=True) >>> z = Symbol("z", positive=True) >>> X = GaussianInverse("x", mu, lamda) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) 2 -lambda*(-mu + z) ------------------- 2 ___ ________ 2*mu *z \/ 2 *\/ lambda *e ------------------------------------- ____ 3/2 2*\/ pi *z >>> E(X) mu >>> std(X).expand() mu**(3/2)/sqrt(lambda) >>> skewness(X).expand() 3*sqrt(mu)/sqrt(lambda) References ========== .. [1] https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution .. [2] http://mathworld.wolfram.com/InverseGaussianDistribution.html """ return rv(name, GaussianInverseDistribution, (mean, shape)) Wald = GaussianInverse #------------------------------------------------------------------------------- # Pareto distribution ---------------------------------------------------------- class ParetoDistribution(SingleContinuousDistribution): _argnames = ('xm', 'alpha') @property def set(self): return Interval(self.xm, oo) @staticmethod def check(xm, alpha): _value_check(xm > 0, "Xm must be positive") _value_check(alpha > 0, "Alpha must be positive") def pdf(self, x): xm, alpha = self.xm, self.alpha return alpha * xm**alpha / x**(alpha + 1) def sample(self, size=()): if not size: return random.paretovariate(self.alpha) else: return [random.paretovariate(self.alpha)]*size def _cdf(self, x): xm, alpha = self.xm, self.alpha return Piecewise( (S.One - xm**alpha/x**alpha, x>=xm), (0, True), ) def _moment_generating_function(self, t): xm, alpha = self.xm, self.alpha return alpha * (-xm*t)**alpha * uppergamma(-alpha, -xm*t) def _characteristic_function(self, t): xm, alpha = self.xm, self.alpha return alpha * (-I * xm * t) ** alpha * uppergamma(-alpha, -I * xm * t) def Pareto(name, xm, alpha): r""" Create a continuous random variable with the Pareto distribution. The density of the Pareto distribution is given by .. math:: f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}} with :math:`x \in [x_m,\infty]`. Parameters ========== xm : Real number, `x_m > 0`, a scale alpha : Real number, `\alpha > 0`, a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Pareto, density >>> from sympy import Symbol >>> xm = Symbol("xm", positive=True) >>> beta = Symbol("beta", positive=True) >>> z = Symbol("z") >>> X = Pareto("x", xm, beta) >>> density(X)(z) beta*xm**beta*z**(-beta - 1) References ========== .. [1] https://en.wikipedia.org/wiki/Pareto_distribution .. [2] http://mathworld.wolfram.com/ParetoDistribution.html """ return rv(name, ParetoDistribution, (xm, alpha)) #------------------------------------------------------------------------------- # PowerFunction distribution --------------------------------------------------- class PowerFunctionDistribution(SingleContinuousDistribution): _argnames=('alpha','a','b') @property def set(self): return Interval(self.a, self.b) @staticmethod def check(alpha, a, b): _value_check(a.is_real, "Continuous Boundary parameter should be real.") _value_check(b.is_real, "Continuous Boundary parameter should be real.") _value_check(a < b, " 'a' the left Boundary must be smaller than 'b' the right Boundary." 
) _value_check(alpha.is_positive, "Continuous Shape parameter should be positive.") def pdf(self, x): alpha, a, b = self.alpha, self.a, self.b num = alpha*(x - a)**(alpha - 1) den = (b - a)**alpha return num/den def PowerFunction(name, alpha, a, b): r""" Creates a continuous random variable with a Power Function Distribution The density of PowerFunction distribution is given by .. math:: f(x) := \frac{{\alpha}(x - a)^{\alpha - 1}}{(b - a)^{\alpha}} with :math:`x \in [a,b]`. Parameters ========== alpha: Positive number, `0 < alpha` the shape paramater a : Real number, :math:`-\infty < a` the left boundary b : Real number, :math:`a < b < \infty` the right boundary Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import PowerFunction, density, cdf, E, variance >>> from sympy import Symbol, simplify >>> alpha = Symbol("alpha", positive=True) >>> a = Symbol("a", real=True) >>> b = Symbol("b", real=True) >>> z = Symbol("z") >>> X = PowerFunction("X", 2, a, b) >>> density(X)(z) (-2*a + 2*z)/(-a + b)**2 >>> cdf(X)(z) Piecewise((a**2/(a**2 - 2*a*b + b**2) - 2*a*z/(a**2 - 2*a*b + b**2) + z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True)) >>> alpha = 2 >>> a = 0 >>> b = 1 >>> Y = PowerFunction("Y", alpha, a, b) >>> E(Y) 2/3 >>> variance(Y) 1/18 References ========== .. [1] http://www.mathwave.com/help/easyfit/html/analyses/distributions/power_func.html """ return rv(name, PowerFunctionDistribution, (alpha, a, b)) #------------------------------------------------------------------------------- # QuadraticU distribution ------------------------------------------------------ class QuadraticUDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') @property def set(self): return Interval(self.a, self.b) @staticmethod def check(a, b): _value_check(b > a, "Parameter b must be in range (%s, oo)."%(a)) def pdf(self, x): a, b = self.a, self.b alpha = 12 / (b-a)**3 beta = (a+b) / 2 return Piecewise( (alpha * (x-beta)**2, And(a<=x, x<=b)), (S.Zero, True)) def _moment_generating_function(self, t): a, b = self.a, self.b return -3 * (exp(a*t) * (4 + (a**2 + 2*a*(-2 + b) + b**2) * t) \ - exp(b*t) * (4 + (-4*b + (a + b)**2) * t)) / ((a-b)**3 * t**2) def _characteristic_function(self, t): a, b = self.a, self.b return -3*I*(exp(I*a*t*exp(I*b*t)) * (4*I - (-4*b + (a+b)**2)*t)) \ / ((a-b)**3 * t**2) def QuadraticU(name, a, b): r""" Create a Continuous Random Variable with a U-quadratic distribution. The density of the U-quadratic distribution is given by .. math:: f(x) := \alpha (x-\beta)^2 with :math:`x \in [a,b]`. Parameters ========== a : Real number b : Real number, :math:`a < b` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import QuadraticU, density, E, variance >>> from sympy import Symbol, simplify, factor, pprint >>> a = Symbol("a", real=True) >>> b = Symbol("b", real=True) >>> z = Symbol("z") >>> X = QuadraticU("x", a, b) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) / 2 | / a b \ |12*|- - - - + z| | \ 2 2 / <----------------- for And(b >= z, a <= z) | 3 | (-a + b) | \ 0 otherwise References ========== .. 
[1] https://en.wikipedia.org/wiki/U-quadratic_distribution """ return rv(name, QuadraticUDistribution, (a, b)) #------------------------------------------------------------------------------- # RaisedCosine distribution ---------------------------------------------------- class RaisedCosineDistribution(SingleContinuousDistribution): _argnames = ('mu', 's') @property def set(self): return Interval(self.mu - self.s, self.mu + self.s) @staticmethod def check(mu, s): _value_check(s > 0, "s must be positive") def pdf(self, x): mu, s = self.mu, self.s return Piecewise( ((1+cos(pi*(x-mu)/s)) / (2*s), And(mu-s<=x, x<=mu+s)), (S.Zero, True)) def _characteristic_function(self, t): mu, s = self.mu, self.s return Piecewise((exp(-I*pi*mu/s)/2, Eq(t, -pi/s)), (exp(I*pi*mu/s)/2, Eq(t, pi/s)), (pi**2*sin(s*t)*exp(I*mu*t) / (s*t*(pi**2 - s**2*t**2)), True)) def _moment_generating_function(self, t): mu, s = self.mu, self.s return pi**2 * sinh(s*t) * exp(mu*t) / (s*t*(pi**2 + s**2*t**2)) def RaisedCosine(name, mu, s): r""" Create a Continuous Random Variable with a raised cosine distribution. The density of the raised cosine distribution is given by .. math:: f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right) with :math:`x \in [\mu-s,\mu+s]`. Parameters ========== mu : Real number s : Real number, `s > 0` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import RaisedCosine, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu", real=True) >>> s = Symbol("s", positive=True) >>> z = Symbol("z") >>> X = RaisedCosine("x", mu, s) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) / /pi*(-mu + z)\ |cos|------------| + 1 | \ s / <--------------------- for And(z >= mu - s, z <= mu + s) | 2*s | \ 0 otherwise References ========== .. [1] https://en.wikipedia.org/wiki/Raised_cosine_distribution """ return rv(name, RaisedCosineDistribution, (mu, s)) #------------------------------------------------------------------------------- # Rayleigh distribution -------------------------------------------------------- class RayleighDistribution(SingleContinuousDistribution): _argnames = ('sigma',) set = Interval(0, oo) @staticmethod def check(sigma): _value_check(sigma > 0, "Scale parameter sigma must be positive.") def pdf(self, x): sigma = self.sigma return x/sigma**2*exp(-x**2/(2*sigma**2)) def _cdf(self, x): sigma = self.sigma return 1 - exp(-(x**2/(2*sigma**2))) def _characteristic_function(self, t): sigma = self.sigma return 1 - sigma*t*exp(-sigma**2*t**2/2) * sqrt(pi/2) * (erfi(sigma*t/sqrt(2)) - I) def _moment_generating_function(self, t): sigma = self.sigma return 1 + sigma*t*exp(sigma**2*t**2/2) * sqrt(pi/2) * (erf(sigma*t/sqrt(2)) + 1) def Rayleigh(name, sigma): r""" Create a continuous random variable with a Rayleigh distribution. The density of the Rayleigh distribution is given by .. math :: f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2} with :math:`x > 0`. Parameters ========== sigma : Real number, `\sigma > 0` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Rayleigh, density, E, variance >>> from sympy import Symbol, simplify >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = Rayleigh("x", sigma) >>> density(X)(z) z*exp(-z**2/(2*sigma**2))/sigma**2 >>> E(X) sqrt(2)*sqrt(pi)*sigma/2 >>> variance(X) -pi*sigma**2/2 + 2*sigma**2 References ========== .. [1] https://en.wikipedia.org/wiki/Rayleigh_distribution .. 
[2] http://mathworld.wolfram.com/RayleighDistribution.html """ return rv(name, RayleighDistribution, (sigma, )) #------------------------------------------------------------------------------- # Reciprocal distribution -------------------------------------------------------- class ReciprocalDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') @property def set(self): return Interval(self.a, self.b) @staticmethod def check(a, b): _value_check(a > 0, "Parameter > 0. a = %s"%a) _value_check((a < b), "Parameter b must be in range (%s, +oo]. b = %s"%(a, b)) def pdf(self, x): a, b = self.a, self.b return 1/(x*(log(b) - log(a))) def Reciprocal(name, a, b): r"""Creates a continuous random variable with a reciprocal distribution. Parameters ========== a : Real number, :math:`0 < a` b : Real number, :math:`a < b` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Reciprocal, density, cdf >>> from sympy import symbols >>> a, b, x = symbols('a, b, x', positive=True) >>> R = Reciprocal('R', a, b) >>> density(R)(x) 1/(x*(-log(a) + log(b))) >>> cdf(R)(x) Piecewise((log(a)/(log(a) - log(b)) - log(x)/(log(a) - log(b)), a <= x), (0, True)) Reference ========= .. [1] https://en.wikipedia.org/wiki/Reciprocal_distribution """ return rv(name, ReciprocalDistribution, (a, b)) #------------------------------------------------------------------------------- # Shifted Gompertz distribution ------------------------------------------------ class ShiftedGompertzDistribution(SingleContinuousDistribution): _argnames = ('b', 'eta') set = Interval(0, oo) @staticmethod def check(b, eta): _value_check(b > 0, "b must be positive") _value_check(eta > 0, "eta must be positive") def pdf(self, x): b, eta = self.b, self.eta return b*exp(-b*x)*exp(-eta*exp(-b*x))*(1+eta*(1-exp(-b*x))) def ShiftedGompertz(name, b, eta): r""" Create a continuous random variable with a Shifted Gompertz distribution. The density of the Shifted Gompertz distribution is given by .. math:: f(x) := b e^{-b x} e^{-\eta \exp(-b x)} \left[1 + \eta(1 - e^(-bx)) \right] with :math: 'x \in [0, \inf)'. Parameters ========== b: Real number, 'b > 0' a scale eta: Real number, 'eta > 0' a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ShiftedGompertz, density, E, variance >>> from sympy import Symbol >>> b = Symbol("b", positive=True) >>> eta = Symbol("eta", positive=True) >>> x = Symbol("x") >>> X = ShiftedGompertz("x", b, eta) >>> density(X)(x) b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x)) References ========== .. [1] https://en.wikipedia.org/wiki/Shifted_Gompertz_distribution """ return rv(name, ShiftedGompertzDistribution, (b, eta)) #------------------------------------------------------------------------------- # StudentT distribution -------------------------------------------------------- class StudentTDistribution(SingleContinuousDistribution): _argnames = ('nu',) set = Interval(-oo, oo) @staticmethod def check(nu): _value_check(nu > 0, "Degrees of freedom nu must be positive.") def pdf(self, x): nu = self.nu return 1/(sqrt(nu)*beta_fn(S.Half, nu/2))*(1 + x**2/nu)**(-(nu + 1)/2) def _cdf(self, x): nu = self.nu return S.Half + x*gamma((nu+1)/2)*hyper((S.Half, (nu+1)/2), (Rational(3, 2),), -x**2/nu)/(sqrt(pi*nu)*gamma(nu/2)) def _moment_generating_function(self, t): raise NotImplementedError('The moment generating function for the Student-T distribution is undefined.') def StudentT(name, nu): r""" Create a continuous random variable with a student's t distribution. 
The density of the student's t distribution is given by .. math:: f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)} {\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)} \left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}} Parameters ========== nu : Real number, `\nu > 0`, the degrees of freedom Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import StudentT, density, E, variance, cdf >>> from sympy import Symbol, simplify, pprint >>> nu = Symbol("nu", positive=True) >>> z = Symbol("z") >>> X = StudentT("x", nu) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) nu 1 - -- - - 2 2 / 2\ | z | |1 + --| \ nu/ ----------------- ____ / nu\ \/ nu *B|1/2, --| \ 2 / >>> cdf(X)(z) 1/2 + z*gamma(nu/2 + 1/2)*hyper((1/2, nu/2 + 1/2), (3/2,), -z**2/nu)/(sqrt(pi)*sqrt(nu)*gamma(nu/2)) References ========== .. [1] https://en.wikipedia.org/wiki/Student_t-distribution .. [2] http://mathworld.wolfram.com/Studentst-Distribution.html """ return rv(name, StudentTDistribution, (nu, )) #------------------------------------------------------------------------------- # Trapezoidal distribution ------------------------------------------------------ class TrapezoidalDistribution(SingleContinuousDistribution): _argnames = ('a', 'b', 'c', 'd') @property def set(self): return Interval(self.a, self.d) @staticmethod def check(a, b, c, d): _value_check(a < d, "Lower bound parameter a < %s. a = %s"%(d, a)) _value_check((a <= b, b < c), "Level start parameter b must be in range [%s, %s). b = %s"%(a, c, b)) _value_check((b < c, c <= d), "Level end parameter c must be in range (%s, %s]. c = %s"%(b, d, c)) _value_check(d >= c, "Upper bound parameter d > %s. d = %s"%(c, d)) def pdf(self, x): a, b, c, d = self.a, self.b, self.c, self.d return Piecewise( (2*(x-a) / ((b-a)*(d+c-a-b)), And(a <= x, x < b)), (2 / (d+c-a-b), And(b <= x, x < c)), (2*(d-x) / ((d-c)*(d+c-a-b)), And(c <= x, x <= d)), (S.Zero, True)) def Trapezoidal(name, a, b, c, d): r""" Create a continuous random variable with a trapezoidal distribution. The density of the trapezoidal distribution is given by .. math:: f(x) := \begin{cases} 0 & \mathrm{for\ } x < a, \\ \frac{2(x-a)}{(b-a)(d+c-a-b)} & \mathrm{for\ } a \le x < b, \\ \frac{2}{d+c-a-b} & \mathrm{for\ } b \le x < c, \\ \frac{2(d-x)}{(d-c)(d+c-a-b)} & \mathrm{for\ } c \le x < d, \\ 0 & \mathrm{for\ } d < x. \end{cases} Parameters ========== a : Real number, :math:`a < d` b : Real number, :math:`a <= b < c` c : Real number, :math:`b < c <= d` d : Real number Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Trapezoidal, density, E >>> from sympy import Symbol, pprint >>> a = Symbol("a") >>> b = Symbol("b") >>> c = Symbol("c") >>> d = Symbol("d") >>> z = Symbol("z") >>> X = Trapezoidal("x", a,b,c,d) >>> pprint(density(X)(z), use_unicode=False) / -2*a + 2*z |------------------------- for And(a <= z, b > z) |(-a + b)*(-a - b + c + d) | | 2 | -------------- for And(b <= z, c > z) < -a - b + c + d | | 2*d - 2*z |------------------------- for And(d >= z, c <= z) |(-c + d)*(-a - b + c + d) | \ 0 otherwise References ========== .. 
[1] https://en.wikipedia.org/wiki/Trapezoidal_distribution """ return rv(name, TrapezoidalDistribution, (a, b, c, d)) #------------------------------------------------------------------------------- # Triangular distribution ------------------------------------------------------ class TriangularDistribution(SingleContinuousDistribution): _argnames = ('a', 'b', 'c') @property def set(self): return Interval(self.a, self.b) @staticmethod def check(a, b, c): _value_check(b > a, "Parameter b > %s. b = %s"%(a, b)) _value_check((a <= c, c <= b), "Parameter c must be in range [%s, %s]. c = %s"%(a, b, c)) def pdf(self, x): a, b, c = self.a, self.b, self.c return Piecewise( (2*(x - a)/((b - a)*(c - a)), And(a <= x, x < c)), (2/(b - a), Eq(x, c)), (2*(b - x)/((b - a)*(b - c)), And(c < x, x <= b)), (S.Zero, True)) def _characteristic_function(self, t): a, b, c = self.a, self.b, self.c return -2 *((b-c) * exp(I*a*t) - (b-a) * exp(I*c*t) + (c-a) * exp(I*b*t)) / ((b-a)*(c-a)*(b-c)*t**2) def _moment_generating_function(self, t): a, b, c = self.a, self.b, self.c return 2 * ((b - c) * exp(a * t) - (b - a) * exp(c * t) + (c - a) * exp(b * t)) / ( (b - a) * (c - a) * (b - c) * t ** 2) def Triangular(name, a, b, c): r""" Create a continuous random variable with a triangular distribution. The density of the triangular distribution is given by .. math:: f(x) := \begin{cases} 0 & \mathrm{for\ } x < a, \\ \frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\ \frac{2}{b-a} & \mathrm{for\ } x = c, \\ \frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\ 0 & \mathrm{for\ } b < x. \end{cases} Parameters ========== a : Real number, :math:`a \in \left(-\infty, \infty\right)` b : Real number, :math:`a < b` c : Real number, :math:`a \leq c \leq b` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Triangular, density, E >>> from sympy import Symbol, pprint >>> a = Symbol("a") >>> b = Symbol("b") >>> c = Symbol("c") >>> z = Symbol("z") >>> X = Triangular("x", a,b,c) >>> pprint(density(X)(z), use_unicode=False) / -2*a + 2*z |----------------- for And(a <= z, c > z) |(-a + b)*(-a + c) | | 2 | ------ for c = z < -a + b | | 2*b - 2*z |---------------- for And(b >= z, c < z) |(-a + b)*(b - c) | \ 0 otherwise References ========== .. [1] https://en.wikipedia.org/wiki/Triangular_distribution .. 
[2] http://mathworld.wolfram.com/TriangularDistribution.html """ return rv(name, TriangularDistribution, (a, b, c)) #------------------------------------------------------------------------------- # Uniform distribution --------------------------------------------------------- class UniformDistribution(SingleContinuousDistribution): _argnames = ('left', 'right') @property def set(self): return Interval(self.left, self.right) @staticmethod def check(left, right): _value_check(left < right, "Lower limit should be less than Upper limit.") def pdf(self, x): left, right = self.left, self.right return Piecewise( (S.One/(right - left), And(left <= x, x <= right)), (S.Zero, True) ) def _cdf(self, x): left, right = self.left, self.right return Piecewise( (S.Zero, x < left), ((x - left)/(right - left), x <= right), (S.One, True) ) def _characteristic_function(self, t): left, right = self.left, self.right return Piecewise(((exp(I*t*right) - exp(I*t*left)) / (I*t*(right - left)), Ne(t, 0)), (S.One, True)) def _moment_generating_function(self, t): left, right = self.left, self.right return Piecewise(((exp(t*right) - exp(t*left)) / (t * (right - left)), Ne(t, 0)), (S.One, True)) def expectation(self, expr, var, **kwargs): from sympy import Max, Min kwargs['evaluate'] = True result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs) result = result.subs({Max(self.left, self.right): self.right, Min(self.left, self.right): self.left}) return result def sample(self, size=()): if not size: return random.uniform(self.left, self.right) else: return [random.uniform(self.left, self.right)]*size def Uniform(name, left, right): r""" Create a continuous random variable with a uniform distribution. The density of the uniform distribution is given by .. math:: f(x) := \begin{cases} \frac{1}{b - a} & \text{for } x \in [a,b] \\ 0 & \text{otherwise} \end{cases} with :math:`x \in [a,b]`. Parameters ========== a : Real number, :math:`-\infty < a` the left boundary b : Real number, :math:`a < b < \infty` the right boundary Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Uniform, density, cdf, E, variance, skewness >>> from sympy import Symbol, simplify >>> a = Symbol("a", negative=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Uniform("x", a, b) >>> density(X)(z) Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True)) >>> cdf(X)(z) Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True)) >>> E(X) a/2 + b/2 >>> simplify(variance(X)) a**2/12 - a*b/6 + b**2/12 References ========== .. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 .. 
[2] http://mathworld.wolfram.com/UniformDistribution.html """ return rv(name, UniformDistribution, (left, right)) #------------------------------------------------------------------------------- # UniformSum distribution ------------------------------------------------------ class UniformSumDistribution(SingleContinuousDistribution): _argnames = ('n',) @property def set(self): return Interval(0, self.n) @staticmethod def check(n): _value_check((n > 0, n.is_integer), "Parameter n must be positive integer.") def pdf(self, x): n = self.n k = Dummy("k") return 1/factorial( n - 1)*Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x))) def _cdf(self, x): n = self.n k = Dummy("k") return Piecewise((S.Zero, x < 0), (1/factorial(n)*Sum((-1)**k*binomial(n, k)*(x - k)**(n), (k, 0, floor(x))), x <= n), (S.One, True)) def _characteristic_function(self, t): return ((exp(I*t) - 1) / (I*t))**self.n def _moment_generating_function(self, t): return ((exp(t) - 1) / t)**self.n def UniformSum(name, n): r""" Create a continuous random variable with an Irwin-Hall distribution. The probability distribution function depends on a single parameter `n` which is an integer. The density of the Irwin-Hall distribution is given by .. math :: f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\left\lfloor x\right\rfloor}(-1)^k \binom{n}{k}(x-k)^{n-1} Parameters ========== n : A positive Integer, `n > 0` Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import UniformSum, density, cdf >>> from sympy import Symbol, pprint >>> n = Symbol("n", integer=True) >>> z = Symbol("z") >>> X = UniformSum("x", n) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) floor(z) ___ \ ` \ k n - 1 /n\ ) (-1) *(-k + z) *| | / \k/ /__, k = 0 -------------------------------- (n - 1)! >>> cdf(X)(z) Piecewise((0, z < 0), (Sum((-1)**_k*(-_k + z)**n*binomial(n, _k), (_k, 0, floor(z)))/factorial(n), n >= z), (1, True)) Compute cdf with specific 'x' and 'n' values as follows : >>> cdf(UniformSum("x", 5), evaluate=False)(2).doit() 9/40 The argument evaluate=False prevents an attempt at evaluation of the sum for general n, before the argument 2 is passed. References ========== .. [1] https://en.wikipedia.org/wiki/Uniform_sum_distribution .. [2] http://mathworld.wolfram.com/UniformSumDistribution.html """ return rv(name, UniformSumDistribution, (n, )) #------------------------------------------------------------------------------- # VonMises distribution -------------------------------------------------------- class VonMisesDistribution(SingleContinuousDistribution): _argnames = ('mu', 'k') set = Interval(0, 2*pi) @staticmethod def check(mu, k): _value_check(k > 0, "k must be positive") def pdf(self, x): mu, k = self.mu, self.k return exp(k*cos(x-mu)) / (2*pi*besseli(0, k)) def VonMises(name, mu, k): r""" Create a Continuous Random Variable with a von Mises distribution. The density of the von Mises distribution is given by .. math:: f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)} with :math:`x \in [0,2\pi]`. Parameters ========== mu : Real number, measure of location k : Real number, measure of concentration Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import VonMises, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu") >>> k = Symbol("k", positive=True) >>> z = Symbol("z") >>> X = VonMises("x", mu, k) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) k*cos(mu - z) e ------------------ 2*pi*besseli(0, k) References ========== .. 
[1] https://en.wikipedia.org/wiki/Von_Mises_distribution .. [2] http://mathworld.wolfram.com/vonMisesDistribution.html """ return rv(name, VonMisesDistribution, (mu, k)) #------------------------------------------------------------------------------- # Weibull distribution --------------------------------------------------------- class WeibullDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, oo) @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Alpha must be positive") _value_check(beta > 0, "Beta must be positive") def pdf(self, x): alpha, beta = self.alpha, self.beta return beta * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta) / alpha def sample(self, size=()): if not size: return random.weibullvariate(self.alpha, self.beta) else: return [random.weibullvariate(self.alpha, self.beta)]*size def Weibull(name, alpha, beta): r""" Create a continuous random variable with a Weibull distribution. The density of the Weibull distribution is given by .. math:: f(x) := \begin{cases} \frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1} e^{-(x/\lambda)^{k}} & x\geq0\\ 0 & x<0 \end{cases} Parameters ========== lambda : Real number, :math:`\lambda > 0` a scale k : Real number, `k > 0` a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Weibull, density, E, variance >>> from sympy import Symbol, simplify >>> l = Symbol("lambda", positive=True) >>> k = Symbol("k", positive=True) >>> z = Symbol("z") >>> X = Weibull("x", l, k) >>> density(X)(z) k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda >>> simplify(E(X)) lambda*gamma(1 + 1/k) >>> simplify(variance(X)) lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k)) References ========== .. [1] https://en.wikipedia.org/wiki/Weibull_distribution .. [2] http://mathworld.wolfram.com/WeibullDistribution.html """ return rv(name, WeibullDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Wigner semicircle distribution ----------------------------------------------- class WignerSemicircleDistribution(SingleContinuousDistribution): _argnames = ('R',) @property def set(self): return Interval(-self.R, self.R) @staticmethod def check(R): _value_check(R > 0, "Radius R must be positive.") def pdf(self, x): R = self.R return 2/(pi*R**2)*sqrt(R**2 - x**2) def _characteristic_function(self, t): return Piecewise((2 * besselj(1, self.R*t) / (self.R*t), Ne(t, 0)), (S.One, True)) def _moment_generating_function(self, t): return Piecewise((2 * besseli(1, self.R*t) / (self.R*t), Ne(t, 0)), (S.One, True)) def WignerSemicircle(name, R): r""" Create a continuous random variable with a Wigner semicircle distribution. The density of the Wigner semicircle distribution is given by .. math:: f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2} with :math:`x \in [-R,R]`. Parameters ========== R : Real number, `R > 0`, the radius Returns ======= A `RandomSymbol`. Examples ======== >>> from sympy.stats import WignerSemicircle, density, E >>> from sympy import Symbol, simplify >>> R = Symbol("R", positive=True) >>> z = Symbol("z") >>> X = WignerSemicircle("x", R) >>> density(X)(z) 2*sqrt(R**2 - z**2)/(pi*R**2) >>> E(X) 0 References ========== .. [1] https://en.wikipedia.org/wiki/Wigner_semicircle_distribution .. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html """ return rv(name, WignerSemicircleDistribution, (R,))
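# -----------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the module above): a small,
# self-contained example of querying the continuous distributions defined in
# this file through sympy.stats.  The results noted in the comments are taken
# from the docstring examples above; the symbol and variable names here are
# arbitrary choices for this sketch.
from sympy import Symbol, simplify, pprint
from sympy.stats import Rayleigh, Weibull, Uniform, density, cdf, E, variance

sigma = Symbol("sigma", positive=True)
z = Symbol("z")

# Rayleigh: density x/sigma**2 * exp(-x**2/(2*sigma**2)) on (0, oo).
X = Rayleigh("x", sigma)
pprint(density(X)(z))     # z*exp(-z**2/(2*sigma**2))/sigma**2
pprint(E(X))              # sqrt(2)*sqrt(pi)*sigma/2
pprint(variance(X))       # -pi*sigma**2/2 + 2*sigma**2

# Weibull with scale lambda and shape k, as in the Weibull docstring above.
l = Symbol("lambda", positive=True)
k = Symbol("k", positive=True)
W = Weibull("w", l, k)
pprint(simplify(E(W)))    # lambda*gamma(1 + 1/k)

# Uniform on [a, b]: piecewise-constant density, piecewise-linear cdf.
a = Symbol("a", negative=True)
b = Symbol("b", positive=True)
U = Uniform("u", a, b)
pprint(density(U)(z))     # Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True))
pprint(cdf(U)(z))         # Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True))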
""" Finite Discrete Random Variables - Prebuilt variable types Contains ======== FiniteRV DiscreteUniform Die Bernoulli Coin Binomial BetaBinomial Hypergeometric Rademacher """ from __future__ import print_function, division import random from sympy import (S, sympify, Rational, binomial, cacheit, Integer, Dummy, Eq, Intersection, Interval, Symbol, Lambda, Piecewise, Or, Gt, Lt, Ge, Le, Contains) from sympy import beta as beta_fn from sympy.external import import_module from sympy.tensor.array import ArrayComprehensionMap from sympy.stats.frv import (SingleFiniteDistribution, SingleFinitePSpace) from sympy.stats.rv import _value_check, Density, RandomSymbol numpy = import_module('numpy') scipy = import_module('scipy') pymc3 = import_module('pymc3') __all__ = ['FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin', 'Binomial', 'BetaBinomial', 'Hypergeometric', 'Rademacher' ] def rv(name, cls, *args): args = list(map(sympify, args)) dist = cls(*args) dist.check(*args) return SingleFinitePSpace(name, dist).value class FiniteDistributionHandmade(SingleFiniteDistribution): @property def dict(self): return self.args[0] def pmf(self, x): x = Symbol('x') return Lambda(x, Piecewise(*( [(v, Eq(k, x)) for k, v in self.dict.items()] + [(S.Zero, True)]))) @property def set(self): return set(self.dict.keys()) @staticmethod def check(density): for p in density.values(): _value_check((p >= 0, p <= 1), "Probability at a point must be between 0 and 1.") _value_check(Eq(sum(density.values()), 1), "Total Probability must be 1.") def FiniteRV(name, density): r""" Create a Finite Random Variable given a dict representing the density. Parameters ========== density: A dict Dictionary conatining the pdf of finite distribution Examples ======== >>> from sympy.stats import FiniteRV, P, E >>> density = {0: .1, 1: .2, 2: .3, 3: .4} >>> X = FiniteRV('X', density) >>> E(X) 2.00000000000000 >>> P(X >= 2) 0.700000000000000 Returns ======= RandomSymbol """ return rv(name, FiniteDistributionHandmade, density) class DiscreteUniformDistribution(SingleFiniteDistribution): @staticmethod def check(*args): # not using _value_check since there is a # suggestion for the user if len(set(args)) != len(args): from sympy.utilities.iterables import multiset from sympy.utilities.misc import filldedent weights = multiset(args) n = Integer(len(args)) for k in weights: weights[k] /= n raise ValueError(filldedent(""" Repeated args detected but set expected. For a distribution having different weights for each item use the following:""") + ( '\nS("FiniteRV(%s, %s)")' % ("'X'", weights))) @property def p(self): return Rational(1, len(self.args)) @property # type: ignore @cacheit def dict(self): return dict((k, self.p) for k in self.set) @property def set(self): return set(self.args) def pmf(self, x): if x in self.args: return self.p else: return S.Zero def _sample_random(self, size): x = Symbol('x') return ArrayComprehensionMap(lambda: self.args[random.randint(0, len(self.args)-1)], (x, 0, size)).doit() def DiscreteUniform(name, items): r""" Create a Finite Random Variable representing a uniform distribution over the input set. 
Parameters ========== items: list/tuple Items over which Uniform distribution is to be made Examples ======== >>> from sympy.stats import DiscreteUniform, density >>> from sympy import symbols >>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c >>> density(X).dict {a: 1/3, b: 1/3, c: 1/3} >>> Y = DiscreteUniform('Y', list(range(5))) # distribution over a range >>> density(Y).dict {0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5} Returns ======= RandomSymbol References ========== .. [1] https://en.wikipedia.org/wiki/Discrete_uniform_distribution .. [2] http://mathworld.wolfram.com/DiscreteUniformDistribution.html """ return rv(name, DiscreteUniformDistribution, *items) class DieDistribution(SingleFiniteDistribution): _argnames = ('sides',) @staticmethod def check(sides): _value_check((sides.is_positive, sides.is_integer), "number of sides must be a positive integer.") @property def is_symbolic(self): return not self.sides.is_number @property def high(self): return self.sides @property def low(self): return S.One @property def set(self): if self.is_symbolic: return Intersection(S.Naturals0, Interval(0, self.sides)) return set(map(Integer, list(range(1, self.sides + 1)))) def pmf(self, x): x = sympify(x) if not (x.is_number or x.is_Symbol or isinstance(x, RandomSymbol)): raise ValueError("'x' expected as an argument of type 'number' or 'Symbol' or , " "'RandomSymbol' not %s" % (type(x))) cond = Ge(x, 1) & Le(x, self.sides) & Contains(x, S.Integers) return Piecewise((S.One/self.sides, cond), (S.Zero, True)) def Die(name, sides=6): r""" Create a Finite Random Variable representing a fair die. Parameters ========== sides: Integer Represents the number of sides of the Die, by default is 6 Examples ======== >>> from sympy.stats import Die, density >>> from sympy import Symbol >>> D6 = Die('D6', 6) # Six sided Die >>> density(D6).dict {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6} >>> D4 = Die('D4', 4) # Four sided Die >>> density(D4).dict {1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4} >>> n = Symbol('n', positive=True, integer=True) >>> Dn = Die('Dn', n) # n sided Die >>> density(Dn).dict Density(DieDistribution(n)) >>> density(Dn).dict.subs(n, 4).doit() {1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4} Returns ======= RandomSymbol """ return rv(name, DieDistribution, sides) class BernoulliDistribution(SingleFiniteDistribution): _argnames = ('p', 'succ', 'fail') @staticmethod def check(p, succ, fail): _value_check((p >= 0, p <= 1), "p should be in range [0, 1].") @property def set(self): return set([self.succ, self.fail]) def pmf(self, x): return Piecewise((self.p, x == self.succ), (1 - self.p, x == self.fail), (S.Zero, True)) def Bernoulli(name, p, succ=1, fail=0): r""" Create a Finite Random Variable representing a Bernoulli process. Parameters ========== p : Rational number between 0 and 1 Represents probability of success succ : Integer/symbol/string Represents event of success fail : Integer/symbol/string Represents event of failure Examples ======== >>> from sympy.stats import Bernoulli, density >>> from sympy import S >>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4 >>> density(X).dict {0: 1/4, 1: 3/4} >>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss >>> density(X).dict {Heads: 1/2, Tails: 1/2} Returns ======= RandomSymbol References ========== .. [1] https://en.wikipedia.org/wiki/Bernoulli_distribution .. 
[2] http://mathworld.wolfram.com/BernoulliDistribution.html """ return rv(name, BernoulliDistribution, p, succ, fail) def Coin(name, p=S.Half): r""" Create a Finite Random Variable representing a Coin toss. Parameters ========== p : Rational Numeber between 0 and 1 Represents probability of getting "Heads", by default is Half Examples ======== >>> from sympy.stats import Coin, density >>> from sympy import Rational >>> C = Coin('C') # A fair coin toss >>> density(C).dict {H: 1/2, T: 1/2} >>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin >>> density(C2).dict {H: 3/5, T: 2/5} Returns ======= RandomSymbol See Also ======== sympy.stats.Binomial References ========== .. [1] https://en.wikipedia.org/wiki/Coin_flipping """ return rv(name, BernoulliDistribution, p, 'H', 'T') class BinomialDistribution(SingleFiniteDistribution): _argnames = ('n', 'p', 'succ', 'fail') @staticmethod def check(n, p, succ, fail): _value_check((n.is_integer, n.is_nonnegative), "'n' must be nonnegative integer.") _value_check((p <= 1, p >= 0), "p should be in range [0, 1].") @property def high(self): return self.n @property def low(self): return S.Zero @property def is_symbolic(self): return not self.n.is_number @property def set(self): if self.is_symbolic: return Intersection(S.Naturals0, Interval(0, self.n)) return set(self.dict.keys()) def pmf(self, x): n, p = self.n, self.p x = sympify(x) if not (x.is_number or x.is_Symbol or isinstance(x, RandomSymbol)): raise ValueError("'x' expected as an argument of type 'number' or 'Symbol' or , " "'RandomSymbol' not %s" % (type(x))) cond = Ge(x, 0) & Le(x, n) & Contains(x, S.Integers) return Piecewise((binomial(n, x) * p**x * (1 - p)**(n - x), cond), (S.Zero, True)) @property # type: ignore @cacheit def dict(self): if self.is_symbolic: return Density(self) return dict((k*self.succ + (self.n-k)*self.fail, self.pmf(k)) for k in range(0, self.n + 1)) def Binomial(name, n, p, succ=1, fail=0): r""" Create a Finite Random Variable representing a binomial distribution. Parameters ========== n : Positive Integer Represents number of trials p : Rational Number between 0 and 1 Represents probability of success succ : Integer/symbol/string Represents event of success, by default is 1 fail : Integer/symbol/string Represents event of failure, by default is 0 Examples ======== >>> from sympy.stats import Binomial, density >>> from sympy import S, Symbol >>> X = Binomial('X', 4, S.Half) # Four "coin flips" >>> density(X).dict {0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16} >>> n = Symbol('n', positive=True, integer=True) >>> p = Symbol('p', positive=True) >>> X = Binomial('X', n, S.Half) # n "coin flips" >>> density(X).dict Density(BinomialDistribution(n, 1/2, 1, 0)) >>> density(X).dict.subs(n, 4).doit() {0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16} Returns ======= RandomSymbol References ========== .. [1] https://en.wikipedia.org/wiki/Binomial_distribution .. [2] http://mathworld.wolfram.com/BinomialDistribution.html """ return rv(name, BinomialDistribution, n, p, succ, fail) #------------------------------------------------------------------------------- # Beta-binomial distribution ---------------------------------------------------------- class BetaBinomialDistribution(SingleFiniteDistribution): _argnames = ('n', 'alpha', 'beta') @staticmethod def check(n, alpha, beta): _value_check((n.is_integer, n.is_nonnegative), "'n' must be nonnegative integer. n = %s." % str(n)) _value_check((alpha > 0), "'alpha' must be: alpha > 0 . 
alpha = %s" % str(alpha)) _value_check((beta > 0), "'beta' must be: beta > 0 . beta = %s" % str(beta)) @property def high(self): return self.n @property def low(self): return S.Zero @property def is_symbolic(self): return not self.n.is_number @property def set(self): if self.is_symbolic: return Intersection(S.Naturals0, Interval(0, self.n)) return set(map(Integer, list(range(0, self.n + 1)))) def pmf(self, k): n, a, b = self.n, self.alpha, self.beta return binomial(n, k) * beta_fn(k + a, n - k + b) / beta_fn(a, b) def _sample_pymc3(self, size): n, a, b = int(self.n), float(self.alpha), float(self.beta) with pymc3.Model(): pymc3.BetaBinomial('X', alpha=a, beta=b, n=n) return pymc3.sample(size, chains=1, progressbar=False)[:]['X'] def BetaBinomial(name, n, alpha, beta): r""" Create a Finite Random Variable representing a Beta-binomial distribution. Parameters ========== n : Positive Integer Represents number of trials alpha : Real positive number beta : Real positive number Examples ======== >>> from sympy.stats import BetaBinomial, density >>> X = BetaBinomial('X', 2, 1, 1) >>> density(X).dict {0: 1/3, 1: 2*beta(2, 2), 2: 1/3} Returns ======= RandomSymbol References ========== .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution .. [2] http://mathworld.wolfram.com/BetaBinomialDistribution.html """ return rv(name, BetaBinomialDistribution, n, alpha, beta) class HypergeometricDistribution(SingleFiniteDistribution): _argnames = ('N', 'm', 'n') @staticmethod def check(n, N, m): _value_check((N.is_integer, N.is_nonnegative), "'N' must be nonnegative integer. N = %s." % str(n)) _value_check((n.is_integer, n.is_nonnegative), "'n' must be nonnegative integer. n = %s." % str(n)) _value_check((m.is_integer, m.is_nonnegative), "'m' must be nonnegative integer. m = %s." % str(n)) @property def is_symbolic(self): return any(not x.is_number for x in (self.N, self.m, self.n)) @property def high(self): return Piecewise((self.n, Lt(self.n, self.m) != False), (self.m, True)) @property def low(self): return Piecewise((0, Gt(0, self.n + self.m - self.N) != False), (self.n + self.m - self.N, True)) @property def set(self): N, m, n = self.N, self.m, self.n if self.is_symbolic: return Intersection(S.Naturals0, Interval(self.low, self.high)) return set([i for i in range(max(0, n + m - N), min(n, m) + 1)]) def pmf(self, k): N, m, n = self.N, self.m, self.n return S(binomial(m, k) * binomial(N - m, n - k))/binomial(N, n) def _sample_scipy(self, size): import scipy.stats # Make sure that stats is imported N, m, n = int(self.N), int(self.m), int(self.n) return scipy.stats.hypergeom.rvs(M=m, n=n, N=N, size=size) def Hypergeometric(name, N, m, n): r""" Create a Finite Random Variable representing a hypergeometric distribution. Parameters ========== N : Positive Integer Represents finite population of size N. m : Positive Integer Represents number of trials with required feature. n : Positive Integer Represents numbers of draws. Examples ======== >>> from sympy.stats import Hypergeometric, density >>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws >>> density(X).dict {0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12} Returns ======= RandomSymbol References ========== .. [1] https://en.wikipedia.org/wiki/Hypergeometric_distribution .. 
[2] http://mathworld.wolfram.com/HypergeometricDistribution.html """ return rv(name, HypergeometricDistribution, N, m, n) class RademacherDistribution(SingleFiniteDistribution): @property def set(self): return set([-1, 1]) @property def pmf(self): k = Dummy('k') return Lambda(k, Piecewise((S.Half, Or(Eq(k, -1), Eq(k, 1))), (S.Zero, True))) def Rademacher(name): r""" Create a Finite Random Variable representing a Rademacher distribution. Examples ======== >>> from sympy.stats import Rademacher, density >>> X = Rademacher('X') >>> density(X).dict {-1: 1/2, 1: 1/2} Returns ======= RandomSymbol See Also ======== sympy.stats.Bernoulli References ========== .. [1] https://en.wikipedia.org/wiki/Rademacher_distribution """ return rv(name, RademacherDistribution)
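# -----------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the module above): a small
# example of the finite distributions defined in this file answering density,
# probability and expectation queries.  Expected results in the comments come
# from the docstring examples above; the variable names are arbitrary.
from sympy import S, Eq
from sympy.stats import Die, Binomial, Hypergeometric, density, P, E

# A fair six-sided die: each face carries probability 1/6.
D6 = Die('D6', 6)
print(density(D6).dict)   # {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}

# Four fair "coin flips" modelled as a Binomial(n=4, p=1/2) variable.
X = Binomial('X', 4, S.Half)
print(density(X).dict)    # {0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
print(E(X))               # 2, i.e. n*p

# Drawing 3 marbles from an urn of 10 in which 5 count as "successes".
Y = Hypergeometric('Y', 10, 5, 3)
print(density(Y).dict)    # {0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
print(P(Eq(Y, 2)))        # 5/12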
from __future__ import print_function, division from sympy import (Matrix, MatrixSymbol, S, Indexed, Basic, Set, And, Eq, FiniteSet, ImmutableMatrix, Lambda, Mul, Dummy, IndexedBase, Add, linsolve, eye, Or, Not, Intersection, Union, Expr, Function, exp, cacheit, Ge, Piecewise, Symbol) from sympy.core.relational import Relational from sympy.logic.boolalg import Boolean from sympy.stats.joint_rv import JointDistributionHandmade, JointDistribution from sympy.stats.rv import (RandomIndexedSymbol, random_symbols, RandomSymbol, _symbol_converter, _value_check, pspace, given, dependent) from sympy.stats.stochastic_process import StochasticPSpace from sympy.stats.symbolic_probability import Probability, Expectation from sympy.stats.frv_types import Bernoulli, BernoulliDistribution from sympy.core.sympify import _sympify __all__ = [ 'StochasticProcess', 'DiscreteTimeStochasticProcess', 'DiscreteMarkovChain', 'TransitionMatrixOf', 'StochasticStateSpaceOf', 'GeneratorMatrixOf', 'ContinuousMarkovChain', 'BernoulliProcess' ] def _set_converter(itr): """ Helper function for converting list/tuple/set to Set. If parameter is not an instance of list/tuple/set then no operation is performed. Returns ======= Set The argument converted to Set. Raises ====== TypeError If the argument is not an instance of list/tuple/set. """ if isinstance(itr, (list, tuple, set)): itr = FiniteSet(*itr) if not isinstance(itr, Set): raise TypeError("%s is not an instance of list/tuple/set."%(itr)) return itr def _sym_sympify(arg): """ Converts an arbitrary expression to a type that can be used inside SymPy. As generally strings are unwise to use in the expressions, it returns the Symbol of argument if the string type argument is passed. Parameters ========= arg: The parameter to be converted to be used in Sympy. Returns ======= The converted parameter. """ if isinstance(arg, str): return Symbol(arg) else: return _sympify(arg) def _matrix_checks(matrix): if not isinstance(matrix, (Matrix, MatrixSymbol, ImmutableMatrix)): raise TypeError("Transition probabilities either should " "be a Matrix or a MatrixSymbol.") if matrix.shape[0] != matrix.shape[1]: raise ValueError("%s is not a square matrix"%(matrix)) if isinstance(matrix, Matrix): matrix = ImmutableMatrix(matrix.tolist()) return matrix class StochasticProcess(Basic): """ Base class for all the stochastic processes whether discrete or continuous. Parameters ========== sym: Symbol or str state_space: Set The state space of the stochastic process, by default S.Reals. For discrete sets it is zero indexed. See Also ======== DiscreteTimeStochasticProcess """ index_set = S.Reals def __new__(cls, sym, state_space=S.Reals, **kwargs): sym = _symbol_converter(sym) state_space = _set_converter(state_space) return Basic.__new__(cls, sym, state_space) @property def symbol(self): return self.args[0] @property def state_space(self): return self.args[1] def __call__(self, time): """ Overridden in ContinuousTimeStochasticProcess. """ raise NotImplementedError("Use [] for indexing discrete time stochastic process.") def __getitem__(self, time): """ Overridden in DiscreteTimeStochasticProcess. """ raise NotImplementedError("Use () for indexing continuous time stochastic process.") def probability(self, condition): raise NotImplementedError() def joint_distribution(self, *args): """ Computes the joint distribution of the random indexed variables. 
Parameters ========== args: iterable The finite list of random indexed variables/the key of a stochastic process whose joint distribution has to be computed. Returns ======= JointDistribution The joint distribution of the list of random indexed variables. An unevaluated object is returned if it is not possible to compute the joint distribution. Raises ====== ValueError: When the arguments passed are not of type RandomIndexSymbol or Number. """ args = list(args) for i, arg in enumerate(args): if S(arg).is_Number: if self.index_set.is_subset(S.Integers): args[i] = self.__getitem__(arg) else: args[i] = self.__call__(arg) elif not isinstance(arg, RandomIndexedSymbol): raise ValueError("Expected a RandomIndexedSymbol or " "key not %s"%(type(arg))) if args[0].pspace.distribution == None: # checks if there is any distribution available return JointDistribution(*args) pdf = Lambda(tuple(args), expr=Mul.fromiter(arg.pspace.process.density(arg) for arg in args)) return JointDistributionHandmade(pdf) def expectation(self, condition, given_condition): raise NotImplementedError("Abstract method for expectation queries.") class DiscreteTimeStochasticProcess(StochasticProcess): """ Base class for all discrete stochastic processes. """ def __getitem__(self, time): """ For indexing discrete time stochastic processes. Returns ======= RandomIndexedSymbol """ if time not in self.index_set: raise IndexError("%s is not in the index set of %s"%(time, self.symbol)) idx_obj = Indexed(self.symbol, time) distribution = getattr(self, 'distribution', None) pspace_obj = StochasticPSpace(self.symbol, self, distribution) return RandomIndexedSymbol(idx_obj, pspace_obj) class ContinuousTimeStochasticProcess(StochasticProcess): """ Base class for all continuous time stochastic process. """ def __call__(self, time): """ For indexing continuous time stochastic processes. Returns ======= RandomIndexedSymbol """ if time not in self.index_set: raise IndexError("%s is not in the index set of %s"%(time, self.symbol)) func_obj = Function(self.symbol)(time) pspace_obj = StochasticPSpace(self.symbol, self) return RandomIndexedSymbol(func_obj, pspace_obj) class TransitionMatrixOf(Boolean): """ Assumes that the matrix is the transition matrix of the process. """ def __new__(cls, process, matrix): if not isinstance(process, DiscreteMarkovChain): raise ValueError("Currently only DiscreteMarkovChain " "support TransitionMatrixOf.") matrix = _matrix_checks(matrix) return Basic.__new__(cls, process, matrix) process = property(lambda self: self.args[0]) matrix = property(lambda self: self.args[1]) class GeneratorMatrixOf(TransitionMatrixOf): """ Assumes that the matrix is the generator matrix of the process. """ def __new__(cls, process, matrix): if not isinstance(process, ContinuousMarkovChain): raise ValueError("Currently only ContinuousMarkovChain " "support GeneratorMatrixOf.") matrix = _matrix_checks(matrix) return Basic.__new__(cls, process, matrix) class StochasticStateSpaceOf(Boolean): def __new__(cls, process, state_space): if not isinstance(process, (DiscreteMarkovChain, ContinuousMarkovChain)): raise ValueError("Currently only DiscreteMarkovChain and ContinuousMarkovChain " "support StochasticStateSpaceOf.") state_space = _set_converter(state_space) return Basic.__new__(cls, process, state_space) process = property(lambda self: self.args[0]) state_space = property(lambda self: self.args[1]) class MarkovProcess(StochasticProcess): """ Contains methods that handle queries common to Markov processes. 
""" def _extract_information(self, given_condition): """ Helper function to extract information, like, transition matrix/generator matrix, state space, etc. """ if isinstance(self, DiscreteMarkovChain): trans_probs = self.transition_probabilities elif isinstance(self, ContinuousMarkovChain): trans_probs = self.generator_matrix state_space = self.state_space if isinstance(given_condition, And): gcs = given_condition.args given_condition = S.true for gc in gcs: if isinstance(gc, TransitionMatrixOf): trans_probs = gc.matrix if isinstance(gc, StochasticStateSpaceOf): state_space = gc.state_space if isinstance(gc, Relational): given_condition = given_condition & gc if isinstance(given_condition, TransitionMatrixOf): trans_probs = given_condition.matrix given_condition = S.true if isinstance(given_condition, StochasticStateSpaceOf): state_space = given_condition.state_space given_condition = S.true return trans_probs, state_space, given_condition def _check_trans_probs(self, trans_probs, row_sum=1): """ Helper function for checking the validity of transition probabilities. """ if not isinstance(trans_probs, MatrixSymbol): rows = trans_probs.tolist() for row in rows: if (sum(row) - row_sum) != 0: raise ValueError("Values in a row must sum to %s. " "If you are using Float or floats then please use Rational."%(row_sum)) def _work_out_state_space(self, state_space, given_condition, trans_probs): """ Helper function to extract state space if there is a random symbol in the given condition. """ # if given condition is None, then there is no need to work out # state_space from random variables if given_condition != None: rand_var = list(given_condition.atoms(RandomSymbol) - given_condition.atoms(RandomIndexedSymbol)) if len(rand_var) == 1: state_space = rand_var[0].pspace.set if not FiniteSet(*[i for i in range(trans_probs.shape[0])]).is_subset(state_space): raise ValueError("state space is not compatible with the transition probabilites.") state_space = FiniteSet(*[i for i in range(trans_probs.shape[0])]) return state_space @cacheit def _preprocess(self, given_condition, evaluate): """ Helper function for pre-processing the information. """ is_insufficient = False if not evaluate: # avoid pre-processing if the result is not to be evaluated return (True, None, None, None) # extracting transition matrix and state space trans_probs, state_space, given_condition = self._extract_information(given_condition) # given_condition does not have sufficient information # for computations if trans_probs == None or \ given_condition == None: is_insufficient = True else: # checking transition probabilities if isinstance(self, DiscreteMarkovChain): self._check_trans_probs(trans_probs, row_sum=1) elif isinstance(self, ContinuousMarkovChain): self._check_trans_probs(trans_probs, row_sum=0) # working out state space state_space = self._work_out_state_space(state_space, given_condition, trans_probs) return is_insufficient, trans_probs, state_space, given_condition def probability(self, condition, given_condition=None, evaluate=True, **kwargs): """ Handles probability queries for Markov process. Parameters ========== condition: Relational given_condition: Relational/And Returns ======= Probability If the information is not sufficient. Expr In all other cases. Note ==== Any information passed at the time of query overrides any information passed at the time of object creation like transition probabilities, state space. 
Pass the transition matrix using TransitionMatrixOf, generator matrix using GeneratorMatrixOf and state space using StochasticStateSpaceOf in given_condition using & or And. """ check, mat, state_space, new_given_condition = \ self._preprocess(given_condition, evaluate) if check: return Probability(condition, new_given_condition) if isinstance(self, ContinuousMarkovChain): trans_probs = self.transition_probabilities(mat) elif isinstance(self, DiscreteMarkovChain): trans_probs = mat if isinstance(condition, Relational): rv, states = (list(condition.atoms(RandomIndexedSymbol))[0], condition.as_set()) if isinstance(new_given_condition, And): gcs = new_given_condition.args else: gcs = (new_given_condition, ) grvs = new_given_condition.atoms(RandomIndexedSymbol) min_key_rv = None for grv in grvs: if grv.key <= rv.key: min_key_rv = grv if min_key_rv == None: return Probability(condition) prob, gstate = dict(), None for gc in gcs: if gc.has(min_key_rv): if gc.has(Probability): p, gp = (gc.rhs, gc.lhs) if isinstance(gc.lhs, Probability) \ else (gc.lhs, gc.rhs) gr = gp.args[0] gset = Intersection(gr.as_set(), state_space) gstate = list(gset)[0] prob[gset] = p else: _, gstate = (gc.lhs.key, gc.rhs) if isinstance(gc.lhs, RandomIndexedSymbol) \ else (gc.rhs.key, gc.lhs) if any((k not in self.index_set) for k in (rv.key, min_key_rv.key)): raise IndexError("The timestamps of the process are not in it's index set.") states = Intersection(states, state_space) for state in Union(states, FiniteSet(gstate)): if Ge(state, mat.shape[0]) == True: raise IndexError("No information is available for (%s, %s) in " "transition probabilities of shape, (%s, %s). " "State space is zero indexed." %(gstate, state, mat.shape[0], mat.shape[1])) if prob: gstates = Union(*prob.keys()) if len(gstates) == 1: gstate = list(gstates)[0] gprob = list(prob.values())[0] prob[gstates] = gprob elif len(gstates) == len(state_space) - 1: gstate = list(state_space - gstates)[0] gprob = S.One - sum(prob.values()) prob[state_space - gstates] = gprob else: raise ValueError("Conflicting information.") else: gprob = S.One if min_key_rv == rv: return sum([prob[FiniteSet(state)] for state in states]) if isinstance(self, ContinuousMarkovChain): return gprob * sum([trans_probs(rv.key - min_key_rv.key).__getitem__((gstate, state)) for state in states]) if isinstance(self, DiscreteMarkovChain): return gprob * sum([(trans_probs**(rv.key - min_key_rv.key)).__getitem__((gstate, state)) for state in states]) if isinstance(condition, Not): expr = condition.args[0] return S.One - self.probability(expr, given_condition, evaluate, **kwargs) if isinstance(condition, And): compute_later, state2cond, conds = [], dict(), condition.args for expr in conds: if isinstance(expr, Relational): ris = list(expr.atoms(RandomIndexedSymbol))[0] if state2cond.get(ris, None) is None: state2cond[ris] = S.true state2cond[ris] &= expr else: compute_later.append(expr) ris = [] for ri in state2cond: ris.append(ri) cset = Intersection(state2cond[ri].as_set(), state_space) if len(cset) == 0: return S.Zero state2cond[ri] = cset.as_relational(ri) sorted_ris = sorted(ris, key=lambda ri: ri.key) prod = self.probability(state2cond[sorted_ris[0]], given_condition, evaluate, **kwargs) for i in range(1, len(sorted_ris)): ri, prev_ri = sorted_ris[i], sorted_ris[i-1] if not isinstance(state2cond[ri], Eq): raise ValueError("The process is in multiple states at %s, unable to determine the probability."%(ri)) mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else 
GeneratorMatrixOf(self, mat) prod *= self.probability(state2cond[ri], state2cond[prev_ri] & mat_of & StochasticStateSpaceOf(self, state_space), evaluate, **kwargs) for expr in compute_later: prod *= self.probability(expr, given_condition, evaluate, **kwargs) return prod if isinstance(condition, Or): return sum([self.probability(expr, given_condition, evaluate, **kwargs) for expr in condition.args]) raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been " "implemented yet."%(expr, condition)) def expectation(self, expr, condition=None, evaluate=True, **kwargs): """ Handles expectation queries for markov process. Parameters ========== expr: RandomIndexedSymbol, Relational, Logic Condition for which expectation has to be computed. Must contain a RandomIndexedSymbol of the process. condition: Relational, Logic The given conditions under which computations should be done. Returns ======= Expectation Unevaluated object if computations cannot be done due to insufficient information. Expr In all other cases when the computations are successful. Note ==== Any information passed at the time of query overrides any information passed at the time of object creation like transition probabilities, state space. Pass the transition matrix using TransitionMatrixOf, generator matrix using GeneratorMatrixOf and state space using StochasticStateSpaceOf in given_condition using & or And. """ check, mat, state_space, condition = \ self._preprocess(condition, evaluate) if check: return Expectation(expr, condition) rvs = random_symbols(expr) if isinstance(expr, Expr) and isinstance(condition, Eq) \ and len(rvs) == 1: # handle queries similar to E(f(X[i]), Eq(X[i-m], <some-state>)) rv = list(rvs)[0] lhsg, rhsg = condition.lhs, condition.rhs if not isinstance(lhsg, RandomIndexedSymbol): lhsg, rhsg = (rhsg, lhsg) if rhsg not in self.state_space: raise ValueError("%s state is not in the state space."%(rhsg)) if rv.key < lhsg.key: raise ValueError("Incorrect given condition is given, expectation " "time %s < time %s"%(rv.key, rv.key)) mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat) cond = condition & mat_of & \ StochasticStateSpaceOf(self, state_space) func = lambda s: self.probability(Eq(rv, s), cond)*expr.subs(rv, s) return sum([func(s) for s in state_space]) raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been " "implemented yet."%(expr, condition)) class DiscreteMarkovChain(DiscreteTimeStochasticProcess, MarkovProcess): """ Represents discrete time Markov chain. Parameters ========== sym: Symbol/str state_space: Set Optional, by default, S.Reals trans_probs: Matrix/ImmutableMatrix/MatrixSymbol Optional, by default, None Examples ======== >>> from sympy.stats import DiscreteMarkovChain, TransitionMatrixOf >>> from sympy import Matrix, MatrixSymbol, Eq >>> from sympy.stats import P >>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]]) >>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T) >>> YS = DiscreteMarkovChain("Y") >>> Y.state_space FiniteSet(0, 1, 2) >>> Y.transition_probabilities Matrix([ [0.5, 0.2, 0.3], [0.2, 0.5, 0.3], [0.2, 0.3, 0.5]]) >>> TS = MatrixSymbol('T', 3, 3) >>> P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TS)) T[0, 2]*T[1, 0] + T[1, 1]*T[1, 2] + T[1, 2]*T[2, 2] >>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2) 0.36 References ========== .. [1] https://en.wikipedia.org/wiki/Markov_chain#Discrete-time_Markov_chain .. 
[2] https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf """ index_set = S.Naturals0 def __new__(cls, sym, state_space=S.Reals, trans_probs=None): sym = _symbol_converter(sym) state_space = _set_converter(state_space) if trans_probs != None: trans_probs = _matrix_checks(trans_probs) return Basic.__new__(cls, sym, state_space, trans_probs) @property def transition_probabilities(self): """ Transition probabilities of discrete Markov chain, either an instance of Matrix or MatrixSymbol. """ return self.args[2] def _transient2transient(self): """ Computes the one step probabilities of transient states to transient states. Used in finding fundamental matrix, absorbing probabilties. """ trans_probs = self.transition_probabilities if not isinstance(trans_probs, ImmutableMatrix): return None m = trans_probs.shape[0] trans_states = [i for i in range(m) if trans_probs[i, i] != 1] t2t = [[trans_probs[si, sj] for sj in trans_states] for si in trans_states] return ImmutableMatrix(t2t) def _transient2absorbing(self): """ Computes the one step probabilities of transient states to absorbing states. Used in finding fundamental matrix, absorbing probabilties. """ trans_probs = self.transition_probabilities if not isinstance(trans_probs, ImmutableMatrix): return None m, trans_states, absorb_states = \ trans_probs.shape[0], [], [] for i in range(m): if trans_probs[i, i] == 1: absorb_states.append(i) else: trans_states.append(i) if not absorb_states or not trans_states: return None t2a = [[trans_probs[si, sj] for sj in absorb_states] for si in trans_states] return ImmutableMatrix(t2a) def fundamental_matrix(self): Q = self._transient2transient() if Q == None: return None I = eye(Q.shape[0]) if (I - Q).det() == 0: raise ValueError("Fundamental matrix doesn't exists.") return ImmutableMatrix((I - Q).inv().tolist()) def absorbing_probabilites(self): """ Computes the absorbing probabilities, i.e., the ij-th entry of the matrix denotes the probability of Markov chain being absorbed in state j starting from state i. """ R = self._transient2absorbing() N = self.fundamental_matrix() if R == None or N == None: return None return N*R def is_regular(self): w = self.fixed_row_vector() if w is None or isinstance(w, (Lambda)): return None return all((wi > 0) == True for wi in w.row(0)) def is_absorbing_state(self, state): trans_probs = self.transition_probabilities if isinstance(trans_probs, ImmutableMatrix) and \ state < trans_probs.shape[0]: return S(trans_probs[state, state]) is S.One def is_absorbing_chain(self): trans_probs = self.transition_probabilities return any(self.is_absorbing_state(state) == True for state in range(trans_probs.shape[0])) def fixed_row_vector(self): trans_probs = self.transition_probabilities if trans_probs == None: return None if isinstance(trans_probs, MatrixSymbol): wm = MatrixSymbol('wm', 1, trans_probs.shape[0]) return Lambda((wm, trans_probs), Eq(wm*trans_probs, wm)) w = IndexedBase('w') wi = [w[i] for i in range(trans_probs.shape[0])] wm = Matrix([wi]) eqs = (wm*trans_probs - wm).tolist()[0] eqs.append(sum(wi) - 1) soln = list(linsolve(eqs, wi))[0] return ImmutableMatrix([[sol for sol in soln]]) @property def limiting_distribution(self): """ The fixed row vector is the limiting distribution of a discrete Markov chain. """ return self.fixed_row_vector() class ContinuousMarkovChain(ContinuousTimeStochasticProcess, MarkovProcess): """ Represents continuous time Markov chain. 
Parameters ========== sym: Symbol/str state_space: Set Optional, by default, S.Reals gen_mat: Matrix/ImmutableMatrix/MatrixSymbol Optional, by default, None Examples ======== >>> from sympy.stats import ContinuousMarkovChain >>> from sympy import Matrix, S, MatrixSymbol >>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]]) >>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G) >>> C.limiting_distribution() Matrix([[1/2, 1/2]]) References ========== .. [1] https://en.wikipedia.org/wiki/Markov_chain#Continuous-time_Markov_chain .. [2] http://u.math.biu.ac.il/~amirgi/CTMCnotes.pdf """ index_set = S.Reals def __new__(cls, sym, state_space=S.Reals, gen_mat=None): sym = _symbol_converter(sym) state_space = _set_converter(state_space) if gen_mat != None: gen_mat = _matrix_checks(gen_mat) return Basic.__new__(cls, sym, state_space, gen_mat) @property def generator_matrix(self): return self.args[2] @cacheit def transition_probabilities(self, gen_mat=None): t = Dummy('t') if isinstance(gen_mat, (Matrix, ImmutableMatrix)) and \ gen_mat.is_diagonalizable(): # for faster computation use diagonalized generator matrix Q, D = gen_mat.diagonalize() return Lambda(t, Q*exp(t*D)*Q.inv()) if gen_mat != None: return Lambda(t, exp(t*gen_mat)) def limiting_distribution(self): gen_mat = self.generator_matrix if gen_mat == None: return None if isinstance(gen_mat, MatrixSymbol): wm = MatrixSymbol('wm', 1, gen_mat.shape[0]) return Lambda((wm, gen_mat), Eq(wm*gen_mat, wm)) w = IndexedBase('w') wi = [w[i] for i in range(gen_mat.shape[0])] wm = Matrix([wi]) eqs = (wm*gen_mat).tolist()[0] eqs.append(sum(wi) - 1) soln = list(linsolve(eqs, wi))[0] return ImmutableMatrix([[sol for sol in soln]]) class BernoulliProcess(DiscreteTimeStochasticProcess): """ The Bernoulli process consists of repeated independent Bernoulli process trials with the same parameter `p`. It's assumed that the probability `p` applies to every trial and that the outcomes of each trial are independent of all the rest. Therefore Bernoulli Processs is Discrete State and Discrete Time Stochastic Process. Parameters ========== sym: Symbol/str success: Integer/str The event which is considered to be success, by default is 1. failure: Integer/str The event which is considered to be failure, by default is 0. p: Real Number between 0 and 1 Represents the probability of getting success. Examples ======== >>> from sympy.stats import BernoulliProcess, P, E >>> from sympy import Eq, Gt, Lt >>> B = BernoulliProcess("B", p=0.7, success=1, failure=0) >>> B.state_space FiniteSet(0, 1) >>> (B.p).round(2) 0.70 >>> B.success 1 >>> B.failure 0 >>> X = B[1] + B[2] + B[3] >>> P(Eq(X, 0)).round(2) 0.03 >>> P(Eq(X, 2)).round(2) 0.44 >>> P(Eq(X, 4)).round(2) 0 >>> P(Gt(X, 1)).round(2) 0.78 >>> P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2) 0.04 >>> B.joint_distribution(B[1], B[2]) JointDistributionHandmade(Lambda((B[1], B[2]), Piecewise((0.7, Eq(B[1], 1)), (0.3, Eq(B[1], 0)), (0, True))*Piecewise((0.7, Eq(B[2], 1)), (0.3, Eq(B[2], 0)), (0, True)))) >>> E(2*B[1] + B[2]).round(2) 2.10 >>> P(B[1] < 1).round(2) 0.30 References ========== .. [1] https://en.wikipedia.org/wiki/Bernoulli_process .. 
[2] https://mathcs.clarku.edu/~djoyce/ma217/bernoulli.pdf """ index_set = S.Naturals0 def __new__(cls, sym, p, success=1, failure=0): _value_check(p >= 0 and p <= 1, 'Value of p must be between 0 and 1.') sym = _symbol_converter(sym) p = _sympify(p) success = _sym_sympify(success) failure = _sym_sympify(failure) return Basic.__new__(cls, sym, p, success, failure) @property def symbol(self): return self.args[0] @property def p(self): return self.args[1] @property def success(self): return self.args[2] @property def failure(self): return self.args[3] @property def state_space(self): return _set_converter([self.success, self.failure]) @property def distribution(self): return BernoulliDistribution(self.p) def _rvindexed_subs(self, expr, condition=None): """ Substitutes the RandomIndexedSymbol with the RandomSymbol with same name, distribution and probability as RandomIndexedSymbol. """ rvs_expr = random_symbols(expr) if len(rvs_expr) != 0: swapdict_expr = {} for rv in rvs_expr: if isinstance(rv, RandomIndexedSymbol): newrv = Bernoulli(rv.name, p=rv.pspace.process.p, succ=self.success, fail=self.failure) swapdict_expr[rv] = newrv expr = expr.subs(swapdict_expr) rvs_cond = random_symbols(condition) if len(rvs_cond)!=0: swapdict_cond = {} if condition is not None: for rv in rvs_cond: if isinstance(rv, RandomIndexedSymbol): newrv = Bernoulli(rv.name, p=rv.pspace.process.p, succ=self.success, fail=self.failure) swapdict_cond[rv] = newrv condition = condition.subs(swapdict_cond) return expr, condition def expectation(self, expr, condition=None, evaluate=True, **kwargs): """ Computes expectation. Parameters ========== expr: RandomIndexedSymbol, Relational, Logic Condition for which expectation has to be computed. Must contain a RandomIndexedSymbol of the process. condition: Relational, Logic The given conditions under which computations should be done. Returns ======= Expectation of the RandomIndexedSymbol. """ new_expr, new_condition = self._rvindexed_subs(expr, condition) new_pspace = pspace(new_expr) if new_condition is not None: new_expr = given(new_expr, new_condition) if new_expr.is_Add: # As E is Linear return Add(*[new_pspace.compute_expectation( expr=arg, evaluate=evaluate, **kwargs) for arg in new_expr.args]) return new_pspace.compute_expectation( new_expr, evaluate=evaluate, **kwargs) def probability(self, condition, given_condition=None, evaluate=True, **kwargs): """ Computes probability. Parameters ========== condition: Relational Condition for which probability has to be computed. Must contain a RandomIndexedSymbol of the process. given_condition: Relational/And The given conditions under which computations should be done. Returns ======= Probability of the condition. 
""" new_condition, new_givencondition = self._rvindexed_subs(condition, given_condition) if isinstance(new_givencondition, RandomSymbol): condrv = random_symbols(new_condition) if len(condrv) == 1 and condrv[0] == new_givencondition: return BernoulliDistribution(self.probability(new_condition), 0, 1) if any([dependent(rv, new_givencondition) for rv in condrv]): return Probability(new_condition, new_givencondition) else: return self.probability(new_condition) if new_givencondition is not None and \ not isinstance(new_givencondition, (Relational, Boolean)): raise ValueError("%s is not a relational or combination of relationals" % (new_givencondition)) if new_givencondition == False: return S.Zero if new_condition == True: return S.One if new_condition == False: return S.Zero if not isinstance(new_condition, (Relational, Boolean)): raise ValueError("%s is not a relational or combination of relationals" % (new_condition)) if new_givencondition is not None: # If there is a condition # Recompute on new conditional expr return self.probability(given(new_condition, new_givencondition, **kwargs), **kwargs) return pspace(new_condition).probability(new_condition, **kwargs) def density(self, x): return Piecewise((self.p, Eq(x, self.success)), (1 - self.p, Eq(x, self.failure)), (S.Zero, True))
""" SymPy statistics module Introduces a random variable type into the SymPy language. Random variables may be declared using prebuilt functions such as Normal, Exponential, Coin, Die, etc... or built with functions like FiniteRV. Queries on random expressions can be made using the functions ========================= ============================= Expression Meaning ------------------------- ----------------------------- ``P(condition)`` Probability ``E(expression)`` Expected value ``H(expression)`` Entropy ``variance(expression)`` Variance ``density(expression)`` Probability Density Function ``sample(expression)`` Produce a realization ``where(condition)`` Where the condition is true ========================= ============================= Examples ======== >>> from sympy.stats import P, E, variance, Die, Normal >>> from sympy import Eq, simplify >>> X, Y = Die('X', 6), Die('Y', 6) # Define two six sided dice >>> Z = Normal('Z', 0, 1) # Declare a Normal random variable with mean 0, std 1 >>> P(X>3) # Probability X is greater than 3 1/2 >>> E(X+Y) # Expectation of the sum of two dice 7 >>> variance(X+Y) # Variance of the sum of two dice 35/6 >>> simplify(P(Z>1)) # Probability of Z being greater than 1 1/2 - erf(sqrt(2)/2)/2 """ __all__ = [ 'P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf','median', 'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std', 'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'independent', 'random_symbols', 'correlation', 'factorial_moment', 'moment', 'cmoment', 'sampling_density', 'moment_generating_function', 'smoment', 'quantile', 'FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin', 'Binomial', 'BetaBinomial', 'Hypergeometric', 'Rademacher', 'ContinuousRV', 'Arcsin', 'Benini', 'Beta', 'BetaNoncentral', 'BetaPrime', 'Cauchy', 'Chi', 'ChiNoncentral', 'ChiSquared', 'Dagum', 'Erlang', 'ExGaussian', 'Exponential', 'ExponentialPower', 'FDistribution', 'FisherZ', 'Frechet', 'Gamma', 'GammaInverse', 'Gompertz', 'Gumbel', 'Kumaraswamy', 'Laplace', 'Levy', 'Logistic', 'LogLogistic', 'LogNormal', 'Moyal', 'Maxwell', 'Nakagami', 'Normal', 'GaussianInverse', 'Pareto', 'PowerFunction', 'QuadraticU', 'RaisedCosine', 'Rayleigh','Reciprocal', 'StudentT', 'ShiftedGompertz', 'Trapezoidal', 'Triangular', 'Uniform', 'UniformSum', 'VonMises', 'Wald', 'Weibull', 'WignerSemicircle', 'Geometric','Hermite', 'Logarithmic', 'NegativeBinomial', 'Poisson', 'Skellam', 'YuleSimon', 'Zeta', 'JointRV', 'Dirichlet', 'GeneralizedMultivariateLogGamma', 'GeneralizedMultivariateLogGammaOmega', 'Multinomial', 'MultivariateBeta', 'MultivariateEwens', 'MultivariateT', 'NegativeMultinomial', 'NormalGamma', 'StochasticProcess', 'DiscreteTimeStochasticProcess', 'DiscreteMarkovChain', 'TransitionMatrixOf', 'StochasticStateSpaceOf', 'GeneratorMatrixOf', 'ContinuousMarkovChain', 'BernoulliProcess', 'CircularEnsemble', 'CircularUnitaryEnsemble', 'CircularOrthogonalEnsemble', 'CircularSymplecticEnsemble', 'GaussianEnsemble', 'GaussianUnitaryEnsemble', 'GaussianOrthogonalEnsemble', 'GaussianSymplecticEnsemble', 'joint_eigen_distribution', 'JointEigenDistribution', 'level_spacing_distribution', 'Probability', 'Expectation', 'Variance', 'Covariance', ] from .rv_interface import (P, E, H, density, where, given, sample, cdf, median, characteristic_function, pspace, sample_iter, variance, std, skewness, kurtosis, covariance, dependent, entropy, independent, random_symbols, correlation, factorial_moment, moment, cmoment, sampling_density, moment_generating_function, smoment, 
quantile) from .frv_types import (FiniteRV, DiscreteUniform, Die, Bernoulli, Coin, Binomial, BetaBinomial, Hypergeometric, Rademacher) from .crv_types import (ContinuousRV, Arcsin, Benini, Beta, BetaNoncentral, BetaPrime, Cauchy, Chi, ChiNoncentral, ChiSquared, Dagum, Erlang, ExGaussian, Exponential, ExponentialPower, FDistribution, FisherZ, Frechet, Gamma, GammaInverse, Gompertz, Gumbel, Kumaraswamy, Laplace, Levy, Logistic, LogLogistic, LogNormal, Maxwell, Moyal, Nakagami, Normal, GaussianInverse, Pareto, QuadraticU, RaisedCosine, Rayleigh, Reciprocal, StudentT, PowerFunction, ShiftedGompertz, Trapezoidal, Triangular, Uniform, UniformSum, VonMises, Wald, Weibull, WignerSemicircle) from .drv_types import (Geometric, Hermite, Logarithmic, NegativeBinomial, Poisson, Skellam, YuleSimon, Zeta) from .joint_rv_types import (JointRV, Dirichlet, GeneralizedMultivariateLogGamma, GeneralizedMultivariateLogGammaOmega, Multinomial, MultivariateBeta, MultivariateEwens, MultivariateT, NegativeMultinomial, NormalGamma) from .stochastic_process_types import (StochasticProcess, DiscreteTimeStochasticProcess, DiscreteMarkovChain, TransitionMatrixOf, StochasticStateSpaceOf, GeneratorMatrixOf, ContinuousMarkovChain, BernoulliProcess) from .random_matrix_models import (CircularEnsemble, CircularUnitaryEnsemble, CircularOrthogonalEnsemble, CircularSymplecticEnsemble, GaussianEnsemble, GaussianUnitaryEnsemble, GaussianOrthogonalEnsemble, GaussianSymplecticEnsemble, joint_eigen_distribution, JointEigenDistribution, level_spacing_distribution) from .symbolic_probability import (Probability, Expectation, Variance, Covariance)
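# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the upstream package).
# It exercises the query functions advertised in the module docstring above;
# the helper name `_demo_query_interface` is hypothetical and the commented
# results are the ones quoted in the docstrings, which may vary by version.
def _demo_query_interface():
    from sympy import Symbol, simplify
    from sympy.stats import Die, Normal, P, E, variance, density, cdf
    X, Y = Die('X', 6), Die('Y', 6)  # two fair six-sided dice
    Z = Normal('Z', 0, 1)            # standard normal random variable
    z = Symbol('z')
    return {
        'P(X > 3)': P(X > 3),                 # 1/2
        'E(X + Y)': E(X + Y),                 # 7
        'variance(X + Y)': variance(X + Y),   # 35/6
        'P(Z > 1)': simplify(P(Z > 1)),       # 1/2 - erf(sqrt(2)/2)/2
        'density(Z)(z)': density(Z)(z),       # standard normal pdf, symbolically
        'cdf(Z)(z)': cdf(Z)(z),               # standard normal cdf in terms of erf
    }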
from sympy import (sympify, S, pi, sqrt, exp, Lambda, Indexed, besselk, gamma, Interval, Range, factorial, Mul, Integer, Add, rf, Eq, Piecewise, ones, Symbol, Pow, Rational, Sum, Intersection, Matrix, symbols, Product, IndexedBase) from sympy.matrices import ImmutableMatrix, MatrixSymbol from sympy.matrices.expressions.determinant import det from sympy.stats.joint_rv import (JointDistribution, JointPSpace, JointDistributionHandmade, MarginalDistribution) from sympy.stats.rv import _value_check, random_symbols __all__ = ['JointRV', 'Dirichlet', 'GeneralizedMultivariateLogGamma', 'GeneralizedMultivariateLogGammaOmega', 'Multinomial', 'MultivariateBeta', 'MultivariateEwens', 'MultivariateT', 'NegativeMultinomial', 'NormalGamma' ] def multivariate_rv(cls, sym, *args): args = list(map(sympify, args)) dist = cls(*args) args = dist.args dist.check(*args) return JointPSpace(sym, dist).value def JointRV(symbol, pdf, _set=None): """ Create a Joint Random Variable where each of its component is conitinuous, given the following: -- a symbol -- a PDF in terms of indexed symbols of the symbol given as the first argument NOTE: As of now, the set for each component for a `JointRV` is equal to the set of all integers, which can not be changed. Examples ======== >>> from sympy import symbols, exp, pi, Indexed, S >>> from sympy.stats import density >>> from sympy.stats.joint_rv_types import JointRV >>> x1, x2 = (Indexed('x', i) for i in (1, 2)) >>> pdf = exp(-x1**2/2 + x1 - x2**2/2 - S(1)/2)/(2*pi) >>> N1 = JointRV('x', pdf) #Multivariate Normal distribution >>> density(N1)(1, 2) exp(-2)/(2*pi) Returns ======= A RandomSymbol. """ #TODO: Add support for sets provided by the user symbol = sympify(symbol) syms = list(i for i in pdf.free_symbols if isinstance(i, Indexed) and i.base == IndexedBase(symbol)) syms = tuple(sorted(syms, key = lambda index: index.args[1])) _set = S.Reals**len(syms) pdf = Lambda(syms, pdf) dist = JointDistributionHandmade(pdf, _set) jrv = JointPSpace(symbol, dist).value rvs = random_symbols(pdf) if len(rvs) != 0: dist = MarginalDistribution(dist, (jrv,)) return JointPSpace(symbol, dist).value return jrv #------------------------------------------------------------------------------- # Multivariate Normal distribution --------------------------------------------------------- class MultivariateNormalDistribution(JointDistribution): _argnames = ('mu', 'sigma') is_Continuous=True @property def set(self): k = self.mu.shape[0] return S.Reals**k @staticmethod def check(mu, sigma): _value_check(mu.shape[0] == sigma.shape[0], "Size of the mean vector and covariance matrix are incorrect.") #check if covariance matrix is positive definite or not. if not isinstance(sigma, MatrixSymbol): _value_check(sigma.is_positive_definite, "The covariance matrix must be positive definite. 
") def pdf(self, *args): mu, sigma = self.mu, self.sigma k = mu.shape[0] args = ImmutableMatrix(args) x = args - mu return S.One/sqrt((2*pi)**(k)*det(sigma))*exp( Rational(-1, 2)*x.transpose()*(sigma.inv()*\ x))[0] def marginal_distribution(self, indices, sym): sym = ImmutableMatrix([Indexed(sym, i) for i in indices]) _mu, _sigma = self.mu, self.sigma k = self.mu.shape[0] for i in range(k): if i not in indices: _mu = _mu.row_del(i) _sigma = _sigma.col_del(i) _sigma = _sigma.row_del(i) return Lambda(tuple(sym), S.One/sqrt((2*pi)**(len(_mu))*det(_sigma))*exp( Rational(-1, 2)*(_mu - sym).transpose()*(_sigma.inv()*\ (_mu - sym)))[0]) #------------------------------------------------------------------------------- # Multivariate Laplace distribution --------------------------------------------------------- class MultivariateLaplaceDistribution(JointDistribution): _argnames = ('mu', 'sigma') is_Continuous=True @property def set(self): k = self.mu.shape[0] return S.Reals**k @staticmethod def check(mu, sigma): _value_check(mu.shape[0] == sigma.shape[0], "Size of the mean vector and covariance matrix are incorrect.") # check if covariance matrix is positive definite or not. if not isinstance(sigma, MatrixSymbol): _value_check(sigma.is_positive_definite, "The covariance matrix must be positive definite. ") def pdf(self, *args): mu, sigma = self.mu, self.sigma mu_T = mu.transpose() k = S(mu.shape[0]) sigma_inv = sigma.inv() args = ImmutableMatrix(args) args_T = args.transpose() x = (mu_T*sigma_inv*mu)[0] y = (args_T*sigma_inv*args)[0] v = 1 - k/2 return S(2)/((2*pi)**(S(k)/2)*sqrt(det(sigma)))\ *(y/(2 + x))**(S(v)/2)*besselk(v, sqrt((2 + x)*(y)))\ *exp((args_T*sigma_inv*mu)[0]) #------------------------------------------------------------------------------- # Multivariate StudentT distribution --------------------------------------------------------- class MultivariateTDistribution(JointDistribution): _argnames = ('mu', 'shape_mat', 'dof') is_Continuous=True @property def set(self): k = self.mu.shape[0] return S.Reals**k @staticmethod def check(mu, sigma, v): _value_check(mu.shape[0] == sigma.shape[0], "Size of the location vector and shape matrix are incorrect.") # check if covariance matrix is positive definite or not. if not isinstance(sigma, MatrixSymbol): _value_check(sigma.is_positive_definite, "The shape matrix must be positive definite. ") def pdf(self, *args): mu, sigma = self.mu, self.shape_mat v = S(self.dof) k = S(mu.shape[0]) sigma_inv = sigma.inv() args = ImmutableMatrix(args) x = args - mu return gamma((k + v)/2)/(gamma(v/2)*(v*pi)**(k/2)*sqrt(det(sigma)))\ *(1 + 1/v*(x.transpose()*sigma_inv*x)[0])**((-v - k)/2) def MultivariateT(syms, mu, sigma, v): """ Creates a joint random variable with multivariate T-distribution. Parameters ========== syms: A symbol/str For identifying the random variable. 
mu: A list/matrix Representing the location vector sigma: The shape matrix for the distribution Examples ======== >>> from sympy.stats import density, MultivariateT >>> from sympy import Symbol >>> x = Symbol("x") >>> X = MultivariateT("x", [1, 1], [[1, 0], [0, 1]], 2) >>> density(X)(1, 2) 2/(9*pi) Returns ======= A random symbol """ return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v) #------------------------------------------------------------------------------- # Multivariate Normal Gamma distribution --------------------------------------------------------- class NormalGammaDistribution(JointDistribution): _argnames = ('mu', 'lamda', 'alpha', 'beta') is_Continuous=True @staticmethod def check(mu, lamda, alpha, beta): _value_check(mu.is_real, "Location must be real.") _value_check(lamda > 0, "Lambda must be positive") _value_check(alpha > 0, "alpha must be positive") _value_check(beta > 0, "beta must be positive") @property def set(self): return S.Reals*Interval(0, S.Infinity) def pdf(self, x, tau): beta, alpha, lamda = self.beta, self.alpha, self.lamda mu = self.mu return beta**alpha*sqrt(lamda)/(gamma(alpha)*sqrt(2*pi))*\ tau**(alpha - S.Half)*exp(-1*beta*tau)*\ exp(-1*(lamda*tau*(x - mu)**2)/S(2)) def marginal_distribution(self, indices, *sym): if len(indices) == 2: return self.pdf(*sym) if indices[0] == 0: #For marginal over `x`, return non-standardized Student-T's #distribution x = sym[0] v, mu, sigma = self.alpha - S.Half, self.mu, \ S(self.beta)/(self.lamda * self.alpha) return Lambda(sym, gamma((v + 1)/2)/(gamma(v/2)*sqrt(pi*v)*sigma)*\ (1 + 1/v*((x - mu)/sigma)**2)**((-v -1)/2)) #For marginal over `tau`, return Gamma distribution as per construction from sympy.stats.crv_types import GammaDistribution return Lambda(sym, GammaDistribution(self.alpha, self.beta)(sym[0])) def NormalGamma(sym, mu, lamda, alpha, beta): """ Creates a bivariate joint random variable with multivariate Normal gamma distribution. Parameters ========== sym: A symbol/str For identifying the random variable. mu: A real number The mean of the normal distribution alpha: a positive integer beta: a positive integer lamda: a positive integer Examples ======== >>> from sympy.stats import density, NormalGamma >>> from sympy import symbols >>> X = NormalGamma('x', 0, 1, 2, 3) >>> y, z = symbols('y z') >>> density(X)(y, z) 9*sqrt(2)*z**(3/2)*exp(-3*z)*exp(-y**2*z/2)/(2*sqrt(pi)) Returns ======= A random symbol """ return multivariate_rv(NormalGammaDistribution, sym, mu, lamda, alpha, beta) #------------------------------------------------------------------------------- # Multivariate Beta/Dirichlet distribution --------------------------------------------------------- class MultivariateBetaDistribution(JointDistribution): _argnames = ('alpha',) is_Continuous = True @staticmethod def check(alpha): _value_check(len(alpha) >= 2, "At least two categories should be passed.") for a_k in alpha: _value_check((a_k > 0) != False, "Each concentration parameter" " should be positive.") @property def set(self): k = len(self.alpha) return Interval(0, 1)**k def pdf(self, *syms): alpha = self.alpha B = Mul.fromiter(map(gamma, alpha))/gamma(Add(*alpha)) return Mul.fromiter([sym**(a_k - 1) for a_k, sym in zip(alpha, syms)])/B def MultivariateBeta(syms, *alpha): """ Creates a continuous random variable with Dirichlet/Multivariate Beta Distribution. The density of the dirichlet distribution can be found at [1]. Parameters ========== alpha: positive real numbers signifying concentration numbers. 
Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import density >>> from sympy.stats.joint_rv import marginal_distribution >>> from sympy.stats.joint_rv_types import MultivariateBeta >>> from sympy import Symbol >>> a1 = Symbol('a1', positive=True) >>> a2 = Symbol('a2', positive=True) >>> B = MultivariateBeta('B', [a1, a2]) >>> C = MultivariateBeta('C', a1, a2) >>> x = Symbol('x') >>> y = Symbol('y') >>> density(B)(x, y) x**(a1 - 1)*y**(a2 - 1)*gamma(a1 + a2)/(gamma(a1)*gamma(a2)) >>> marginal_distribution(C, C[0])(x) x**(a1 - 1)*gamma(a1 + a2)/(a2*gamma(a1)*gamma(a2)) References ========== .. [1] https://en.wikipedia.org/wiki/Dirichlet_distribution .. [2] http://mathworld.wolfram.com/DirichletDistribution.html """ if not isinstance(alpha[0], list): alpha = (list(alpha),) return multivariate_rv(MultivariateBetaDistribution, syms, alpha[0]) Dirichlet = MultivariateBeta #------------------------------------------------------------------------------- # Multivariate Ewens distribution --------------------------------------------------------- class MultivariateEwensDistribution(JointDistribution): _argnames = ('n', 'theta') is_Discrete = True is_Continuous = False @staticmethod def check(n, theta): _value_check((n > 0), "sample size should be positive integer.") _value_check(theta.is_positive, "mutation rate should be positive.") @property def set(self): if not isinstance(self.n, Integer): i = Symbol('i', integer=True, positive=True) return Product(Intersection(S.Naturals0, Interval(0, self.n//i)), (i, 1, self.n)) prod_set = Range(0, self.n + 1) for i in range(2, self.n + 1): prod_set *= Range(0, self.n//i + 1) return prod_set.flatten() def pdf(self, *syms): n, theta = self.n, self.theta condi = isinstance(self.n, Integer) if not (isinstance(syms[0], IndexedBase) or condi): raise ValueError("Please use IndexedBase object for syms as " "the dimension is symbolic") term_1 = factorial(n)/rf(theta, n) if condi: term_2 = Mul.fromiter([theta**syms[j]/((j+1)**syms[j]*factorial(syms[j])) for j in range(n)]) cond = Eq(sum([(k + 1)*syms[k] for k in range(n)]), n) return Piecewise((term_1 * term_2, cond), (0, True)) syms = syms[0] j, k = symbols('j, k', positive=True, integer=True) term_2 = Product(theta**syms[j]/((j+1)**syms[j]*factorial(syms[j])), (j, 0, n - 1)) cond = Eq(Sum((k + 1)*syms[k], (k, 0, n - 1)), n) return Piecewise((term_1 * term_2, cond), (0, True)) def MultivariateEwens(syms, n, theta): """ Creates a discrete random variable with Multivariate Ewens Distribution. The density of the said distribution can be found at [1]. Parameters ========== n: positive integer of class Integer, size of the sample or the integer whose partitions are considered theta: mutation rate, must be positive real number. Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import density >>> from sympy.stats.joint_rv import marginal_distribution >>> from sympy.stats.joint_rv_types import MultivariateEwens >>> from sympy import Symbol >>> a1 = Symbol('a1', positive=True) >>> a2 = Symbol('a2', positive=True) >>> ed = MultivariateEwens('E', 2, 1) >>> density(ed)(a1, a2) Piecewise((2**(-a2)/(factorial(a1)*factorial(a2)), Eq(a1 + 2*a2, 2)), (0, True)) >>> marginal_distribution(ed, ed[0])(a1) Piecewise((1/factorial(a1), Eq(a1, 2)), (0, True)) References ========== .. [1] https://en.wikipedia.org/wiki/Ewens%27s_sampling_formula .. 
[2] http://www.stat.rutgers.edu/home/hcrane/Papers/STS529.pdf """ return multivariate_rv(MultivariateEwensDistribution, syms, n, theta) #------------------------------------------------------------------------------- # Generalized Multivariate Log Gamma distribution --------------------------------------------------------- class GeneralizedMultivariateLogGammaDistribution(JointDistribution): _argnames = ('delta', 'v', 'lamda', 'mu') is_Continuous=True def check(self, delta, v, l, mu): _value_check((delta >= 0, delta <= 1), "delta must be in range [0, 1].") _value_check((v > 0), "v must be positive") for lk in l: _value_check((lk > 0), "lamda must be a positive vector.") for muk in mu: _value_check((muk > 0), "mu must be a positive vector.") _value_check(len(l) > 1,"the distribution should have at least" " two random variables.") @property def set(self): return S.Reals**len(self.lamda) def pdf(self, *y): from sympy.functions.special.gamma_functions import gamma d, v, l, mu = self.delta, self.v, self.lamda, self.mu n = Symbol('n', negative=False, integer=True) k = len(l) sterm1 = Pow((1 - d), n)/\ ((gamma(v + n)**(k - 1))*gamma(v)*gamma(n + 1)) sterm2 = Mul.fromiter([mui*li**(-v - n) for mui, li in zip(mu, l)]) term1 = sterm1 * sterm2 sterm3 = (v + n) * sum([mui * yi for mui, yi in zip(mu, y)]) sterm4 = sum([exp(mui * yi)/li for (mui, yi, li) in zip(mu, y, l)]) term2 = exp(sterm3 - sterm4) return Pow(d, v) * Sum(term1 * term2, (n, 0, S.Infinity)) def GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu): """ Creates a joint random variable with generalized multivariate log gamma distribution. The joint pdf can be found at [1]. Parameters ========== syms: list/tuple/set of symbols for identifying each component delta: A constant in range [0, 1] v: positive real lamda: a list of positive reals mu: a list of positive reals Returns ======= A Random Symbol Examples ======== >>> from sympy.stats import density >>> from sympy.stats.joint_rv import marginal_distribution >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma >>> from sympy import symbols, S >>> v = 1 >>> l, mu = [1, 1, 1], [1, 1, 1] >>> d = S.Half >>> y = symbols('y_1:4', positive=True) >>> Gd = GeneralizedMultivariateLogGamma('G', d, v, l, mu) >>> density(Gd)(y[0], y[1], y[2]) Sum(2**(-n)*exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) - exp(y_2) - exp(y_3))/gamma(n + 1)**3, (n, 0, oo))/2 References ========== .. [1] https://en.wikipedia.org/wiki/Generalized_multivariate_log-gamma_distribution .. [2] https://www.researchgate.net/publication/234137346_On_a_multivariate_log-gamma_distribution_and_the_use_of_the_distribution_in_the_Bayesian_analysis Note ==== If the GeneralizedMultivariateLogGamma is too long to type use, `from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma as GMVLG` If you want to pass the matrix omega instead of the constant delta, then use, GeneralizedMultivariateLogGammaOmega. """ return multivariate_rv(GeneralizedMultivariateLogGammaDistribution, syms, delta, v, lamda, mu) def GeneralizedMultivariateLogGammaOmega(syms, omega, v, lamda, mu): """ Extends GeneralizedMultivariateLogGamma. 
Parameters ========== syms: list/tuple/set of symbols For identifying each component omega: A square matrix Every element of square matrix must be absolute value of square root of correlation coefficient v: positive real lamda: a list of positive reals mu: a list of positive reals Returns ======= A Random Symbol Examples ======== >>> from sympy.stats import density >>> from sympy.stats.joint_rv import marginal_distribution >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGammaOmega >>> from sympy import Matrix, symbols, S >>> omega = Matrix([[1, S.Half, S.Half], [S.Half, 1, S.Half], [S.Half, S.Half, 1]]) >>> v = 1 >>> l, mu = [1, 1, 1], [1, 1, 1] >>> G = GeneralizedMultivariateLogGammaOmega('G', omega, v, l, mu) >>> y = symbols('y_1:4', positive=True) >>> density(G)(y[0], y[1], y[2]) sqrt(2)*Sum((1 - sqrt(2)/2)**n*exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) - exp(y_2) - exp(y_3))/gamma(n + 1)**3, (n, 0, oo))/2 References ========== See references of GeneralizedMultivariateLogGamma. Notes ===== If the GeneralizedMultivariateLogGammaOmega is too long to type use, `from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGammaOmega as GMVLGO` """ _value_check((omega.is_square, isinstance(omega, Matrix)), "omega must be a" " square matrix") for val in omega.values(): _value_check((val >= 0, val <= 1), "all values in matrix must be between 0 and 1(both inclusive).") _value_check(omega.diagonal().equals(ones(1, omega.shape[0])), "all the elements of diagonal should be 1.") _value_check((omega.shape[0] == len(lamda), len(lamda) == len(mu)), "lamda, mu should be of same length and omega should " " be of shape (length of lamda, length of mu)") _value_check(len(lamda) > 1,"the distribution should have at least" " two random variables.") delta = Pow(Rational(omega.det()), Rational(1, len(lamda) - 1)) return GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu) #------------------------------------------------------------------------------- # Multinomial distribution --------------------------------------------------------- class MultinomialDistribution(JointDistribution): _argnames = ('n', 'p') is_Continuous=False is_Discrete = True @staticmethod def check(n, p): _value_check(n > 0, "number of trials must be a positive integer") for p_k in p: _value_check((p_k >= 0, p_k <= 1), "probability must be in range [0, 1]") _value_check(Eq(sum(p), 1), "probabilities must sum to 1") @property def set(self): return Intersection(S.Naturals0, Interval(0, self.n))**len(self.p) def pdf(self, *x): n, p = self.n, self.p term_1 = factorial(n)/Mul.fromiter([factorial(x_k) for x_k in x]) term_2 = Mul.fromiter([p_k**x_k for p_k, x_k in zip(p, x)]) return Piecewise((term_1 * term_2, Eq(sum(x), n)), (0, True)) def Multinomial(syms, n, *p): """ Creates a discrete random variable with Multinomial Distribution. The density of the said distribution can be found at [1]. Parameters ========== n: positive integer of class Integer, number of trials p: event probabilites, >= 0 and <= 1 Returns ======= A RandomSymbol. 
Examples ======== >>> from sympy.stats import density >>> from sympy.stats.joint_rv import marginal_distribution >>> from sympy.stats.joint_rv_types import Multinomial >>> from sympy import symbols >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True) >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True) >>> M = Multinomial('M', 3, p1, p2, p3) >>> density(M)(x1, x2, x3) Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)), Eq(x1 + x2 + x3, 3)), (0, True)) >>> marginal_distribution(M, M[0])(x1).subs(x1, 1) 3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2 References ========== .. [1] https://en.wikipedia.org/wiki/Multinomial_distribution .. [2] http://mathworld.wolfram.com/MultinomialDistribution.html """ if not isinstance(p[0], list): p = (list(p), ) return multivariate_rv(MultinomialDistribution, syms, n, p[0]) #------------------------------------------------------------------------------- # Negative Multinomial Distribution --------------------------------------------------------- class NegativeMultinomialDistribution(JointDistribution): _argnames = ('k0', 'p') is_Continuous=False is_Discrete = True @staticmethod def check(k0, p): _value_check(k0 > 0, "number of failures must be a positive integer") for p_k in p: _value_check((p_k >= 0, p_k <= 1), "probability must be in range [0, 1].") _value_check(sum(p) <= 1, "success probabilities must not be greater than 1.") @property def set(self): return Range(0, S.Infinity)**len(self.p) def pdf(self, *k): k0, p = self.k0, self.p term_1 = (gamma(k0 + sum(k))*(1 - sum(p))**k0)/gamma(k0) term_2 = Mul.fromiter([pi**ki/factorial(ki) for pi, ki in zip(p, k)]) return term_1 * term_2 def NegativeMultinomial(syms, k0, *p): """ Creates a discrete random variable with Negative Multinomial Distribution. The density of the said distribution can be found at [1]. Parameters ========== k0: positive integer of class Integer, number of failures before the experiment is stopped p: event probabilites, >= 0 and <= 1 Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import density >>> from sympy.stats.joint_rv import marginal_distribution >>> from sympy.stats.joint_rv_types import NegativeMultinomial >>> from sympy import symbols >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True) >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True) >>> N = NegativeMultinomial('M', 3, p1, p2, p3) >>> N_c = NegativeMultinomial('M', 3, 0.1, 0.1, 0.1) >>> density(N)(x1, x2, x3) p1**x1*p2**x2*p3**x3*(-p1 - p2 - p3 + 1)**3*gamma(x1 + x2 + x3 + 3)/(2*factorial(x1)*factorial(x2)*factorial(x3)) >>> marginal_distribution(N_c, N_c[0])(1).evalf().round(2) 0.25 References ========== .. [1] https://en.wikipedia.org/wiki/Negative_multinomial_distribution .. [2] http://mathworld.wolfram.com/NegativeBinomialDistribution.html """ if not isinstance(p[0], list): p = (list(p), ) return multivariate_rv(NegativeMultinomialDistribution, syms, k0, p[0])
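# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the upstream module).
# It combines the Multinomial and MultivariateBeta docstring examples above;
# the helper name `_demo_joint_rv_queries` is hypothetical.
def _demo_joint_rv_queries():
    from sympy import symbols
    from sympy.stats import density
    from sympy.stats.joint_rv import marginal_distribution
    x1, x2, x3 = symbols('x1 x2 x3', nonnegative=True, integer=True)
    p1, p2, p3 = symbols('p1 p2 p3', positive=True)
    M = Multinomial('M', 3, p1, p2, p3)
    # Joint pmf; nonzero only when the counts x1 + x2 + x3 equal the 3 trials.
    joint_pmf = density(M)(x1, x2, x3)
    # Marginal pmf of the first component, as in the Multinomial docstring.
    marginal = marginal_distribution(M, M[0])(x1)
    a1, a2 = symbols('a1 a2', positive=True)
    x, y = symbols('x y')
    B = MultivariateBeta('B', a1, a2)
    # Dirichlet density: x**(a1 - 1)*y**(a2 - 1)*gamma(a1 + a2)/(gamma(a1)*gamma(a2))
    dirichlet_pdf = density(B)(x, y)
    return joint_pmf, marginal, dirichlet_pdf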
""" Contains ======== Geometric Hermite Logarithmic NegativeBinomial Poisson Skellam YuleSimon Zeta """ from __future__ import print_function, division from sympy import (factorial, exp, S, sympify, I, zeta, polylog, log, beta, hyper, binomial, Piecewise, floor, besseli, sqrt, Sum, Dummy) from sympy.stats import density from sympy.stats.drv import SingleDiscreteDistribution, SingleDiscretePSpace from sympy.stats.joint_rv import JointPSpace, CompoundDistribution from sympy.stats.rv import _value_check, RandomSymbol from sympy.external import import_module numpy = import_module('numpy') scipy = import_module('scipy') pymc3 = import_module('pymc3') __all__ = ['Geometric', 'Hermite', 'Logarithmic', 'NegativeBinomial', 'Poisson', 'Skellam', 'YuleSimon', 'Zeta' ] def rv(symbol, cls, *args): args = list(map(sympify, args)) dist = cls(*args) dist.check(*args) pspace = SingleDiscretePSpace(symbol, dist) if any(isinstance(arg, RandomSymbol) for arg in args): pspace = JointPSpace(symbol, CompoundDistribution(dist)) return pspace.value #------------------------------------------------------------------------------- # Geometric distribution ------------------------------------------------------------ class GeometricDistribution(SingleDiscreteDistribution): _argnames = ('p',) set = S.Naturals @staticmethod def check(p): _value_check((0 < p, p <= 1), "p must be between 0 and 1") def pdf(self, k): return (1 - self.p)**(k - 1) * self.p def _characteristic_function(self, t): p = self.p return p * exp(I*t) / (1 - (1 - p)*exp(I*t)) def _moment_generating_function(self, t): p = self.p return p * exp(t) / (1 - (1 - p) * exp(t)) def _sample_numpy(self, size): p = float(self.p) return numpy.random.geometric(p=p, size=size) def _sample_scipy(self, size): p = float(self.p) from scipy.stats import geom return geom.rvs(p=p, size=size) def _sample_pymc3(self, size): p = float(self.p) with pymc3.Model(): pymc3.Geometric('X', p=p) return pymc3.sample(size, chains=1, progressbar=False)[:]['X'] def Geometric(name, p): r""" Create a discrete random variable with a Geometric distribution. The density of the Geometric distribution is given by .. math:: f(k) := p (1 - p)^{k - 1} Parameters ========== p: A probability between 0 and 1 Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Geometric, density, E, variance >>> from sympy import Symbol, S >>> p = S.One / 5 >>> z = Symbol("z") >>> X = Geometric("x", p) >>> density(X)(z) (4/5)**(z - 1)/5 >>> E(X) 5 >>> variance(X) 20 References ========== .. [1] https://en.wikipedia.org/wiki/Geometric_distribution .. 
[2] http://mathworld.wolfram.com/GeometricDistribution.html """ return rv(name, GeometricDistribution, p) #------------------------------------------------------------------------------- # Hermite distribution --------------------------------------------------------- class HermiteDistribution(SingleDiscreteDistribution): _argnames = ('a1', 'a2') set = S.Naturals0 @staticmethod def check(a1, a2): _value_check(a1.is_nonnegative, 'Parameter a1 must be >= 0.') _value_check(a2.is_nonnegative, 'Parameter a2 must be >= 0.') def pdf(self, k): a1, a2 = self.a1, self.a2 term1 = exp(-(a1 + a2)) j = Dummy("j", integer=True) num = a1**(k - 2*j) * a2**j den = factorial(k - 2*j) * factorial(j) return term1 * Sum(num/den, (j, 0, k//2)).doit() def _moment_generating_function(self, t): a1, a2 = self.a1, self.a2 term1 = a1 * (exp(t) - 1) term2 = a2 * (exp(2*t) - 1) return exp(term1 + term2) def _characteristic_function(self, t): a1, a2 = self.a1, self.a2 term1 = a1 * (exp(I*t) - 1) term2 = a2 * (exp(2*I*t) - 1) return exp(term1 + term2) def Hermite(name, a1, a2): r""" Create a discrete random variable with a Hermite distribution. The density of the Hermite distribution is given by .. math:: f(x):= e^{-a_1 -a_2}\sum_{j=0}^{\left \lfloor x/2 \right \rfloor} \frac{a_{1}^{x-2j}a_{2}^{j}}{(x-2j)!j!} Parameters ========== a1: A Positive number greater than equal to 0. a2: A Positive number greater than equal to 0. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Hermite, density, E, variance >>> from sympy import Symbol >>> a1 = Symbol("a1", positive=True) >>> a2 = Symbol("a2", positive=True) >>> x = Symbol("x") >>> H = Hermite("H", a1=5, a2=4) >>> density(H)(2) 33*exp(-9)/2 >>> E(H) 13 >>> variance(H) 21 References ========== .. [1] https://en.wikipedia.org/wiki/Hermite_distribution """ return rv(name, HermiteDistribution, a1, a2) #------------------------------------------------------------------------------- # Logarithmic distribution ------------------------------------------------------------ class LogarithmicDistribution(SingleDiscreteDistribution): _argnames = ('p',) set = S.Naturals @staticmethod def check(p): _value_check((p > 0, p < 1), "p should be between 0 and 1") def pdf(self, k): p = self.p return (-1) * p**k / (k * log(1 - p)) def _characteristic_function(self, t): p = self.p return log(1 - p * exp(I*t)) / log(1 - p) def _moment_generating_function(self, t): p = self.p return log(1 - p * exp(t)) / log(1 - p) def _sample_scipy(self, size): p = float(self.p) from scipy.stats import logser return logser.rvs(p=p, size=size) def Logarithmic(name, p): r""" Create a discrete random variable with a Logarithmic distribution. The density of the Logarithmic distribution is given by .. math:: f(k) := \frac{-p^k}{k \ln{(1 - p)}} Parameters ========== p: A value between 0 and 1 Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Logarithmic, density, E, variance >>> from sympy import Symbol, S >>> p = S.One / 5 >>> z = Symbol("z") >>> X = Logarithmic("x", p) >>> density(X)(z) -5**(-z)/(z*log(4/5)) >>> E(X) -1/(-4*log(5) + 8*log(2)) >>> variance(X) -1/((-4*log(5) + 8*log(2))*(-2*log(5) + 4*log(2))) + 1/(-64*log(2)*log(5) + 64*log(2)**2 + 16*log(5)**2) - 10/(-32*log(5) + 64*log(2)) References ========== .. [1] https://en.wikipedia.org/wiki/Logarithmic_distribution .. 
[2] http://mathworld.wolfram.com/LogarithmicDistribution.html """ return rv(name, LogarithmicDistribution, p) #------------------------------------------------------------------------------- # Negative binomial distribution ------------------------------------------------------------ class NegativeBinomialDistribution(SingleDiscreteDistribution): _argnames = ('r', 'p') set = S.Naturals0 @staticmethod def check(r, p): _value_check(r > 0, 'r should be positive') _value_check((p > 0, p < 1), 'p should be between 0 and 1') def pdf(self, k): r = self.r p = self.p return binomial(k + r - 1, k) * (1 - p)**r * p**k def _characteristic_function(self, t): r = self.r p = self.p return ((1 - p) / (1 - p * exp(I*t)))**r def _moment_generating_function(self, t): r = self.r p = self.p return ((1 - p) / (1 - p * exp(t)))**r def sample(self): ### TODO raise NotImplementedError("Sampling of %s is not implemented" % density(self)) def NegativeBinomial(name, r, p): r""" Create a discrete random variable with a Negative Binomial distribution. The density of the Negative Binomial distribution is given by .. math:: f(k) := \binom{k + r - 1}{k} (1 - p)^r p^k Parameters ========== r: A positive value p: A value between 0 and 1 Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import NegativeBinomial, density, E, variance >>> from sympy import Symbol, S >>> r = 5 >>> p = S.One / 5 >>> z = Symbol("z") >>> X = NegativeBinomial("x", r, p) >>> density(X)(z) 1024*5**(-z)*binomial(z + 4, z)/3125 >>> E(X) 5/4 >>> variance(X) 25/16 References ========== .. [1] https://en.wikipedia.org/wiki/Negative_binomial_distribution .. [2] http://mathworld.wolfram.com/NegativeBinomialDistribution.html """ return rv(name, NegativeBinomialDistribution, r, p) #------------------------------------------------------------------------------- # Poisson distribution ------------------------------------------------------------ class PoissonDistribution(SingleDiscreteDistribution): _argnames = ('lamda',) set = S.Naturals0 @staticmethod def check(lamda): _value_check(lamda > 0, "Lambda must be positive") def pdf(self, k): return self.lamda**k / factorial(k) * exp(-self.lamda) def _sample_numpy(self, size): lamda = float(self.lamda) return numpy.random.poisson(lam=lamda, size=size) def _sample_scipy(self, size): lamda = float(self.lamda) from scipy.stats import poisson return poisson.rvs(mu=lamda, size=size) def _sample_pymc3(self, size): lamda = float(self.lamda) with pymc3.Model(): pymc3.Poisson('X', mu=lamda) return pymc3.sample(size, chains=1, progressbar=False)[:]['X'] def _characteristic_function(self, t): return exp(self.lamda * (exp(I*t) - 1)) def _moment_generating_function(self, t): return exp(self.lamda * (exp(t) - 1)) def Poisson(name, lamda): r""" Create a discrete random variable with a Poisson distribution. The density of the Poisson distribution is given by .. math:: f(k) := \frac{\lambda^{k} e^{- \lambda}}{k!} Parameters ========== lamda: Positive number, a rate Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Poisson, density, E, variance >>> from sympy import Symbol, simplify >>> rate = Symbol("lambda", positive=True) >>> z = Symbol("z") >>> X = Poisson("x", rate) >>> density(X)(z) lambda**z*exp(-lambda)/factorial(z) >>> E(X) lambda >>> simplify(variance(X)) lambda References ========== .. [1] https://en.wikipedia.org/wiki/Poisson_distribution .. 
[2] http://mathworld.wolfram.com/PoissonDistribution.html """ return rv(name, PoissonDistribution, lamda) # ----------------------------------------------------------------------------- # Skellam distribution -------------------------------------------------------- class SkellamDistribution(SingleDiscreteDistribution): _argnames = ('mu1', 'mu2') set = S.Integers @staticmethod def check(mu1, mu2): _value_check(mu1 >= 0, 'Parameter mu1 must be >= 0') _value_check(mu2 >= 0, 'Parameter mu2 must be >= 0') def pdf(self, k): (mu1, mu2) = (self.mu1, self.mu2) term1 = exp(-(mu1 + mu2)) * (mu1 / mu2) ** (k / 2) term2 = besseli(k, 2 * sqrt(mu1 * mu2)) return term1 * term2 def _cdf(self, x): raise NotImplementedError( "Skellam doesn't have closed form for the CDF.") def _sample_scipy(self, size): mu1, mu2 = float(self.mu1), float(self.mu2) from scipy.stats import skellam return skellam.rvs(mu1=mu1, mu2=mu2, size=size) def _characteristic_function(self, t): (mu1, mu2) = (self.mu1, self.mu2) return exp(-(mu1 + mu2) + mu1 * exp(I * t) + mu2 * exp(-I * t)) def _moment_generating_function(self, t): (mu1, mu2) = (self.mu1, self.mu2) return exp(-(mu1 + mu2) + mu1 * exp(t) + mu2 * exp(-t)) def Skellam(name, mu1, mu2): r""" Create a discrete random variable with a Skellam distribution. The Skellam is the distribution of the difference N1 - N2 of two statistically independent random variables N1 and N2 each Poisson-distributed with respective expected values mu1 and mu2. The density of the Skellam distribution is given by .. math:: f(k) := e^{-(\mu_1+\mu_2)}(\frac{\mu_1}{\mu_2})^{k/2}I_k(2\sqrt{\mu_1\mu_2}) Parameters ========== mu1: A non-negative value mu2: A non-negative value Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Skellam, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> z = Symbol("z", integer=True) >>> mu1 = Symbol("mu1", positive=True) >>> mu2 = Symbol("mu2", positive=True) >>> X = Skellam("x", mu1, mu2) >>> pprint(density(X)(z), use_unicode=False) z - 2 /mu1\ -mu1 - mu2 / _____ _____\ |---| *e *besseli\z, 2*\/ mu1 *\/ mu2 / \mu2/ >>> E(X) mu1 - mu2 >>> variance(X).expand() mu1 + mu2 References ========== .. [1] https://en.wikipedia.org/wiki/Skellam_distribution """ return rv(name, SkellamDistribution, mu1, mu2) #------------------------------------------------------------------------------- # Yule-Simon distribution ------------------------------------------------------------ class YuleSimonDistribution(SingleDiscreteDistribution): _argnames = ('rho',) set = S.Naturals @staticmethod def check(rho): _value_check(rho > 0, 'rho should be positive') def pdf(self, k): rho = self.rho return rho * beta(k, rho + 1) def _cdf(self, x): return Piecewise((1 - floor(x) * beta(floor(x), self.rho + 1), x >= 1), (0, True)) def _characteristic_function(self, t): rho = self.rho return rho * hyper((1, 1), (rho + 2,), exp(I*t)) * exp(I*t) / (rho + 1) def _moment_generating_function(self, t): rho = self.rho return rho * hyper((1, 1), (rho + 2,), exp(t)) * exp(t) / (rho + 1) def _sample_scipy(self, size): rho = float(self.rho) from scipy.stats import yulesimon return yulesimon.rvs(alpha=rho, size=size) def YuleSimon(name, rho): r""" Create a discrete random variable with a Yule-Simon distribution. The density of the Yule-Simon distribution is given by .. 
math:: f(k) := \rho B(k, \rho + 1) Parameters ========== rho: A positive value Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import YuleSimon, density, E, variance >>> from sympy import Symbol, simplify >>> p = 5 >>> z = Symbol("z") >>> X = YuleSimon("x", p) >>> density(X)(z) 5*beta(z, 6) >>> simplify(E(X)) 5/4 >>> simplify(variance(X)) 25/48 References ========== .. [1] https://en.wikipedia.org/wiki/Yule%E2%80%93Simon_distribution """ return rv(name, YuleSimonDistribution, rho) #------------------------------------------------------------------------------- # Zeta distribution ------------------------------------------------------------ class ZetaDistribution(SingleDiscreteDistribution): _argnames = ('s',) set = S.Naturals @staticmethod def check(s): _value_check(s > 1, 's should be greater than 1') def pdf(self, k): s = self.s return 1 / (k**s * zeta(s)) def _characteristic_function(self, t): return polylog(self.s, exp(I*t)) / zeta(self.s) def _moment_generating_function(self, t): return polylog(self.s, exp(t)) / zeta(self.s) def _sample_numpy(self, size): s = float(self.s) return numpy.random.zipf(a=s, size=size) def _sample_scipy(self, size): s = float(self.s) from scipy.stats import zipf return zipf.rvs(a=s, size=size) def Zeta(name, s): r""" Create a discrete random variable with a Zeta distribution. The density of the Zeta distribution is given by .. math:: f(k) := \frac{1}{k^s \zeta{(s)}} Parameters ========== s: A value greater than 1 Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Zeta, density, E, variance >>> from sympy import Symbol >>> s = 5 >>> z = Symbol("z") >>> X = Zeta("x", s) >>> density(X)(z) 1/(z**5*zeta(5)) >>> E(X) pi**4/(90*zeta(5)) >>> variance(X) -pi**8/(8100*zeta(5)**2) + zeta(3)/zeta(5) References ========== .. [1] https://en.wikipedia.org/wiki/Zeta_distribution """ return rv(name, ZetaDistribution, s)
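# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the upstream module).
# It reuses the Geometric and Poisson docstring examples above; the helper
# name `_demo_discrete_rvs` is hypothetical.
def _demo_discrete_rvs():
    from sympy import Symbol, S
    from sympy.stats import E, density
    z = Symbol("z")
    lam = Symbol("lambda", positive=True)
    X = Geometric("x", S.One/5)
    geometric_pmf = density(X)(z)   # (4/5)**(z - 1)/5
    geometric_mean = E(X)           # 1/p = 5
    N = Poisson("n", lam)
    poisson_pmf = density(N)(z)     # lambda**z*exp(-lambda)/factorial(z)
    poisson_mean = E(N)             # lambda
    return geometric_pmf, geometric_mean, poisson_pmf, poisson_mean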
from __future__ import print_function, division from sympy.sets import FiniteSet from sympy import sqrt, log, exp, FallingFactorial, Rational, Eq, Dummy, piecewise_fold, solveset from .rv import (probability, expectation, density, where, given, pspace, cdf, characteristic_function, sample, sample_iter, random_symbols, independent, dependent, sampling_density, moment_generating_function, quantile) __all__ = ['P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf', 'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std', 'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'median', 'independent', 'random_symbols', 'correlation', 'factorial_moment', 'moment', 'cmoment', 'sampling_density', 'moment_generating_function', 'smoment', 'quantile'] def moment(X, n, c=0, condition=None, **kwargs): """ Return the nth moment of a random expression about c i.e. E((X-c)**n) Default value of c is 0. Examples ======== >>> from sympy.stats import Die, moment, E >>> X = Die('X', 6) >>> moment(X, 1, 6) -5/2 >>> moment(X, 2) 91/6 >>> moment(X, 1) == E(X) True """ return expectation((X - c)**n, condition, **kwargs) def variance(X, condition=None, **kwargs): """ Variance of a random expression Expectation of (X-E(X))**2 Examples ======== >>> from sympy.stats import Die, E, Bernoulli, variance >>> from sympy import simplify, Symbol >>> X = Die('X', 6) >>> p = Symbol('p') >>> B = Bernoulli('B', p, 1, 0) >>> variance(2*X) 35/3 >>> simplify(variance(B)) p*(1 - p) """ return cmoment(X, 2, condition, **kwargs) def standard_deviation(X, condition=None, **kwargs): """ Standard Deviation of a random expression Square root of the Expectation of (X-E(X))**2 Examples ======== >>> from sympy.stats import Bernoulli, std >>> from sympy import Symbol, simplify >>> p = Symbol('p') >>> B = Bernoulli('B', p, 1, 0) >>> simplify(std(B)) sqrt(p*(1 - p)) """ return sqrt(variance(X, condition, **kwargs)) std = standard_deviation def entropy(expr, condition=None, **kwargs): """ Calculuates entropy of a probability distribution Parameters ========== expression : the random expression whose entropy is to be calculated condition : optional, to specify conditions on random expression b: base of the logarithm, optional By default, it is taken as Euler's number Returns ======= result : Entropy of the expression, a constant Examples ======== >>> from sympy.stats import Normal, Die, entropy >>> X = Normal('X', 0, 1) >>> entropy(X) log(2)/2 + 1/2 + log(pi)/2 >>> D = Die('D', 4) >>> entropy(D) log(4) References ========== .. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory) .. [2] https://www.crmarsh.com/static/pdf/Charles_Marsh_Continuous_Entropy.pdf .. 
[3] http://www.math.uconn.edu/~kconrad/blurbs/analysis/entropypost.pdf """ pdf = density(expr, condition, **kwargs) base = kwargs.get('b', exp(1)) if hasattr(pdf, 'dict'): return sum([-prob*log(prob, base) for prob in pdf.dict.values()]) return expectation(-log(pdf(expr), base)) def covariance(X, Y, condition=None, **kwargs): """ Covariance of two random expressions The expectation that the two variables will rise and fall together Covariance(X,Y) = E( (X-E(X)) * (Y-E(Y)) ) Examples ======== >>> from sympy.stats import Exponential, covariance >>> from sympy import Symbol >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> X = Exponential('X', rate) >>> Y = Exponential('Y', rate) >>> covariance(X, X) lambda**(-2) >>> covariance(X, Y) 0 >>> covariance(X, Y + rate*X) 1/lambda """ return expectation( (X - expectation(X, condition, **kwargs)) * (Y - expectation(Y, condition, **kwargs)), condition, **kwargs) def correlation(X, Y, condition=None, **kwargs): """ Correlation of two random expressions, also known as correlation coefficient or Pearson's correlation The normalized expectation that the two variables will rise and fall together Correlation(X,Y) = E( (X-E(X)) * (Y-E(Y)) / (sigma(X) * sigma(Y)) ) Examples ======== >>> from sympy.stats import Exponential, correlation >>> from sympy import Symbol >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> X = Exponential('X', rate) >>> Y = Exponential('Y', rate) >>> correlation(X, X) 1 >>> correlation(X, Y) 0 >>> correlation(X, Y + rate*X) 1/sqrt(1 + lambda**(-2)) """ return covariance(X, Y, condition, **kwargs)/(std(X, condition, **kwargs) * std(Y, condition, **kwargs)) def cmoment(X, n, condition=None, **kwargs): """ Return the nth central moment of a random expression about its mean i.e. E((X - E(X))**n) Examples ======== >>> from sympy.stats import Die, cmoment, variance >>> X = Die('X', 6) >>> cmoment(X, 3) 0 >>> cmoment(X, 2) 35/12 >>> cmoment(X, 2) == variance(X) True """ mu = expectation(X, condition, **kwargs) return moment(X, n, mu, condition, **kwargs) def smoment(X, n, condition=None, **kwargs): """ Return the nth Standardized moment of a random expression i.e. E(((X - mu)/sigma(X))**n) Examples ======== >>> from sympy.stats import skewness, Exponential, smoment >>> from sympy import Symbol >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> smoment(Y, 4) 9 >>> smoment(Y, 4) == smoment(3*Y, 4) True >>> smoment(Y, 3) == skewness(Y) True """ sigma = std(X, condition, **kwargs) return (1/sigma)**n*cmoment(X, n, condition, **kwargs) def skewness(X, condition=None, **kwargs): """ Measure of the asymmetry of the probability distribution. Positive skew indicates that most of the values lie to the right of the mean. skewness(X) = E(((X - E(X))/sigma)**3) Parameters ========== condition : Expr containing RandomSymbols A conditional expression. skewness(X, X>0) is skewness of X given X > 0 Examples ======== >>> from sympy.stats import skewness, Exponential, Normal >>> from sympy import Symbol >>> X = Normal('X', 0, 1) >>> skewness(X) 0 >>> skewness(X, X > 0) # find skewness given X > 0 (-sqrt(2)/sqrt(pi) + 4*sqrt(2)/pi**(3/2))/(1 - 2/pi)**(3/2) >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> skewness(Y) 2 """ return smoment(X, 3, condition=condition, **kwargs) def kurtosis(X, condition=None, **kwargs): """ Characterizes the tails/outliers of a probability distribution. 
Kurtosis of any univariate normal distribution is 3. Kurtosis less than 3 means that the distribution produces fewer and less extreme outliers than the normal distribution. kurtosis(X) = E(((X - E(X))/sigma)**4) Parameters ========== condition : Expr containing RandomSymbols A conditional expression. kurtosis(X, X>0) is kurtosis of X given X > 0 Examples ======== >>> from sympy.stats import kurtosis, Exponential, Normal >>> from sympy import Symbol >>> X = Normal('X', 0, 1) >>> kurtosis(X) 3 >>> kurtosis(X, X > 0) # find kurtosis given X > 0 (-4/pi - 12/pi**2 + 3)/(1 - 2/pi)**2 >>> rate = Symbol('lamda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> kurtosis(Y) 9 References ========== .. [1] https://en.wikipedia.org/wiki/Kurtosis .. [2] http://mathworld.wolfram.com/Kurtosis.html """ return smoment(X, 4, condition=condition, **kwargs) def factorial_moment(X, n, condition=None, **kwargs): """ The factorial moment is a mathematical quantity defined as the expectation or average of the falling factorial of a random variable. factorial_moment(X, n) = E(X*(X - 1)*(X - 2)*...*(X - n + 1)) Parameters ========== n: A natural number, n-th factorial moment. condition : Expr containing RandomSymbols A conditional expression. Examples ======== >>> from sympy.stats import factorial_moment, Poisson, Binomial >>> from sympy import Symbol, S >>> lamda = Symbol('lamda') >>> X = Poisson('X', lamda) >>> factorial_moment(X, 2) lamda**2 >>> Y = Binomial('Y', 2, S.Half) >>> factorial_moment(Y, 2) 1/2 >>> factorial_moment(Y, 2, Y > 1) # find factorial moment for Y > 1 2 References ========== .. [1] https://en.wikipedia.org/wiki/Factorial_moment .. [2] http://mathworld.wolfram.com/FactorialMoment.html """ return expectation(FallingFactorial(X, n), condition=condition, **kwargs) def median(X, evaluate=True, **kwargs): r""" Calculuates the median of the probability distribution. Mathematically, median of Probability distribution is defined as all those values of `m` for which the following condition is satisfied .. math:: P(X\geq m)\geq 1/2 \hspace{5} \text{and} \hspace{5} P(X\leq m)\geq 1/2 Parameters ========== X: The random expression whose median is to be calculated. Returns ======= The FiniteSet or an Interval which contains the median of the random expression. Examples ======== >>> from sympy.stats import Normal, Die, median >>> N = Normal('N', 3, 1) >>> median(N) FiniteSet(3) >>> D = Die('D') >>> median(D) FiniteSet(3, 4) References ========== .. [1] https://en.wikipedia.org/wiki/Median#Probability_distributions """ from sympy.stats.crv import ContinuousPSpace from sympy.stats.drv import DiscretePSpace from sympy.stats.frv import FinitePSpace if isinstance(pspace(X), FinitePSpace): cdf = pspace(X).compute_cdf(X) result = [] for key, value in cdf.items(): if value>= Rational(1, 2) and (1 - value) + \ pspace(X).probability(Eq(X, key)) >= Rational(1, 2): result.append(key) return FiniteSet(*result) if isinstance(pspace(X), ContinuousPSpace) or isinstance(pspace(X), DiscretePSpace): cdf = pspace(X).compute_cdf(X) x = Dummy('x') result = solveset(piecewise_fold(cdf(x) - Rational(1, 2)), x, pspace(X).set) return result raise NotImplementedError("The median of %s is not implemeted."%str(pspace(X))) P = probability E = expectation H = entropy
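# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the upstream module).
# It ties the moment helpers above together on a fair die; the helper name
# `_demo_moment_helpers` is hypothetical.
def _demo_moment_helpers():
    from sympy.stats import Die
    X = Die('X', 6)
    first_raw = moment(X, 1)        # 7/2, the same as E(X)
    second_central = cmoment(X, 2)  # 35/12, the same as variance(X)
    third_standard = smoment(X, 3)  # 0, the same as skewness(X) for a symmetric die
    med = median(X)                 # FiniteSet(3, 4)
    return first_raw, second_central, third_standard, med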
3566e414f3d7b71ff10d38190dda27f885c89db8acab3f5af435fa610e591ac3
""" Main Random Variables Module Defines abstract random variable type. Contains interfaces for probability space object (PSpace) as well as standard operators, P, E, sample, density, where, quantile See Also ======== sympy.stats.crv sympy.stats.frv sympy.stats.rv_interface """ from __future__ import print_function, division from typing import Tuple as tTuple from sympy import (Basic, S, Expr, Symbol, Tuple, And, Add, Eq, lambdify, Equality, Lambda, sympify, Dummy, Ne, KroneckerDelta, DiracDelta, Mul, Indexed, MatrixSymbol, Function) from sympy.core.relational import Relational from sympy.core.sympify import _sympify from sympy.logic.boolalg import Boolean from sympy.sets.sets import FiniteSet, ProductSet, Intersection from sympy.solvers.solveset import solveset x = Symbol('x') class RandomDomain(Basic): """ Represents a set of variables and the values which they can take See Also ======== sympy.stats.crv.ContinuousDomain sympy.stats.frv.FiniteDomain """ is_ProductDomain = False is_Finite = False is_Continuous = False is_Discrete = False def __new__(cls, symbols, *args): symbols = FiniteSet(*symbols) return Basic.__new__(cls, symbols, *args) @property def symbols(self): return self.args[0] @property def set(self): return self.args[1] def __contains__(self, other): raise NotImplementedError() def compute_expectation(self, expr): raise NotImplementedError() class SingleDomain(RandomDomain): """ A single variable and its domain See Also ======== sympy.stats.crv.SingleContinuousDomain sympy.stats.frv.SingleFiniteDomain """ def __new__(cls, symbol, set): assert symbol.is_Symbol return Basic.__new__(cls, symbol, set) @property def symbol(self): return self.args[0] @property def symbols(self): return FiniteSet(self.symbol) def __contains__(self, other): if len(other) != 1: return False sym, val = tuple(other)[0] return self.symbol == sym and val in self.set class ConditionalDomain(RandomDomain): """ A RandomDomain with an attached condition See Also ======== sympy.stats.crv.ConditionalContinuousDomain sympy.stats.frv.ConditionalFiniteDomain """ def __new__(cls, fulldomain, condition): condition = condition.xreplace(dict((rs, rs.symbol) for rs in random_symbols(condition))) return Basic.__new__(cls, fulldomain, condition) @property def symbols(self): return self.fulldomain.symbols @property def fulldomain(self): return self.args[0] @property def condition(self): return self.args[1] @property def set(self): raise NotImplementedError("Set of Conditional Domain not Implemented") def as_boolean(self): return And(self.fulldomain.as_boolean(), self.condition) class PSpace(Basic): """ A Probability Space Probability Spaces encode processes that equal different values probabilistically. These underly Random Symbols which occur in SymPy expressions and contain the mechanics to evaluate statistical statements. 
See Also ======== sympy.stats.crv.ContinuousPSpace sympy.stats.frv.FinitePSpace """ is_Finite = None # type: bool is_Continuous = None # type: bool is_Discrete = None # type: bool is_real = None # type: bool @property def domain(self): return self.args[0] @property def density(self): return self.args[1] @property def values(self): return frozenset(RandomSymbol(sym, self) for sym in self.symbols) @property def symbols(self): return self.domain.symbols def where(self, condition): raise NotImplementedError() def compute_density(self, expr): raise NotImplementedError() def sample(self): raise NotImplementedError() def probability(self, condition): raise NotImplementedError() def compute_expectation(self, expr): raise NotImplementedError() class SinglePSpace(PSpace): """ Represents the probabilities of a set of random events that can be attributed to a single variable/symbol. """ def __new__(cls, s, distribution): if isinstance(s, str): s = Symbol(s) if not isinstance(s, Symbol): raise TypeError("s should have been string or Symbol") return Basic.__new__(cls, s, distribution) @property def value(self): return RandomSymbol(self.symbol, self) @property def symbol(self): return self.args[0] @property def distribution(self): return self.args[1] @property def pdf(self): return self.distribution.pdf(self.symbol) class RandomSymbol(Expr): """ Random Symbols represent ProbabilitySpaces in SymPy Expressions In principle they can take on any value that their symbol can take on within the associated PSpace with probability determined by the PSpace Density. Random Symbols contain pspace and symbol properties. The pspace property points to the represented Probability Space The symbol is a standard SymPy Symbol that is used in that probability space for example in defining a density. You can form normal SymPy expressions using RandomSymbols and operate on those expressions with the Functions E - Expectation of a random expression P - Probability of a condition density - Probability Density of an expression given - A new random expression (with new random symbols) given a condition An object of the RandomSymbol type should almost never be created by the user. They tend to be created instead by the PSpace class's value method. Traditionally a user doesn't even do this but instead calls one of the convenience functions Normal, Exponential, Coin, Die, FiniteRV, etc.... 
""" def __new__(cls, symbol, pspace=None): from sympy.stats.joint_rv import JointRandomSymbol if pspace is None: # Allow single arg, representing pspace == PSpace() pspace = PSpace() if not isinstance(symbol, Symbol): raise TypeError("symbol should be of type Symbol") if not isinstance(pspace, PSpace): raise TypeError("pspace variable should be of type PSpace") if cls == JointRandomSymbol and isinstance(pspace, SinglePSpace): cls = RandomSymbol return Basic.__new__(cls, symbol, pspace) is_finite = True is_symbol = True is_Atom = True _diff_wrt = True pspace = property(lambda self: self.args[1]) symbol = property(lambda self: self.args[0]) name = property(lambda self: self.symbol.name) def _eval_is_positive(self): return self.symbol.is_positive def _eval_is_integer(self): return self.symbol.is_integer def _eval_is_real(self): return self.symbol.is_real or self.pspace.is_real @property def is_commutative(self): return self.symbol.is_commutative @property def free_symbols(self): return {self} class RandomIndexedSymbol(RandomSymbol): def __new__(cls, idx_obj, pspace=None): if not isinstance(idx_obj, (Indexed, Function)): raise TypeError("An Function or Indexed object is expected not %s"%(idx_obj)) return Basic.__new__(cls, idx_obj, pspace) symbol = property(lambda self: self.args[0]) name = property(lambda self: str(self.args[0])) @property def key(self): if isinstance(self.symbol, Indexed): return self.symbol.args[1] elif isinstance(self.symbol, Function): return self.symbol.args[0] class RandomMatrixSymbol(MatrixSymbol): def __new__(cls, symbol, n, m, pspace=None): n, m = _sympify(n), _sympify(m) symbol = _symbol_converter(symbol) return Basic.__new__(cls, symbol, n, m, pspace) symbol = property(lambda self: self.args[0]) pspace = property(lambda self: self.args[3]) class ProductPSpace(PSpace): """ Abstract class for representing probability spaces with multiple random variables. See Also ======== sympy.stats.rv.IndependentProductPSpace sympy.stats.joint_rv.JointPSpace """ pass class IndependentProductPSpace(ProductPSpace): """ A probability space resulting from the merger of two independent probability spaces. 
Often created using the function, pspace """ def __new__(cls, *spaces): rs_space_dict = {} for space in spaces: for value in space.values: rs_space_dict[value] = space symbols = FiniteSet(*[val.symbol for val in rs_space_dict.keys()]) # Overlapping symbols from sympy.stats.joint_rv import MarginalDistribution, CompoundDistribution if len(symbols) < sum(len(space.symbols) for space in spaces if not isinstance(space.distribution, ( CompoundDistribution, MarginalDistribution))): raise ValueError("Overlapping Random Variables") if all(space.is_Finite for space in spaces): from sympy.stats.frv import ProductFinitePSpace cls = ProductFinitePSpace obj = Basic.__new__(cls, *FiniteSet(*spaces)) return obj @property def pdf(self): p = Mul(*[space.pdf for space in self.spaces]) return p.subs(dict((rv, rv.symbol) for rv in self.values)) @property def rs_space_dict(self): d = {} for space in self.spaces: for value in space.values: d[value] = space return d @property def symbols(self): return FiniteSet(*[val.symbol for val in self.rs_space_dict.keys()]) @property def spaces(self): return FiniteSet(*self.args) @property def values(self): return sumsets(space.values for space in self.spaces) def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs): rvs = rvs or self.values rvs = frozenset(rvs) for space in self.spaces: expr = space.compute_expectation(expr, rvs & space.values, evaluate=False, **kwargs) if evaluate and hasattr(expr, 'doit'): return expr.doit(**kwargs) return expr @property def domain(self): return ProductDomain(*[space.domain for space in self.spaces]) @property def density(self): raise NotImplementedError("Density not available for ProductSpaces") def sample(self, size=()): return {k: v for space in self.spaces for k, v in space.sample().items()} def probability(self, condition, **kwargs): cond_inv = False if isinstance(condition, Ne): condition = Eq(condition.args[0], condition.args[1]) cond_inv = True expr = condition.lhs - condition.rhs rvs = random_symbols(expr) dens = self.compute_density(expr) if any([pspace(rv).is_Continuous for rv in rvs]): from sympy.stats.crv import (ContinuousDistributionHandmade, SingleContinuousPSpace) if expr in self.values: # Marginalize all other random symbols out of the density randomsymbols = tuple(set(self.values) - frozenset([expr])) symbols = tuple(rs.symbol for rs in randomsymbols) pdf = self.domain.integrate(self.pdf, symbols, **kwargs) return Lambda(expr.symbol, pdf) dens = ContinuousDistributionHandmade(dens) z = Dummy('z', real=True) space = SingleContinuousPSpace(z, dens) result = space.probability(condition.__class__(space.value, 0)) else: from sympy.stats.drv import (DiscreteDistributionHandmade, SingleDiscretePSpace) dens = DiscreteDistributionHandmade(dens) z = Dummy('z', integer=True) space = SingleDiscretePSpace(z, dens) result = space.probability(condition.__class__(space.value, 0)) return result if not cond_inv else S.One - result def compute_density(self, expr, **kwargs): rvs = random_symbols(expr) if any(pspace(rv).is_Continuous for rv in rvs): z = Dummy('z', real=True) expr = self.compute_expectation(DiracDelta(expr - z), **kwargs) else: z = Dummy('z', integer=True) expr = self.compute_expectation(KroneckerDelta(expr, z), **kwargs) return Lambda(z, expr) def compute_cdf(self, expr, **kwargs): raise ValueError("CDF not well defined on multivariate expressions") def conditional_space(self, condition, normalize=True, **kwargs): rvs = random_symbols(condition) condition = condition.xreplace(dict((rv, rv.symbol) for rv in 
self.values)) if any([pspace(rv).is_Continuous for rv in rvs]): from sympy.stats.crv import (ConditionalContinuousDomain, ContinuousPSpace) space = ContinuousPSpace domain = ConditionalContinuousDomain(self.domain, condition) elif any([pspace(rv).is_Discrete for rv in rvs]): from sympy.stats.drv import (ConditionalDiscreteDomain, DiscretePSpace) space = DiscretePSpace domain = ConditionalDiscreteDomain(self.domain, condition) elif all([pspace(rv).is_Finite for rv in rvs]): from sympy.stats.frv import FinitePSpace return FinitePSpace.conditional_space(self, condition) if normalize: replacement = {rv: Dummy(str(rv)) for rv in self.symbols} norm = domain.compute_expectation(self.pdf, **kwargs) pdf = self.pdf / norm.xreplace(replacement) # XXX: Converting symbols from set to tuple. The order matters to # Lambda though so we shouldn't be starting with a set here... density = Lambda(tuple(domain.symbols), pdf) return space(domain, density) class ProductDomain(RandomDomain): """ A domain resulting from the merger of two independent domains See Also ======== sympy.stats.crv.ProductContinuousDomain sympy.stats.frv.ProductFiniteDomain """ is_ProductDomain = True def __new__(cls, *domains): # Flatten any product of products domains2 = [] for domain in domains: if not domain.is_ProductDomain: domains2.append(domain) else: domains2.extend(domain.domains) domains2 = FiniteSet(*domains2) if all(domain.is_Finite for domain in domains2): from sympy.stats.frv import ProductFiniteDomain cls = ProductFiniteDomain if all(domain.is_Continuous for domain in domains2): from sympy.stats.crv import ProductContinuousDomain cls = ProductContinuousDomain if all(domain.is_Discrete for domain in domains2): from sympy.stats.drv import ProductDiscreteDomain cls = ProductDiscreteDomain return Basic.__new__(cls, *domains2) @property def sym_domain_dict(self): return dict((symbol, domain) for domain in self.domains for symbol in domain.symbols) @property def symbols(self): return FiniteSet(*[sym for domain in self.domains for sym in domain.symbols]) @property def domains(self): return self.args @property def set(self): return ProductSet(*(domain.set for domain in self.domains)) def __contains__(self, other): # Split event into each subdomain for domain in self.domains: # Collect the parts of this event which associate to this domain elem = frozenset([item for item in other if sympify(domain.symbols.contains(item[0])) is S.true]) # Test this sub-event if elem not in domain: return False # All subevents passed return True def as_boolean(self): return And(*[domain.as_boolean() for domain in self.domains]) def random_symbols(expr): """ Returns all RandomSymbols within a SymPy Expression. """ atoms = getattr(expr, 'atoms', None) if atoms is not None: comp = lambda rv: rv.symbol.name l = list(atoms(RandomSymbol)) return sorted(l, key=comp) else: return [] def pspace(expr): """ Returns the underlying Probability Space of a random expression. For internal use. 
Examples ======== >>> from sympy.stats import pspace, Normal >>> from sympy.stats.rv import IndependentProductPSpace >>> X = Normal('X', 0, 1) >>> pspace(2*X + 1) == X.pspace True """ expr = sympify(expr) if isinstance(expr, RandomSymbol) and expr.pspace is not None: return expr.pspace if expr.has(RandomMatrixSymbol): rm = list(expr.atoms(RandomMatrixSymbol))[0] return rm.pspace rvs = random_symbols(expr) if not rvs: raise ValueError("Expression containing Random Variable expected, not %s" % (expr)) # If only one space present if all(rv.pspace == rvs[0].pspace for rv in rvs): return rvs[0].pspace # Otherwise make a product space return IndependentProductPSpace(*[rv.pspace for rv in rvs]) def sumsets(sets): """ Union of sets """ return frozenset().union(*sets) def rs_swap(a, b): """ Build a dictionary to swap RandomSymbols based on their underlying symbol. i.e. if ``X = ('x', pspace1)`` and ``Y = ('x', pspace2)`` then ``X`` and ``Y`` match and the key, value pair ``{X:Y}`` will appear in the result Inputs: collections a and b of random variables which share common symbols Output: dict mapping RVs in a to RVs in b """ d = {} for rsa in a: d[rsa] = [rsb for rsb in b if rsa.symbol == rsb.symbol][0] return d def given(expr, condition=None, **kwargs): r""" Conditional Random Expression From a random expression and a condition on that expression creates a new probability space from the condition and returns the same expression on that conditional probability space. Examples ======== >>> from sympy.stats import given, density, Die >>> X = Die('X', 6) >>> Y = given(X, X > 3) >>> density(Y).dict {4: 1/3, 5: 1/3, 6: 1/3} Following convention, if the condition is a random symbol then that symbol is considered fixed. >>> from sympy.stats import Normal >>> from sympy import pprint >>> from sympy.abc import z >>> X = Normal('X', 0, 1) >>> Y = Normal('Y', 0, 1) >>> pprint(density(X + Y, Y)(z), use_unicode=False) 2 -(-Y + z) ----------- ___ 2 \/ 2 *e ------------------ ____ 2*\/ pi """ if not random_symbols(condition) or pspace_independent(expr, condition): return expr if isinstance(condition, RandomSymbol): condition = Eq(condition, condition.symbol) condsymbols = random_symbols(condition) if (isinstance(condition, Equality) and len(condsymbols) == 1 and not isinstance(pspace(expr).domain, ConditionalDomain)): rv = tuple(condsymbols)[0] results = solveset(condition, rv) if isinstance(results, Intersection) and S.Reals in results.args: results = list(results.args[1]) sums = 0 for res in results: temp = expr.subs(rv, res) if temp == True: return True if temp != False: # XXX: This seems nonsensical but preserves existing behaviour # after the change that Relational is no longer a subclass of # Expr. Here expr is sometimes Relational and sometimes Expr # but we are trying to add them with +=. This needs to be # fixed somehow. 
if sums == 0 and isinstance(expr, Relational): sums = expr.subs(rv, res) else: sums += expr.subs(rv, res) if sums == 0: return False return sums # Get full probability space of both the expression and the condition fullspace = pspace(Tuple(expr, condition)) # Build new space given the condition space = fullspace.conditional_space(condition, **kwargs) # Dictionary to swap out RandomSymbols in expr with new RandomSymbols # That point to the new conditional space swapdict = rs_swap(fullspace.values, space.values) # Swap random variables in the expression expr = expr.xreplace(swapdict) return expr def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs): """ Returns the expected value of a random expression Parameters ========== expr : Expr containing RandomSymbols The expression of which you want to compute the expectation value given : Expr containing RandomSymbols A conditional expression. E(X, X>0) is expectation of X given X > 0 numsamples : int Enables sampling and approximates the expectation with this many samples evalf : Bool (defaults to True) If sampling return a number rather than a complex expression evaluate : Bool (defaults to True) In case of continuous systems return unevaluated integral Examples ======== >>> from sympy.stats import E, Die >>> X = Die('X', 6) >>> E(X) 7/2 >>> E(2*X + 1) 8 >>> E(X, X > 3) # Expectation of X given that it is above 3 5 """ if not random_symbols(expr): # expr isn't random? return expr if numsamples: # Computing by monte carlo sampling? return sampling_E(expr, condition, numsamples=numsamples) if expr.has(RandomIndexedSymbol): return pspace(expr).compute_expectation(expr, condition, evaluate, **kwargs) # Create new expr and recompute E if condition is not None: # If there is a condition return expectation(given(expr, condition), evaluate=evaluate) # A few known statements for efficiency if expr.is_Add: # We know that E is Linear return Add(*[expectation(arg, evaluate=evaluate) for arg in expr.args]) # Otherwise case is simple, pass work off to the ProbabilitySpace result = pspace(expr).compute_expectation(expr, evaluate=evaluate, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit(**kwargs) else: return result def probability(condition, given_condition=None, numsamples=None, evaluate=True, **kwargs): """ Probability that a condition is true, optionally given a second condition Parameters ========== condition : Combination of Relationals containing RandomSymbols The condition of which you want to compute the probability given_condition : Combination of Relationals containing RandomSymbols A conditional expression. 
P(X > 1, X > 0) is expectation of X > 1 given X > 0 numsamples : int Enables sampling and approximates the probability with this many samples evaluate : Bool (defaults to True) In case of continuous systems return unevaluated integral Examples ======== >>> from sympy.stats import P, Die >>> from sympy import Eq >>> X, Y = Die('X', 6), Die('Y', 6) >>> P(X > 3) 1/2 >>> P(Eq(X, 5), X > 2) # Probability that X == 5 given that X > 2 1/4 >>> P(X > Y) 5/12 """ condition = sympify(condition) given_condition = sympify(given_condition) if condition.has(RandomIndexedSymbol): return pspace(condition).probability(condition, given_condition, evaluate, **kwargs) if isinstance(given_condition, RandomSymbol): condrv = random_symbols(condition) if len(condrv) == 1 and condrv[0] == given_condition: from sympy.stats.frv_types import BernoulliDistribution return BernoulliDistribution(probability(condition), 0, 1) if any([dependent(rv, given_condition) for rv in condrv]): from sympy.stats.symbolic_probability import Probability return Probability(condition, given_condition) else: return probability(condition) if given_condition is not None and \ not isinstance(given_condition, (Relational, Boolean)): raise ValueError("%s is not a relational or combination of relationals" % (given_condition)) if given_condition == False: return S.Zero if not isinstance(condition, (Relational, Boolean)): raise ValueError("%s is not a relational or combination of relationals" % (condition)) if condition is S.true: return S.One if condition is S.false: return S.Zero if numsamples: return sampling_P(condition, given_condition, numsamples=numsamples, **kwargs) if given_condition is not None: # If there is a condition # Recompute on new conditional expr return probability(given(condition, given_condition, **kwargs), **kwargs) # Otherwise pass work off to the ProbabilitySpace result = pspace(condition).probability(condition, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit() else: return result class Density(Basic): expr = property(lambda self: self.args[0]) @property def condition(self): if len(self.args) > 1: return self.args[1] else: return None def doit(self, evaluate=True, **kwargs): from sympy.stats.joint_rv import JointPSpace from sympy.stats.frv import SingleFiniteDistribution expr, condition = self.expr, self.condition if _sympify(expr).has(RandomMatrixSymbol): return pspace(expr).compute_density(expr) if isinstance(expr, SingleFiniteDistribution): return expr.dict if condition is not None: # Recompute on new conditional expr expr = given(expr, condition, **kwargs) if isinstance(expr, RandomSymbol) and \ isinstance(expr.pspace, JointPSpace): return expr.pspace.distribution if not random_symbols(expr): return Lambda(x, DiracDelta(x - expr)) if (isinstance(expr, RandomSymbol) and hasattr(expr.pspace, 'distribution') and isinstance(pspace(expr), (SinglePSpace))): return expr.pspace.distribution result = pspace(expr).compute_density(expr, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit() else: return result def density(expr, condition=None, evaluate=True, numsamples=None, **kwargs): """ Probability density of a random expression, optionally given a second condition. This density will take on different forms for different types of probability spaces. Discrete variables produce Dicts. Continuous variables produce Lambdas. 
Parameters ========== expr : Expr containing RandomSymbols The expression of which you want to compute the density value condition : Relational containing RandomSymbols A conditional expression. density(X > 1, X > 0) is density of X > 1 given X > 0 numsamples : int Enables sampling and approximates the density with this many samples Examples ======== >>> from sympy.stats import density, Die, Normal >>> from sympy import Symbol >>> x = Symbol('x') >>> D = Die('D', 6) >>> X = Normal(x, 0, 1) >>> density(D).dict {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6} >>> density(2*D).dict {2: 1/6, 4: 1/6, 6: 1/6, 8: 1/6, 10: 1/6, 12: 1/6} >>> density(X)(x) sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) """ if numsamples: return sampling_density(expr, condition, numsamples=numsamples, **kwargs) return Density(expr, condition).doit(evaluate=evaluate, **kwargs) def cdf(expr, condition=None, evaluate=True, **kwargs): """ Cumulative Distribution Function of a random expression. optionally given a second condition This density will take on different forms for different types of probability spaces. Discrete variables produce Dicts. Continuous variables produce Lambdas. Examples ======== >>> from sympy.stats import density, Die, Normal, cdf >>> D = Die('D', 6) >>> X = Normal('X', 0, 1) >>> density(D).dict {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6} >>> cdf(D) {1: 1/6, 2: 1/3, 3: 1/2, 4: 2/3, 5: 5/6, 6: 1} >>> cdf(3*D, D > 2) {9: 1/4, 12: 1/2, 15: 3/4, 18: 1} >>> cdf(X) Lambda(_z, erf(sqrt(2)*_z/2)/2 + 1/2) """ if condition is not None: # If there is a condition # Recompute on new conditional expr return cdf(given(expr, condition, **kwargs), **kwargs) # Otherwise pass work off to the ProbabilitySpace result = pspace(expr).compute_cdf(expr, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit() else: return result def characteristic_function(expr, condition=None, evaluate=True, **kwargs): """ Characteristic function of a random expression, optionally given a second condition Returns a Lambda Examples ======== >>> from sympy.stats import Normal, DiscreteUniform, Poisson, characteristic_function >>> X = Normal('X', 0, 1) >>> characteristic_function(X) Lambda(_t, exp(-_t**2/2)) >>> Y = DiscreteUniform('Y', [1, 2, 7]) >>> characteristic_function(Y) Lambda(_t, exp(7*_t*I)/3 + exp(2*_t*I)/3 + exp(_t*I)/3) >>> Z = Poisson('Z', 2) >>> characteristic_function(Z) Lambda(_t, exp(2*exp(_t*I) - 2)) """ if condition is not None: return characteristic_function(given(expr, condition, **kwargs), **kwargs) result = pspace(expr).compute_characteristic_function(expr, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit() else: return result def moment_generating_function(expr, condition=None, evaluate=True, **kwargs): if condition is not None: return moment_generating_function(given(expr, condition, **kwargs), **kwargs) result = pspace(expr).compute_moment_generating_function(expr, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit() else: return result def where(condition, given_condition=None, **kwargs): """ Returns the domain where a condition is True. 
Examples ======== >>> from sympy.stats import where, Die, Normal >>> from sympy import symbols, And >>> D1, D2 = Die('a', 6), Die('b', 6) >>> a, b = D1.symbol, D2.symbol >>> X = Normal('x', 0, 1) >>> where(X**2<1) Domain: (-1 < x) & (x < 1) >>> where(X**2<1).set Interval.open(-1, 1) >>> where(And(D1<=D2 , D2<3)) Domain: (Eq(a, 1) & Eq(b, 1)) | (Eq(a, 1) & Eq(b, 2)) | (Eq(a, 2) & Eq(b, 2)) """ if given_condition is not None: # If there is a condition # Recompute on new conditional expr return where(given(condition, given_condition, **kwargs), **kwargs) # Otherwise pass work off to the ProbabilitySpace return pspace(condition).where(condition, **kwargs) def sample(expr, condition=None, size=(), **kwargs): """ A realization of the random expression Examples ======== >>> from sympy.stats import Die, sample, Normal >>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6) >>> die_roll = sample(X + Y + Z) # A random realization of three dice >>> N = Normal('N', 3, 4) >>> samp = sample(N) >>> samp in N.pspace.domain.set True >>> samp_list = sample(N, size=4) >>> [sam in N.pspace.domain.set for sam in samp_list] [True, True, True, True] """ return next(sample_iter(expr, condition, size=size, numsamples=1)) def sample_iter(expr, condition=None, size=(), numsamples=S.Infinity, **kwargs): """ Returns an iterator of realizations from the expression given a condition Parameters ========== expr: Expr Random expression to be realized condition: Expr, optional A conditional expression numsamples: integer, optional Length of the iterator (defaults to infinity) Examples ======== >>> from sympy.stats import Normal, sample_iter >>> X = Normal('X', 0, 1) >>> expr = X*X + 3 >>> iterator = sample_iter(expr, numsamples=3) >>> list(iterator) # doctest: +SKIP [12, 4, 7] See Also ======== sample sampling_P sampling_E sample_iter_lambdify sample_iter_subs """ # lambdify is much faster but not as robust try: return sample_iter_lambdify(expr, condition, size=size, numsamples=numsamples, **kwargs) # use subs when lambdify fails except TypeError: return sample_iter_subs(expr, condition, size=size, numsamples=numsamples, **kwargs) def quantile(expr, evaluate=True, **kwargs): r""" Return the :math:`p^{th}` order quantile of a probability distribution. Quantile is defined as the value at which the probability of the random variable is less than or equal to the given probability. ..math:: Q(p) = inf{x \in (-\infty, \infty) such that p <= F(x)} Examples ======== >>> from sympy.stats import quantile, Die, Exponential >>> from sympy import Symbol, pprint >>> p = Symbol("p") >>> l = Symbol("lambda", positive=True) >>> X = Exponential("x", l) >>> quantile(X)(p) -log(1 - p)/lambda >>> D = Die("d", 6) >>> pprint(quantile(D)(p), use_unicode=False) /nan for Or(p > 1, p < 0) | | 1 for p <= 1/6 | | 2 for p <= 1/3 | < 3 for p <= 1/2 | | 4 for p <= 2/3 | | 5 for p <= 5/6 | \ 6 for p <= 1 """ result = pspace(expr).compute_quantile(expr, **kwargs) if evaluate and hasattr(result, 'doit'): return result.doit() else: return result def sample_iter_lambdify(expr, condition=None, size=(), numsamples=S.Infinity, **kwargs): """ Uses lambdify for computation. This is fast but does not always work. 
See Also ======== sample_iter """ if condition: ps = pspace(Tuple(expr, condition)) else: ps = pspace(expr) rvs = list(ps.values) fn = lambdify(rvs, expr, **kwargs) if condition: given_fn = lambdify(rvs, condition, **kwargs) # Check that lambdify can handle the expression # Some operations like Sum can prove difficult d = ps.sample(size) # a dictionary that maps RVs to values args = [d[rv] for rv in rvs] fn(*args) if condition: given_fn(*args) def return_generator(): count = 0 while count < numsamples: d = ps.sample(size) # a dictionary that maps RVs to values args = [d[rv] for rv in rvs] if condition: # Check that these values satisfy the condition gd = given_fn(*args) if gd != True and gd != False: raise ValueError( "Conditions must not contain free symbols") if not gd: # If the values don't satisfy then try again continue yield fn(*args) count += 1 return return_generator() def sample_iter_subs(expr, condition=None, size=(), numsamples=S.Infinity, **kwargs): """ Uses subs for computation. This is slow but almost always works. See Also ======== sample_iter """ if condition is not None: ps = pspace(Tuple(expr, condition)) else: ps = pspace(expr) count = 0 while count < numsamples: d = ps.sample(size) # a dictionary that maps RVs to values if condition is not None: # Check that these values satisfy the condition gd = condition.xreplace(d) if gd != True and gd != False: raise ValueError("Conditions must not contain free symbols") if not gd: # If the values don't satisfy then try again continue yield expr.xreplace(d) count += 1 def sampling_P(condition, given_condition=None, numsamples=1, evalf=True, **kwargs): """ Sampling version of P See Also ======== P sampling_E sampling_density """ count_true = 0 count_false = 0 samples = sample_iter(condition, given_condition, numsamples=numsamples, **kwargs) for sample in samples: if sample != True and sample != False: raise ValueError("Conditions must not contain free symbols") if sample: count_true += 1 else: count_false += 1 result = S(count_true) / numsamples if evalf: return result.evalf() else: return result def sampling_E(expr, given_condition=None, numsamples=1, evalf=True, **kwargs): """ Sampling version of E See Also ======== P sampling_P sampling_density """ samples = sample_iter(expr, given_condition, numsamples=numsamples, **kwargs) result = Add(*list(samples)) / numsamples if evalf: return result.evalf() else: return result def sampling_density(expr, given_condition=None, numsamples=1, **kwargs): """ Sampling version of density See Also ======== density sampling_P sampling_E """ results = {} for result in sample_iter(expr, given_condition, numsamples=numsamples, **kwargs): results[result] = results.get(result, 0) + 1 return results def dependent(a, b): """ Dependence of two random expressions Two expressions are independent if knowledge of one does not change computations on the other. 
Examples ======== >>> from sympy.stats import Normal, dependent, given >>> from sympy import Tuple, Eq >>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1) >>> dependent(X, Y) False >>> dependent(2*X + Y, -Y) True >>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3)) >>> dependent(X, Y) True See Also ======== independent """ if pspace_independent(a, b): return False z = Symbol('z', real=True) # Dependent if density is unchanged when one is given information about # the other return (density(a, Eq(b, z)) != density(a) or density(b, Eq(a, z)) != density(b)) def independent(a, b): """ Independence of two random expressions Two expressions are independent if knowledge of one does not change computations on the other. Examples ======== >>> from sympy.stats import Normal, independent, given >>> from sympy import Tuple, Eq >>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1) >>> independent(X, Y) True >>> independent(2*X + Y, -Y) False >>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3)) >>> independent(X, Y) False See Also ======== dependent """ return not dependent(a, b) def pspace_independent(a, b): """ Tests for independence between a and b by checking if their PSpaces have overlapping symbols. This is a sufficient but not necessary condition for independence and is intended to be used internally. Notes ===== pspace_independent(a, b) implies independent(a, b) independent(a, b) does not imply pspace_independent(a, b) """ a_symbols = set(pspace(b).symbols) b_symbols = set(pspace(a).symbols) if len(set(random_symbols(a)).intersection(random_symbols(b))) != 0: return False if len(a_symbols.intersection(b_symbols)) == 0: return True return None def rv_subs(expr, symbols=None): """ Given a random expression replace all random variables with their symbols. If symbols keyword is given restrict the swap to only the symbols listed. """ if symbols is None: symbols = random_symbols(expr) if not symbols: return expr swapdict = {rv: rv.symbol for rv in symbols} return expr.subs(swapdict) class NamedArgsMixin(object): _argnames = () # type: tTuple[str, ...] def __getattr__(self, attr): try: return self.args[self._argnames.index(attr)] except ValueError: raise AttributeError("'%s' object has no attribute '%s'" % ( type(self).__name__, attr)) def _value_check(condition, message): """ Raise a ValueError with message if condition is False, else return True if all conditions were True, else False. Examples ======== >>> from sympy.stats.rv import _value_check >>> from sympy.abc import a, b, c >>> from sympy import And, Dummy >>> _value_check(2 < 3, '') True Here, the condition is not False, but it doesn't evaluate to True so False is returned (but no error is raised). So checking if the return value is True or False will tell you if all conditions were evaluated. >>> _value_check(a < b, '') False In this case the condition is False so an error is raised: >>> r = Dummy(real=True) >>> _value_check(r < r - 1, 'condition is not true') Traceback (most recent call last): ... 
ValueError: condition is not true If no condition of many conditions must be False, they can be checked by passing them as an iterable: >>> _value_check((a < 0, b < 0, c < 0), '') False The iterable can be a generator, too: >>> _value_check((i < 0 for i in (a, b, c)), '') False The following are equivalent to the above but do not pass an iterable: >>> all(_value_check(i < 0, '') for i in (a, b, c)) False >>> _value_check(And(a < 0, b < 0, c < 0), '') False """ from sympy.core.compatibility import iterable from sympy.core.logic import fuzzy_and if not iterable(condition): condition = [condition] truth = fuzzy_and(condition) if truth == False: raise ValueError(message) return truth == True def _symbol_converter(sym): """ Casts the parameter to Symbol if it is 'str' otherwise no operation is performed on it. Parameters ========== sym The parameter to be converted. Returns ======= Symbol the parameter converted to Symbol. Raises ====== TypeError If the parameter is not an instance of both str and Symbol. Examples ======== >>> from sympy import Symbol >>> from sympy.stats.rv import _symbol_converter >>> s = _symbol_converter('s') >>> isinstance(s, Symbol) True >>> _symbol_converter(1) Traceback (most recent call last): ... TypeError: 1 is neither a Symbol nor a string >>> r = Symbol('r') >>> isinstance(r, Symbol) True """ if isinstance(sym, str): sym = Symbol(sym) if not isinstance(sym, Symbol): raise TypeError("%s is neither a Symbol nor a string"%(sym)) return sym
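# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how the public helpers
# defined above fit together -- given() rebuilds a random expression on a
# conditional probability space, and density(), P() and E() then operate on
# the conditioned expression.  Expected values come from the docstrings of
# given(), expectation(), probability() and where(); the wrapper name is
# illustrative only.
# ---------------------------------------------------------------------------
def _rv_usage_sketch():
    from sympy import S, Rational, Interval
    from sympy.stats import Die, Normal, given, density, where, P, E

    X = Die('X', 6)
    Y = given(X, X > 3)          # same expression, rebuilt on the conditional space
    d = density(Y).dict          # {4: 1/3, 5: 1/3, 6: 1/3} per the given() docstring
    assert d[4] == Rational(1, 3)

    assert P(X > 3) == S.Half    # probability of a relational condition
    assert E(X, X > 3) == 5      # conditional expectation routed through given()

    N = Normal('N', 0, 1)
    assert where(N**2 < 1).set == Interval.open(-1, 1)  # domain on which the condition holds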
773b0fb978845bc614becbd6e5230b38220df80207b2bb7780dde5209de643b9
""" Joint Random Variables Module See Also ======== sympy.stats.rv sympy.stats.frv sympy.stats.crv sympy.stats.drv """ from __future__ import print_function, division from sympy import (Basic, Lambda, sympify, Indexed, Symbol, ProductSet, S, Dummy) from sympy.concrete.products import Product from sympy.concrete.summations import Sum, summation from sympy.core.compatibility import iterable from sympy.core.containers import Tuple from sympy.integrals.integrals import Integral, integrate from sympy.matrices import ImmutableMatrix from sympy.stats.crv import (ContinuousDistribution, SingleContinuousDistribution, SingleContinuousPSpace) from sympy.stats.drv import (DiscreteDistribution, SingleDiscreteDistribution, SingleDiscretePSpace) from sympy.stats.rv import (ProductPSpace, NamedArgsMixin, ProductDomain, RandomSymbol, random_symbols, SingleDomain) from sympy.utilities.misc import filldedent # __all__ = ['marginal_distribution'] class JointPSpace(ProductPSpace): """ Represents a joint probability space. Represented using symbols for each component and a distribution. """ def __new__(cls, sym, dist): if isinstance(dist, SingleContinuousDistribution): return SingleContinuousPSpace(sym, dist) if isinstance(dist, SingleDiscreteDistribution): return SingleDiscretePSpace(sym, dist) if isinstance(sym, str): sym = Symbol(sym) if not isinstance(sym, Symbol): raise TypeError("s should have been string or Symbol") return Basic.__new__(cls, sym, dist) @property def set(self): return self.domain.set @property def symbol(self): return self.args[0] @property def distribution(self): return self.args[1] @property def value(self): return JointRandomSymbol(self.symbol, self) @property def component_count(self): _set = self.distribution.set if isinstance(_set, ProductSet): return S(len(_set.args)) elif isinstance(_set, Product): return _set.limits[0][-1] return S.One @property def pdf(self): sym = [Indexed(self.symbol, i) for i in range(self.component_count)] return self.distribution(*sym) @property def domain(self): rvs = random_symbols(self.distribution) if not rvs: return SingleDomain(self.symbol, self.distribution.set) return ProductDomain(*[rv.pspace.domain for rv in rvs]) def component_domain(self, index): return self.set.args[index] def marginal_distribution(self, *indices): count = self.component_count if count.atoms(Symbol): raise ValueError("Marginal distributions cannot be computed " "for symbolic dimensions. 
It is a work under progress.") orig = [Indexed(self.symbol, i) for i in range(count)] all_syms = [Symbol(str(i)) for i in orig] replace_dict = dict(zip(all_syms, orig)) sym = tuple(Symbol(str(Indexed(self.symbol, i))) for i in indices) limits = list([i,] for i in all_syms if i not in sym) index = 0 for i in range(count): if i not in indices: limits[index].append(self.distribution.set.args[i]) limits[index] = tuple(limits[index]) index += 1 if self.distribution.is_Continuous: f = Lambda(sym, integrate(self.distribution(*all_syms), *limits)) elif self.distribution.is_Discrete: f = Lambda(sym, summation(self.distribution(*all_syms), *limits)) return f.xreplace(replace_dict) def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs): syms = tuple(self.value[i] for i in range(self.component_count)) rvs = rvs or syms if not any([i in rvs for i in syms]): return expr expr = expr*self.pdf for rv in rvs: if isinstance(rv, Indexed): expr = expr.xreplace({rv: Indexed(str(rv.base), rv.args[1])}) elif isinstance(rv, RandomSymbol): expr = expr.xreplace({rv: rv.symbol}) if self.value in random_symbols(expr): raise NotImplementedError(filldedent(''' Expectations of expression with unindexed joint random symbols cannot be calculated yet.''')) limits = tuple((Indexed(str(rv.base),rv.args[1]), self.distribution.set.args[rv.args[1]]) for rv in syms) return Integral(expr, *limits) def where(self, condition): raise NotImplementedError() def compute_density(self, expr): raise NotImplementedError() def sample(self): raise NotImplementedError() def probability(self, condition): raise NotImplementedError() class JointDistribution(Basic, NamedArgsMixin): """ Represented by the random variables part of the joint distribution. Contains methods for PDF, CDF, sampling, marginal densities, etc. """ _argnames = ('pdf', ) def __new__(cls, *args): args = list(map(sympify, args)) for i in range(len(args)): if isinstance(args[i], list): args[i] = ImmutableMatrix(args[i]) return Basic.__new__(cls, *args) @property def domain(self): return ProductDomain(self.symbols) @property def pdf(self): return self.density.args[1] def cdf(self, other): if not isinstance(other, dict): raise ValueError("%s should be of type dict, got %s"%(other, type(other))) rvs = other.keys() _set = self.domain.set.sets expr = self.pdf(tuple(i.args[0] for i in self.symbols)) for i in range(len(other)): if rvs[i].is_Continuous: density = Integral(expr, (rvs[i], _set[i].inf, other[rvs[i]])) elif rvs[i].is_Discrete: density = Sum(expr, (rvs[i], _set[i].inf, other[rvs[i]])) return density def __call__(self, *args): return self.pdf(*args) class JointRandomSymbol(RandomSymbol): """ Representation of random symbols with joint probability distributions to allow indexing." """ def __getitem__(self, key): if isinstance(self.pspace, JointPSpace): if (self.pspace.component_count <= key) == True: raise ValueError("Index keys for %s can only up to %s." % (self.name, self.pspace.component_count - 1)) return Indexed(self, key) class JointDistributionHandmade(JointDistribution, NamedArgsMixin): _argnames = ('pdf',) is_Continuous = True @property def set(self): return self.args[1] def marginal_distribution(rv, *indices): """ Marginal distribution function of a joint random variable. Parameters ========== rv: A random variable with a joint probability distribution. indices: component indices or the indexed random symbol for whom the joint distribution is to be calculated Returns ======= A Lambda expression n `sym`. 
Examples ======== >>> from sympy.stats.crv_types import Normal >>> from sympy.stats.joint_rv import marginal_distribution >>> m = Normal('X', [1, 2], [[2, 1], [1, 2]]) >>> marginal_distribution(m, m[0])(1) 1/(2*sqrt(pi)) """ indices = list(indices) for i in range(len(indices)): if isinstance(indices[i], Indexed): indices[i] = indices[i].args[1] prob_space = rv.pspace if not indices: raise ValueError( "At least one component for marginal density is needed.") if hasattr(prob_space.distribution, 'marginal_distribution'): return prob_space.distribution.marginal_distribution(indices, rv.symbol) return prob_space.marginal_distribution(*indices) class CompoundDistribution(Basic, NamedArgsMixin): """ Represents a compound probability distribution. Constructed using a single probability distribution with a parameter distributed according to some given distribution. """ def __new__(cls, dist): if not isinstance(dist, (ContinuousDistribution, DiscreteDistribution)): raise ValueError(filldedent('''CompoundDistribution can only be initialized from ContinuousDistribution or DiscreteDistribution ''')) _args = dist.args if not any([isinstance(i, RandomSymbol) for i in _args]): return dist return Basic.__new__(cls, dist) @property def latent_distributions(self): return random_symbols(self.args[0]) def pdf(self, *x): dist = self.args[0] z = Dummy('z') if isinstance(dist, ContinuousDistribution): rv = SingleContinuousPSpace(z, dist).value elif isinstance(dist, DiscreteDistribution): rv = SingleDiscretePSpace(z, dist).value return MarginalDistribution(self, (rv,)).pdf(*x) def set(self): return self.args[0].set def __call__(self, *args): return self.pdf(*args) class MarginalDistribution(Basic): """ Represents the marginal distribution of a joint probability space. Initialised using a probability distribution and random variables(or their indexed components) which should be a part of the resultant distribution. 
""" def __new__(cls, dist, *rvs): if len(rvs) == 1 and iterable(rvs[0]): rvs = tuple(rvs[0]) if not all([isinstance(rv, (Indexed, RandomSymbol))] for rv in rvs): raise ValueError(filldedent('''Marginal distribution can be intitialised only in terms of random variables or indexed random variables''')) rvs = Tuple.fromiter(rv for rv in rvs) if not isinstance(dist, JointDistribution) and len(random_symbols(dist)) == 0: return dist return Basic.__new__(cls, dist, rvs) def check(self): pass @property def set(self): rvs = [i for i in self.args[1] if isinstance(i, RandomSymbol)] return ProductSet(*[rv.pspace.set for rv in rvs]) @property def symbols(self): rvs = self.args[1] return set([rv.pspace.symbol for rv in rvs]) def pdf(self, *x): expr, rvs = self.args[0], self.args[1] marginalise_out = [i for i in random_symbols(expr) if i not in rvs] if isinstance(expr, CompoundDistribution): syms = Dummy('x', real=True) expr = expr.args[0].pdf(syms) elif isinstance(expr, JointDistribution): count = len(expr.domain.args) x = Dummy('x', real=True, finite=True) syms = tuple(Indexed(x, i) for i in count) expr = expr.pdf(syms) else: syms = tuple(rv.pspace.symbol if isinstance(rv, RandomSymbol) else rv.args[0] for rv in rvs) return Lambda(syms, self.compute_pdf(expr, marginalise_out))(*x) def compute_pdf(self, expr, rvs): for rv in rvs: lpdf = 1 if isinstance(rv, RandomSymbol): lpdf = rv.pspace.pdf expr = self.marginalise_out(expr*lpdf, rv) return expr def marginalise_out(self, expr, rv): from sympy.concrete.summations import Sum if isinstance(rv, RandomSymbol): dom = rv.pspace.set elif isinstance(rv, Indexed): dom = rv.base.component_domain( rv.pspace.component_domain(rv.args[1])) expr = expr.xreplace({rv: rv.pspace.symbol}) if rv.pspace.is_Continuous: #TODO: Modify to support integration #for all kinds of sets. expr = Integral(expr, (rv.pspace.symbol, dom)) elif rv.pspace.is_Discrete: #incorporate this into `Sum`/`summation` if dom in (S.Integers, S.Naturals, S.Naturals0): dom = (dom.inf, dom.sup) expr = Sum(expr, (rv.pspace.symbol, dom)) return expr def __call__(self, *args): return self.pdf(*args)
81352f00aba515bf9bedff85314cdfece1eb2d417d36f93827504dc6469d6f00
from __future__ import print_function, division from sympy import (Basic, sympify, symbols, Dummy, Lambda, summation, Piecewise, S, cacheit, Sum, exp, I, Ne, Eq, poly, series, factorial, And) from sympy.polys.polyerrors import PolynomialError from sympy.solvers.solveset import solveset from sympy.stats.crv import reduce_rational_inequalities_wrap from sympy.stats.rv import (NamedArgsMixin, SinglePSpace, SingleDomain, random_symbols, PSpace, ConditionalDomain, RandomDomain, ProductDomain) from sympy.stats.symbolic_probability import Probability from sympy.functions.elementary.integers import floor from sympy.sets.fancysets import Range, FiniteSet from sympy.sets.sets import Union from sympy.sets.contains import Contains from sympy.utilities import filldedent from sympy.core.sympify import _sympify import random from sympy.external import import_module class DiscreteDistribution(Basic): def __call__(self, *args): return self.pdf(*args) class SingleDiscreteDistribution(DiscreteDistribution, NamedArgsMixin): """ Discrete distribution of a single variable Serves as superclass for PoissonDistribution etc.... Provides methods for pdf, cdf, and sampling See Also: sympy.stats.crv_types.* """ set = S.Integers def __new__(cls, *args): args = list(map(sympify, args)) return Basic.__new__(cls, *args) @staticmethod def check(*args): pass def sample(self, size=()): """ A random realization from the distribution""" if getattr(self,'_sample_scipy', None) and import_module('scipy'): return self._sample_scipy(size) icdf = self._inverse_cdf_expression() samp_list = [] while True: sample_ = floor(list(icdf(random.uniform(0, 1)))[0]) if sample_ >= self.set.inf: if not size: return sample_ else: samp_list.append(sample_) if len(samp_list) == size: return samp_list @cacheit def _inverse_cdf_expression(self): """ Inverse of the CDF Used by sample """ x = Dummy('x', positive=True, integer=True) z = Dummy('z', positive=True) cdf_temp = self.cdf(x) # Invert CDF try: inverse_cdf = solveset(cdf_temp - z, x, domain=S.Reals) except NotImplementedError: inverse_cdf = None if not inverse_cdf or len(inverse_cdf.free_symbols) != 1: raise NotImplementedError("Could not invert CDF") return Lambda(z, inverse_cdf) @cacheit def compute_cdf(self, **kwargs): """ Compute the CDF from the PDF Returns a Lambda """ x, z = symbols('x, z', integer=True, cls=Dummy) left_bound = self.set.inf # CDF is integral of PDF from left bound to z pdf = self.pdf(x) cdf = summation(pdf, (x, left_bound, z), **kwargs) # CDF Ensure that CDF left of left_bound is zero cdf = Piecewise((cdf, z >= left_bound), (0, True)) return Lambda(z, cdf) def _cdf(self, x): return None def cdf(self, x, **kwargs): """ Cumulative density function """ if not kwargs: cdf = self._cdf(x) if cdf is not None: return cdf return self.compute_cdf(**kwargs)(x) @cacheit def compute_characteristic_function(self, **kwargs): """ Compute the characteristic function from the PDF Returns a Lambda """ x, t = symbols('x, t', real=True, cls=Dummy) pdf = self.pdf(x) cf = summation(exp(I*t*x)*pdf, (x, self.set.inf, self.set.sup)) return Lambda(t, cf) def _characteristic_function(self, t): return None def characteristic_function(self, t, **kwargs): """ Characteristic function """ if not kwargs: cf = self._characteristic_function(t) if cf is not None: return cf return self.compute_characteristic_function(**kwargs)(t) @cacheit def compute_moment_generating_function(self, **kwargs): t = Dummy('t', real=True) x = Dummy('x', integer=True) pdf = self.pdf(x) mgf = summation(exp(t*x)*pdf, (x, 
self.set.inf, self.set.sup)) return Lambda(t, mgf) def _moment_generating_function(self, t): return None def moment_generating_function(self, t, **kwargs): if not kwargs: mgf = self._moment_generating_function(t) if mgf is not None: return mgf return self.compute_moment_generating_function(**kwargs)(t) @cacheit def compute_quantile(self, **kwargs): """ Compute the Quantile from the PDF Returns a Lambda """ x = Dummy('x', integer=True) p = Dummy('p', real=True) left_bound = self.set.inf pdf = self.pdf(x) cdf = summation(pdf, (x, left_bound, x), **kwargs) set = ((x, p <= cdf), ) return Lambda(p, Piecewise(*set)) def _quantile(self, x): return None def quantile(self, x, **kwargs): """ Cumulative density function """ if not kwargs: quantile = self._quantile(x) if quantile is not None: return quantile return self.compute_quantile(**kwargs)(x) def expectation(self, expr, var, evaluate=True, **kwargs): """ Expectation of expression over distribution """ # TODO: support discrete sets with non integer stepsizes if evaluate: try: p = poly(expr, var) t = Dummy('t', real=True) mgf = self.moment_generating_function(t) deg = p.degree() taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t) result = 0 for k in range(deg+1): result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k) return result except PolynomialError: return summation(expr * self.pdf(var), (var, self.set.inf, self.set.sup), **kwargs) else: return Sum(expr * self.pdf(var), (var, self.set.inf, self.set.sup), **kwargs) def __call__(self, *args): return self.pdf(*args) class DiscreteDistributionHandmade(SingleDiscreteDistribution): _argnames = ('pdf',) @property def set(self): return self.args[1] def __new__(cls, pdf, set=S.Integers): return Basic.__new__(cls, pdf, set) class DiscreteDomain(RandomDomain): """ A domain with discrete support with step size one. Represented using symbols and Range. """ is_Discrete = True class SingleDiscreteDomain(DiscreteDomain, SingleDomain): def as_boolean(self): return Contains(self.symbol, self.set) class ConditionalDiscreteDomain(DiscreteDomain, ConditionalDomain): """ Domain with discrete support of step size one, that is restricted by some condition. 
""" @property def set(self): rv = self.symbols if len(self.symbols) > 1: raise NotImplementedError(filldedent(''' Multivariate conditional domains are not yet implemented.''')) rv = list(rv)[0] return reduce_rational_inequalities_wrap(self.condition, rv).intersect(self.fulldomain.set) class DiscretePSpace(PSpace): is_real = True is_Discrete = True @property def pdf(self): return self.density(*self.symbols) def where(self, condition): rvs = random_symbols(condition) assert all(r.symbol in self.symbols for r in rvs) if len(rvs) > 1: raise NotImplementedError(filldedent('''Multivariate discrete random variables are not yet supported.''')) conditional_domain = reduce_rational_inequalities_wrap(condition, rvs[0]) conditional_domain = conditional_domain.intersect(self.domain.set) return SingleDiscreteDomain(rvs[0].symbol, conditional_domain) def probability(self, condition): complement = isinstance(condition, Ne) if complement: condition = Eq(condition.args[0], condition.args[1]) try: _domain = self.where(condition).set if condition == False or _domain is S.EmptySet: return S.Zero if condition == True or _domain == self.domain.set: return S.One prob = self.eval_prob(_domain) except NotImplementedError: from sympy.stats.rv import density expr = condition.lhs - condition.rhs dens = density(expr) if not isinstance(dens, DiscreteDistribution): dens = DiscreteDistributionHandmade(dens) z = Dummy('z', real=True) space = SingleDiscretePSpace(z, dens) prob = space.probability(condition.__class__(space.value, 0)) if prob is None: prob = Probability(condition) return prob if not complement else S.One - prob def eval_prob(self, _domain): sym = list(self.symbols)[0] if isinstance(_domain, Range): n = symbols('n', integer=True) inf, sup, step = (r for r in _domain.args) summand = ((self.pdf).replace( sym, n*step)) rv = summation(summand, (n, inf/step, (sup)/step - 1)).doit() return rv elif isinstance(_domain, FiniteSet): pdf = Lambda(sym, self.pdf) rv = sum(pdf(x) for x in _domain) return rv elif isinstance(_domain, Union): rv = sum(self.eval_prob(x) for x in _domain.args) return rv def conditional_space(self, condition): # XXX: Converting from set to tuple. The order matters to Lambda # though so we should be starting with a set... density = Lambda(tuple(self.symbols), self.pdf/self.probability(condition)) condition = condition.xreplace(dict((rv, rv.symbol) for rv in self.values)) domain = ConditionalDiscreteDomain(self.domain, condition) return DiscretePSpace(domain, density) class ProductDiscreteDomain(ProductDomain, DiscreteDomain): def as_boolean(self): return And(*[domain.as_boolean for domain in self.domains]) class SingleDiscretePSpace(DiscretePSpace, SinglePSpace): """ Discrete probability space over a single univariate variable """ is_real = True @property def set(self): return self.distribution.set @property def domain(self): return SingleDiscreteDomain(self.symbol, self.set) def sample(self, size=()): """ Internal sample method Returns dictionary mapping RandomSymbol to realization value. 
""" return {self.value: self.distribution.sample(size)} def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs): rvs = rvs or (self.value,) if self.value not in rvs: return expr expr = _sympify(expr) expr = expr.xreplace(dict((rv, rv.symbol) for rv in rvs)) x = self.value.symbol try: return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs) except NotImplementedError: return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup), **kwargs) def compute_cdf(self, expr, **kwargs): if expr == self.value: x = Dummy("x", real=True) return Lambda(x, self.distribution.cdf(x, **kwargs)) else: raise NotImplementedError() def compute_density(self, expr, **kwargs): if expr == self.value: return self.distribution raise NotImplementedError() def compute_characteristic_function(self, expr, **kwargs): if expr == self.value: t = Dummy("t", real=True) return Lambda(t, self.distribution.characteristic_function(t, **kwargs)) else: raise NotImplementedError() def compute_moment_generating_function(self, expr, **kwargs): if expr == self.value: t = Dummy("t", real=True) return Lambda(t, self.distribution.moment_generating_function(t, **kwargs)) else: raise NotImplementedError() def compute_quantile(self, expr, **kwargs): if expr == self.value: p = Dummy("p", real=True) return Lambda(p, self.distribution.quantile(p, **kwargs)) else: raise NotImplementedError()
cd3e63fcea5093c7c5192607ab22d7cdbf16353638fa8040f427e8cd4e85d61d
""" Continuous Random Variables Module See Also ======== sympy.stats.crv_types sympy.stats.rv sympy.stats.frv """ from __future__ import print_function, division from sympy import (Interval, Intersection, symbols, sympify, Dummy, nan, Integral, And, Or, Piecewise, cacheit, integrate, oo, Lambda, Basic, S, exp, I, FiniteSet, Ne, Eq, Union, poly, series, factorial) from sympy.core.function import PoleError from sympy.functions.special.delta_functions import DiracDelta from sympy.polys.polyerrors import PolynomialError from sympy.solvers.solveset import solveset from sympy.solvers.inequalities import reduce_rational_inequalities from sympy.core.sympify import _sympify from sympy.stats.rv import (RandomDomain, SingleDomain, ConditionalDomain, ProductDomain, PSpace, SinglePSpace, random_symbols, NamedArgsMixin) import random class ContinuousDomain(RandomDomain): """ A domain with continuous support Represented using symbols and Intervals. """ is_Continuous = True def as_boolean(self): raise NotImplementedError("Not Implemented for generic Domains") class SingleContinuousDomain(ContinuousDomain, SingleDomain): """ A univariate domain with continuous support Represented using a single symbol and interval. """ def compute_expectation(self, expr, variables=None, **kwargs): if variables is None: variables = self.symbols if not variables: return expr if frozenset(variables) != frozenset(self.symbols): raise ValueError("Values should be equal") # assumes only intervals return Integral(expr, (self.symbol, self.set), **kwargs) def as_boolean(self): return self.set.as_relational(self.symbol) class ProductContinuousDomain(ProductDomain, ContinuousDomain): """ A collection of independent domains with continuous support """ def compute_expectation(self, expr, variables=None, **kwargs): if variables is None: variables = self.symbols for domain in self.domains: domain_vars = frozenset(variables) & frozenset(domain.symbols) if domain_vars: expr = domain.compute_expectation(expr, domain_vars, **kwargs) return expr def as_boolean(self): return And(*[domain.as_boolean() for domain in self.domains]) class ConditionalContinuousDomain(ContinuousDomain, ConditionalDomain): """ A domain with continuous support that has been further restricted by a condition such as x > 3 """ def compute_expectation(self, expr, variables=None, **kwargs): if variables is None: variables = self.symbols if not variables: return expr # Extract the full integral fullintgrl = self.fulldomain.compute_expectation(expr, variables) # separate into integrand and limits integrand, limits = fullintgrl.function, list(fullintgrl.limits) conditions = [self.condition] while conditions: cond = conditions.pop() if cond.is_Boolean: if isinstance(cond, And): conditions.extend(cond.args) elif isinstance(cond, Or): raise NotImplementedError("Or not implemented here") elif cond.is_Relational: if cond.is_Equality: # Add the appropriate Delta to the integrand integrand *= DiracDelta(cond.lhs - cond.rhs) else: symbols = cond.free_symbols & set(self.symbols) if len(symbols) != 1: # Can't handle x > y raise NotImplementedError( "Multivariate Inequalities not yet implemented") # Can handle x > 0 symbol = symbols.pop() # Find the limit with x, such as (x, -oo, oo) for i, limit in enumerate(limits): if limit[0] == symbol: # Make condition into an Interval like [0, oo] cintvl = reduce_rational_inequalities_wrap( cond, symbol) # Make limit into an Interval like [-oo, oo] lintvl = Interval(limit[1], limit[2]) # Intersect them to get [0, oo] intvl = 
cintvl.intersect(lintvl) # Put back into limits list limits[i] = (symbol, intvl.left, intvl.right) else: raise TypeError( "Condition %s is not a relational or Boolean" % cond) return Integral(integrand, *limits, **kwargs) def as_boolean(self): return And(self.fulldomain.as_boolean(), self.condition) @property def set(self): if len(self.symbols) == 1: return (self.fulldomain.set & reduce_rational_inequalities_wrap( self.condition, tuple(self.symbols)[0])) else: raise NotImplementedError( "Set of Conditional Domain not Implemented") class ContinuousDistribution(Basic): def __call__(self, *args): return self.pdf(*args) class SingleContinuousDistribution(ContinuousDistribution, NamedArgsMixin): """ Continuous distribution of a single variable Serves as superclass for Normal/Exponential/UniformDistribution etc.... Represented by parameters for each of the specific classes. E.g NormalDistribution is represented by a mean and standard deviation. Provides methods for pdf, cdf, and sampling See Also ======== sympy.stats.crv_types.* """ set = Interval(-oo, oo) def __new__(cls, *args): args = list(map(sympify, args)) return Basic.__new__(cls, *args) @staticmethod def check(*args): pass def sample(self, size=()): """ A random realization from the distribution """ icdf = self._inverse_cdf_expression() if not size: return icdf(random.uniform(0, 1)) else: return [icdf(random.uniform(0, 1))]*size @cacheit def _inverse_cdf_expression(self): """ Inverse of the CDF Used by sample """ x, z = symbols('x, z', positive=True, cls=Dummy) # Invert CDF try: inverse_cdf = solveset(self.cdf(x) - z, x, S.Reals) if isinstance(inverse_cdf, Intersection) and S.Reals in inverse_cdf.args: inverse_cdf = list(inverse_cdf.args[1]) except NotImplementedError: inverse_cdf = None if not inverse_cdf or len(inverse_cdf) != 1: raise NotImplementedError("Could not invert CDF") (icdf,) = inverse_cdf return Lambda(z, icdf) @cacheit def compute_cdf(self, **kwargs): """ Compute the CDF from the PDF Returns a Lambda """ x, z = symbols('x, z', real=True, cls=Dummy) left_bound = self.set.start # CDF is integral of PDF from left bound to z pdf = self.pdf(x) cdf = integrate(pdf.doit(), (x, left_bound, z), **kwargs) # CDF Ensure that CDF left of left_bound is zero cdf = Piecewise((cdf, z >= left_bound), (0, True)) return Lambda(z, cdf) def _cdf(self, x): return None def cdf(self, x, **kwargs): """ Cumulative density function """ if len(kwargs) == 0: cdf = self._cdf(x) if cdf is not None: return cdf return self.compute_cdf(**kwargs)(x) @cacheit def compute_characteristic_function(self, **kwargs): """ Compute the characteristic function from the PDF Returns a Lambda """ x, t = symbols('x, t', real=True, cls=Dummy) pdf = self.pdf(x) cf = integrate(exp(I*t*x)*pdf, (x, -oo, oo)) return Lambda(t, cf) def _characteristic_function(self, t): return None def characteristic_function(self, t, **kwargs): """ Characteristic function """ if len(kwargs) == 0: cf = self._characteristic_function(t) if cf is not None: return cf return self.compute_characteristic_function(**kwargs)(t) @cacheit def compute_moment_generating_function(self, **kwargs): """ Compute the moment generating function from the PDF Returns a Lambda """ x, t = symbols('x, t', real=True, cls=Dummy) pdf = self.pdf(x) mgf = integrate(exp(t * x) * pdf, (x, -oo, oo)) return Lambda(t, mgf) def _moment_generating_function(self, t): return None def moment_generating_function(self, t, **kwargs): """ Moment generating function """ if not kwargs: mgf = self._moment_generating_function(t) if mgf is not 
None: return mgf return self.compute_moment_generating_function(**kwargs)(t) def expectation(self, expr, var, evaluate=True, **kwargs): """ Expectation of expression over distribution """ if evaluate: try: p = poly(expr, var) t = Dummy('t', real=True) mgf = self._moment_generating_function(t) if mgf is None: return integrate(expr * self.pdf(var), (var, self.set), **kwargs) deg = p.degree() taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t) result = 0 for k in range(deg+1): result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k) return result except PolynomialError: return integrate(expr * self.pdf(var), (var, self.set), **kwargs) else: return Integral(expr * self.pdf(var), (var, self.set), **kwargs) @cacheit def compute_quantile(self, **kwargs): """ Compute the Quantile from the PDF Returns a Lambda """ x, p = symbols('x, p', real=True, cls=Dummy) left_bound = self.set.start pdf = self.pdf(x) cdf = integrate(pdf, (x, left_bound, x), **kwargs) quantile = solveset(cdf - p, x, self.set) return Lambda(p, Piecewise((quantile, (p >= 0) & (p <= 1) ), (nan, True))) def _quantile(self, x): return None def quantile(self, x, **kwargs): """ Cumulative density function """ if len(kwargs) == 0: quantile = self._quantile(x) if quantile is not None: return quantile return self.compute_quantile(**kwargs)(x) class ContinuousDistributionHandmade(SingleContinuousDistribution): _argnames = ('pdf',) @property def set(self): return self.args[1] def __new__(cls, pdf, set=Interval(-oo, oo)): return Basic.__new__(cls, pdf, set) class ContinuousPSpace(PSpace): """ Continuous Probability Space Represents the likelihood of an event space defined over a continuum. Represented with a ContinuousDomain and a PDF (Lambda-Like) """ is_Continuous = True is_real = True @property def pdf(self): return self.density(*self.domain.symbols) def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs): if rvs is None: rvs = self.values else: rvs = frozenset(rvs) expr = expr.xreplace(dict((rv, rv.symbol) for rv in rvs)) domain_symbols = frozenset(rv.symbol for rv in rvs) return self.domain.compute_expectation(self.pdf * expr, domain_symbols, **kwargs) def compute_density(self, expr, **kwargs): # Common case Density(X) where X in self.values if expr in self.values: # Marginalize all other random symbols out of the density randomsymbols = tuple(set(self.values) - frozenset([expr])) symbols = tuple(rs.symbol for rs in randomsymbols) pdf = self.domain.compute_expectation(self.pdf, symbols, **kwargs) return Lambda(expr.symbol, pdf) z = Dummy('z', real=True) return Lambda(z, self.compute_expectation(DiracDelta(expr - z), **kwargs)) @cacheit def compute_cdf(self, expr, **kwargs): if not self.domain.set.is_Interval: raise ValueError( "CDF not well defined on multivariate expressions") d = self.compute_density(expr, **kwargs) x, z = symbols('x, z', real=True, cls=Dummy) left_bound = self.domain.set.start # CDF is integral of PDF from left bound to z cdf = integrate(d(x), (x, left_bound, z), **kwargs) # CDF Ensure that CDF left of left_bound is zero cdf = Piecewise((cdf, z >= left_bound), (0, True)) return Lambda(z, cdf) @cacheit def compute_characteristic_function(self, expr, **kwargs): if not self.domain.set.is_Interval: raise NotImplementedError("Characteristic function of multivariate expressions not implemented") d = self.compute_density(expr, **kwargs) x, t = symbols('x, t', real=True, cls=Dummy) cf = integrate(exp(I*t*x)*d(x), (x, -oo, oo), **kwargs) return Lambda(t, cf) @cacheit def 
compute_moment_generating_function(self, expr, **kwargs): if not self.domain.set.is_Interval: raise NotImplementedError("Moment generating function of multivariate expressions not implemented") d = self.compute_density(expr, **kwargs) x, t = symbols('x, t', real=True, cls=Dummy) mgf = integrate(exp(t * x) * d(x), (x, -oo, oo), **kwargs) return Lambda(t, mgf) @cacheit def compute_quantile(self, expr, **kwargs): if not self.domain.set.is_Interval: raise ValueError( "Quantile not well defined on multivariate expressions") d = self.compute_cdf(expr, **kwargs) x = Dummy('x', real=True) p = Dummy('p', positive=True) quantile = solveset(d(x) - p, x, self.set) return Lambda(p, quantile) def probability(self, condition, **kwargs): z = Dummy('z', real=True) cond_inv = False if isinstance(condition, Ne): condition = Eq(condition.args[0], condition.args[1]) cond_inv = True # Univariate case can be handled by where try: domain = self.where(condition) rv = [rv for rv in self.values if rv.symbol == domain.symbol][0] # Integrate out all other random variables pdf = self.compute_density(rv, **kwargs) # return S.Zero if `domain` is empty set if domain.set is S.EmptySet or isinstance(domain.set, FiniteSet): return S.Zero if not cond_inv else S.One if isinstance(domain.set, Union): return sum( Integral(pdf(z), (z, subset), **kwargs) for subset in domain.set.args if isinstance(subset, Interval)) # Integrate out the last variable over the special domain return Integral(pdf(z), (z, domain.set), **kwargs) # Other cases can be turned into univariate case # by computing a density handled by density computation except NotImplementedError: from sympy.stats.rv import density expr = condition.lhs - condition.rhs if not random_symbols(expr): dens = self.density comp = condition.rhs else: dens = density(expr, **kwargs) comp = 0 if not isinstance(dens, ContinuousDistribution): dens = ContinuousDistributionHandmade(dens, set=self.domain.set) # Turn problem into univariate case space = SingleContinuousPSpace(z, dens) result = space.probability(condition.__class__(space.value, comp)) return result if not cond_inv else S.One - result def where(self, condition): rvs = frozenset(random_symbols(condition)) if not (len(rvs) == 1 and rvs.issubset(self.values)): raise NotImplementedError( "Multiple continuous random variables not supported") rv = tuple(rvs)[0] interval = reduce_rational_inequalities_wrap(condition, rv) interval = interval.intersect(self.domain.set) return SingleContinuousDomain(rv.symbol, interval) def conditional_space(self, condition, normalize=True, **kwargs): condition = condition.xreplace(dict((rv, rv.symbol) for rv in self.values)) domain = ConditionalContinuousDomain(self.domain, condition) if normalize: # create a clone of the variable to # make sure that variables in nested integrals are different # from the variables outside the integral # this makes sure that they are evaluated separately # and in the correct order replacement = {rv: Dummy(str(rv)) for rv in self.symbols} norm = domain.compute_expectation(self.pdf, **kwargs) pdf = self.pdf / norm.xreplace(replacement) # XXX: Converting set to tuple. The order matters to Lambda though # so we shouldn't be starting with a set here... 
density = Lambda(tuple(domain.symbols), pdf) return ContinuousPSpace(domain, density) class SingleContinuousPSpace(ContinuousPSpace, SinglePSpace): """ A continuous probability space over a single univariate variable These consist of a Symbol and a SingleContinuousDistribution This class is normally accessed through the various random variable functions, Normal, Exponential, Uniform, etc.... """ @property def set(self): return self.distribution.set @property def domain(self): return SingleContinuousDomain(sympify(self.symbol), self.set) def sample(self, size=()): """ Internal sample method Returns dictionary mapping RandomSymbol to realization value. """ return {self.value: self.distribution.sample(size)} def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs): rvs = rvs or (self.value,) if self.value not in rvs: return expr expr = _sympify(expr) expr = expr.xreplace(dict((rv, rv.symbol) for rv in rvs)) x = self.value.symbol try: return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs) except PoleError: return Integral(expr * self.pdf, (x, self.set), **kwargs) def compute_cdf(self, expr, **kwargs): if expr == self.value: z = Dummy("z", real=True) return Lambda(z, self.distribution.cdf(z, **kwargs)) else: return ContinuousPSpace.compute_cdf(self, expr, **kwargs) def compute_characteristic_function(self, expr, **kwargs): if expr == self.value: t = Dummy("t", real=True) return Lambda(t, self.distribution.characteristic_function(t, **kwargs)) else: return ContinuousPSpace.compute_characteristic_function(self, expr, **kwargs) def compute_moment_generating_function(self, expr, **kwargs): if expr == self.value: t = Dummy("t", real=True) return Lambda(t, self.distribution.moment_generating_function(t, **kwargs)) else: return ContinuousPSpace.compute_moment_generating_function(self, expr, **kwargs) def compute_density(self, expr, **kwargs): # https://en.wikipedia.org/wiki/Random_variable#Functions_of_random_variables if expr == self.value: return self.density y = Dummy('y', real=True) gs = solveset(expr - y, self.value, S.Reals) if isinstance(gs, Intersection) and S.Reals in gs.args: gs = list(gs.args[1]) if not gs: raise ValueError("Can not solve %s for %s"%(expr, self.value)) fx = self.compute_density(self.value) fy = sum(fx(g) * abs(g.diff(y)) for g in gs) return Lambda(y, fy) def compute_quantile(self, expr, **kwargs): if expr == self.value: p = Dummy("p", real=True) return Lambda(p, self.distribution.quantile(p, **kwargs)) else: return ContinuousPSpace.compute_quantile(self, expr, **kwargs) def _reduce_inequalities(conditions, var, **kwargs): try: return reduce_rational_inequalities(conditions, var, **kwargs) except PolynomialError: raise ValueError("Reduction of condition failed %s\n" % conditions[0]) def reduce_rational_inequalities_wrap(condition, var): if condition.is_Relational: return _reduce_inequalities([[condition]], var, relational=False) if isinstance(condition, Or): return Union(*[_reduce_inequalities([[arg]], var, relational=False) for arg in condition.args]) if isinstance(condition, And): intervals = [_reduce_inequalities([[arg]], var, relational=False) for arg in condition.args] I = intervals[0] for i in intervals: I = I.intersect(i) return I
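# Illustrative sketch (not part of this module): the CDF and expectation
# machinery above reduces to plain integrals of the density. Assuming a
# standard exponential density on [0, oo) purely as an example distribution:
from sympy import symbols, exp, integrate, oo, Piecewise, Lambda

x = symbols('x', positive=True)
z = symbols('z', real=True)
pdf = exp(-x)                                   # density of Exp(1) on [0, oo)

# compute_cdf: integrate the pdf from the left bound to z, then clamp the
# result to 0 left of the support, as compute_cdf above does.
cdf = Lambda(z, Piecewise((integrate(pdf, (x, 0, z)), z >= 0), (0, True)))

# compute_expectation: E[x**2] is the integral of x**2 * pdf over the support.
second_moment = integrate(x**2 * pdf, (x, 0, oo))   # == 2

print(cdf(3))            # 1 - exp(-3)
print(second_moment)     # 2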
f36d42ae877f12f67da4e395cc725649d7cb8aea5cf807f9014c07f51cad2d2f
""" Finite Discrete Random Variables Module See Also ======== sympy.stats.frv_types sympy.stats.rv sympy.stats.crv """ from __future__ import print_function, division import random from itertools import product from sympy import (Basic, Symbol, cacheit, sympify, Mul, And, Or, Tuple, Piecewise, Eq, Lambda, exp, I, Dummy, nan, Sum, Intersection, S) from sympy.core.containers import Dict from sympy.core.logic import Logic from sympy.core.relational import Relational from sympy.core.sympify import _sympify from sympy.sets.sets import FiniteSet from sympy.stats.rv import (RandomDomain, ProductDomain, ConditionalDomain, PSpace, IndependentProductPSpace, SinglePSpace, random_symbols, sumsets, rv_subs, NamedArgsMixin, Density) class FiniteDensity(dict): """ A domain with Finite Density. """ def __call__(self, item): """ Make instance of a class callable. If item belongs to current instance of a class, return it. Otherwise, return 0. """ item = sympify(item) if item in self: return self[item] else: return 0 @property def dict(self): """ Return item as dictionary. """ return dict(self) class FiniteDomain(RandomDomain): """ A domain with discrete finite support Represented using a FiniteSet. """ is_Finite = True @property def symbols(self): return FiniteSet(sym for sym, val in self.elements) @property def elements(self): return self.args[0] @property def dict(self): return FiniteSet(*[Dict(dict(el)) for el in self.elements]) def __contains__(self, other): return other in self.elements def __iter__(self): return self.elements.__iter__() def as_boolean(self): return Or(*[And(*[Eq(sym, val) for sym, val in item]) for item in self]) class SingleFiniteDomain(FiniteDomain): """ A FiniteDomain over a single symbol/set Example: The possibilities of a *single* die roll. """ def __new__(cls, symbol, set): if not isinstance(set, FiniteSet) and \ not isinstance(set, Intersection): set = FiniteSet(*set) return Basic.__new__(cls, symbol, set) @property def symbol(self): return self.args[0] @property def symbols(self): return FiniteSet(self.symbol) @property def set(self): return self.args[1] @property def elements(self): return FiniteSet(*[frozenset(((self.symbol, elem), )) for elem in self.set]) def __iter__(self): return (frozenset(((self.symbol, elem),)) for elem in self.set) def __contains__(self, other): sym, val = tuple(other)[0] return sym == self.symbol and val in self.set class ProductFiniteDomain(ProductDomain, FiniteDomain): """ A Finite domain consisting of several other FiniteDomains Example: The possibilities of the rolls of three independent dice """ def __iter__(self): proditer = product(*self.domains) return (sumsets(items) for items in proditer) @property def elements(self): return FiniteSet(*self) class ConditionalFiniteDomain(ConditionalDomain, ProductFiniteDomain): """ A FiniteDomain that has been restricted by a condition Example: The possibilities of a die roll under the condition that the roll is even. """ def __new__(cls, domain, condition): """ Create a new instance of ConditionalFiniteDomain class """ if condition is True: return domain cond = rv_subs(condition) return Basic.__new__(cls, domain, cond) def _test(self, elem): """ Test the value. If value is boolean, return it. If value is equality relational (two objects are equal), return it with left-hand side being equal to right-hand side. Otherwise, raise ValueError exception. 
""" val = self.condition.xreplace(dict(elem)) if val in [True, False]: return val elif val.is_Equality: return val.lhs == val.rhs raise ValueError("Undecidable if %s" % str(val)) def __contains__(self, other): return other in self.fulldomain and self._test(other) def __iter__(self): return (elem for elem in self.fulldomain if self._test(elem)) @property def set(self): if isinstance(self.fulldomain, SingleFiniteDomain): return FiniteSet(*[elem for elem in self.fulldomain.set if frozenset(((self.fulldomain.symbol, elem),)) in self]) else: raise NotImplementedError( "Not implemented on multi-dimensional conditional domain") def as_boolean(self): return FiniteDomain.as_boolean(self) class SingleFiniteDistribution(Basic, NamedArgsMixin): def __new__(cls, *args): args = list(map(sympify, args)) return Basic.__new__(cls, *args) @staticmethod def check(*args): pass @property # type: ignore @cacheit def dict(self): if self.is_symbolic: return Density(self) return dict((k, self.pmf(k)) for k in self.set) def pmf(self, *args): # to be overridden by specific distribution raise NotImplementedError() @property def set(self): # to be overridden by specific distribution raise NotImplementedError() values = property(lambda self: self.dict.values) items = property(lambda self: self.dict.items) is_symbolic = property(lambda self: False) __iter__ = property(lambda self: self.dict.__iter__) __getitem__ = property(lambda self: self.dict.__getitem__) def __call__(self, *args): return self.pmf(*args) def __contains__(self, other): return other in self.set #============================================= #========= Probability Space =============== #============================================= class FinitePSpace(PSpace): """ A Finite Probability Space Represents the probabilities of a finite number of events. 
""" is_Finite = True def __new__(cls, domain, density): density = dict((sympify(key), sympify(val)) for key, val in density.items()) public_density = Dict(density) obj = PSpace.__new__(cls, domain, public_density) obj._density = density return obj def prob_of(self, elem): elem = sympify(elem) density = self._density if isinstance(list(density.keys())[0], FiniteSet): return density.get(elem, S.Zero) return density.get(tuple(elem)[0][1], S.Zero) def where(self, condition): assert all(r.symbol in self.symbols for r in random_symbols(condition)) return ConditionalFiniteDomain(self.domain, condition) def compute_density(self, expr): expr = rv_subs(expr, self.values) d = FiniteDensity() for elem in self.domain: val = expr.xreplace(dict(elem)) prob = self.prob_of(elem) d[val] = d.get(val, S.Zero) + prob return d @cacheit def compute_cdf(self, expr): d = self.compute_density(expr) cum_prob = S.Zero cdf = [] for key in sorted(d): prob = d[key] cum_prob += prob cdf.append((key, cum_prob)) return dict(cdf) @cacheit def sorted_cdf(self, expr, python_float=False): cdf = self.compute_cdf(expr) items = list(cdf.items()) sorted_items = sorted(items, key=lambda val_cumprob: val_cumprob[1]) if python_float: sorted_items = [(v, float(cum_prob)) for v, cum_prob in sorted_items] return sorted_items @cacheit def compute_characteristic_function(self, expr): d = self.compute_density(expr) t = Dummy('t', real=True) return Lambda(t, sum(exp(I*k*t)*v for k,v in d.items())) @cacheit def compute_moment_generating_function(self, expr): d = self.compute_density(expr) t = Dummy('t', real=True) return Lambda(t, sum(exp(k*t)*v for k,v in d.items())) def compute_expectation(self, expr, rvs=None, **kwargs): rvs = rvs or self.values expr = rv_subs(expr, rvs) probs = [self.prob_of(elem) for elem in self.domain] if isinstance(expr, (Logic, Relational)): parse_domain = [tuple(elem)[0][1] for elem in self.domain] bools = [expr.xreplace(dict(elem)) for elem in self.domain] else: parse_domain = [expr.xreplace(dict(elem)) for elem in self.domain] bools = [True for elem in self.domain] return sum([Piecewise((prob * elem, blv), (S.Zero, True)) for prob, elem, blv in zip(probs, parse_domain, bools)]) def compute_quantile(self, expr): cdf = self.compute_cdf(expr) p = Dummy('p', real=True) set = ((nan, (p < 0) | (p > 1)),) for key, value in cdf.items(): set = set + ((key, p <= value), ) return Lambda(p, Piecewise(*set)) def probability(self, condition): cond_symbols = frozenset(rs.symbol for rs in random_symbols(condition)) cond = rv_subs(condition) if not cond_symbols.issubset(self.symbols): raise ValueError("Cannot compare foreign random symbols, %s" %(str(cond_symbols - self.symbols))) if isinstance(condition, Relational) and \ (not cond.free_symbols.issubset(self.domain.free_symbols)): rv = condition.lhs if isinstance(condition.rhs, Symbol) else condition.rhs return sum(Piecewise( (self.prob_of(elem), condition.subs(rv, list(elem)[0][1])), (S.Zero, True)) for elem in self.domain) return sympify(sum(self.prob_of(elem) for elem in self.where(condition))) def conditional_space(self, condition): domain = self.where(condition) prob = self.probability(condition) density = dict((key, val / prob) for key, val in self._density.items() if domain._test(key)) return FinitePSpace(domain, density) def sample(self, size=()): """ Internal sample method Returns dictionary mapping RandomSymbol to realization value. 
""" expr = Tuple(*self.values) cdf = self.sorted_cdf(expr, python_float=True) x = random.uniform(0, 1) # Find first occurrence with cumulative probability less than x # This should be replaced with binary search for value, cum_prob in cdf: if x < cum_prob: # return dictionary mapping RandomSymbols to values return dict(list(zip(expr, value))) assert False, "We should never have gotten to this point" class SingleFinitePSpace(SinglePSpace, FinitePSpace): """ A single finite probability space Represents the probabilities of a set of random events that can be attributed to a single variable/symbol. This class is implemented by many of the standard FiniteRV types such as Die, Bernoulli, Coin, etc.... """ @property def domain(self): return SingleFiniteDomain(self.symbol, self.distribution.set) @property def _is_symbolic(self): """ Helper property to check if the distribution of the random variable is having symbolic dimension. """ return self.distribution.is_symbolic @property def distribution(self): return self.args[1] def pmf(self, expr): return self.distribution.pmf(expr) @property # type: ignore @cacheit def _density(self): return dict((FiniteSet((self.symbol, val)), prob) for val, prob in self.distribution.dict.items()) @cacheit def compute_characteristic_function(self, expr): if self._is_symbolic: d = self.compute_density(expr) t = Dummy('t', real=True) ki = Dummy('ki') return Lambda(t, Sum(d(ki)*exp(I*ki*t), (ki, self.args[1].low, self.args[1].high))) expr = rv_subs(expr, self.values) return FinitePSpace(self.domain, self.distribution).compute_characteristic_function(expr) @cacheit def compute_moment_generating_function(self, expr): if self._is_symbolic: d = self.compute_density(expr) t = Dummy('t', real=True) ki = Dummy('ki') return Lambda(t, Sum(d(ki)*exp(ki*t), (ki, self.args[1].low, self.args[1].high))) expr = rv_subs(expr, self.values) return FinitePSpace(self.domain, self.distribution).compute_moment_generating_function(expr) def compute_quantile(self, expr): if self._is_symbolic: raise NotImplementedError("Computing quantile for random variables " "with symbolic dimension because the bounds of searching the required " "value is undetermined.") expr = rv_subs(expr, self.values) return FinitePSpace(self.domain, self.distribution).compute_quantile(expr) def compute_density(self, expr): if self._is_symbolic: rv = list(random_symbols(expr))[0] k = Dummy('k', integer=True) cond = True if not isinstance(expr, (Relational, Logic)) \ else expr.subs(rv, k) return Lambda(k, Piecewise((self.pmf(k), And(k >= self.args[1].low, k <= self.args[1].high, cond)), (S.Zero, True))) expr = rv_subs(expr, self.values) return FinitePSpace(self.domain, self.distribution).compute_density(expr) def compute_cdf(self, expr): if self._is_symbolic: d = self.compute_density(expr) k = Dummy('k') ki = Dummy('ki') return Lambda(k, Sum(d(ki), (ki, self.args[1].low, k))) expr = rv_subs(expr, self.values) return FinitePSpace(self.domain, self.distribution).compute_cdf(expr) def compute_expectation(self, expr, rvs=None, **kwargs): if self._is_symbolic: rv = random_symbols(expr)[0] k = Dummy('k', integer=True) expr = expr.subs(rv, k) cond = True if not isinstance(expr, (Relational, Logic)) \ else expr func = self.pmf(k) * k if cond != True else self.pmf(k) * expr return Sum(Piecewise((func, cond), (S.Zero, True)), (k, self.distribution.low, self.distribution.high)).doit() expr = _sympify(expr) expr = rv_subs(expr, rvs) return FinitePSpace(self.domain, self.distribution).compute_expectation(expr, rvs, **kwargs) def 
probability(self, condition): if self._is_symbolic: #TODO: Implement the mechanism for handling queries for symbolic sized distributions. raise NotImplementedError("Currently, probability queries are not " "supported for random variables with symbolic sized distributions.") condition = rv_subs(condition) return FinitePSpace(self.domain, self.distribution).probability(condition) def conditional_space(self, condition): """ This method is used for transferring the computation to probability method because conditional space of random variables with symbolic dimensions is currently not possible. """ if self._is_symbolic: self domain = self.where(condition) prob = self.probability(condition) density = dict((key, val / prob) for key, val in self._density.items() if domain._test(key)) return FinitePSpace(domain, density) class ProductFinitePSpace(IndependentProductPSpace, FinitePSpace): """ A collection of several independent finite probability spaces """ @property def domain(self): return ProductFiniteDomain(*[space.domain for space in self.spaces]) @property # type: ignore @cacheit def _density(self): proditer = product(*[iter(space._density.items()) for space in self.spaces]) d = {} for items in proditer: elems, probs = list(zip(*items)) elem = sumsets(elems) prob = Mul(*probs) d[elem] = d.get(elem, S.Zero) + prob return Dict(d) @property # type: ignore @cacheit def density(self): return Dict(self._density) def probability(self, condition): return FinitePSpace.probability(self, condition) def compute_density(self, expr): return FinitePSpace.compute_density(self, expr)
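# Illustrative sketch (not part of this module): the finite pspace machinery
# above boils down to dictionary arithmetic over a finite density. A fair
# six-sided die, worked by hand with plain dicts as an example:
from sympy import Rational, S

density = {k: Rational(1, 6) for k in range(1, 7)}       # pmf of one die

# compute_expectation: sum of value * probability over the support
expectation = sum(k * p for k, p in density.items())     # 7/2

# compute_cdf: running sum of the density over sorted outcomes
cdf, running = {}, S.Zero
for k in sorted(density):
    running += density[k]
    cdf[k] = running                                      # cdf[6] == 1

# probability of a condition: add up the density where the condition holds
p_even = sum(p for k, p in density.items() if k % 2 == 0)     # 1/2

print(expectation, cdf[3], p_even)                            # 7/2 1/2 1/2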
c999924ef5b4d1ff5c927d189b77a88ad06d6b23803f77a7a51963b4cf2e2203
#!/usr/bin/env python from __future__ import print_function, division from random import random from sympy import factor, I, Integer, pi, simplify, sin, sqrt, Symbol, sympify from sympy.abc import x, y, z from timeit import default_timer as clock def bench_R1(): "real(f(f(f(f(f(f(f(f(f(f(i/2)))))))))))" def f(z): return sqrt(Integer(1)/3)*z**2 + I/3 f(f(f(f(f(f(f(f(f(f(I/2)))))))))).as_real_imag()[0] def bench_R2(): "Hermite polynomial hermite(15, y)" def hermite(n, y): if n == 1: return 2*y if n == 0: return 1 return (2*y*hermite(n - 1, y) - 2*(n - 1)*hermite(n - 2, y)).expand() hermite(15, y) def bench_R3(): "a = [bool(f==f) for _ in range(10)]" f = x + y + z [bool(f == f) for _ in range(10)] def bench_R4(): # we don't have Tuples pass def bench_R5(): "blowup(L, 8); L=uniq(L)" def blowup(L, n): for i in range(n): L.append( (L[i] + L[i + 1]) * L[i + 2] ) def uniq(x): v = set(x) return v L = [x, y, z] blowup(L, 8) L = uniq(L) def bench_R6(): "sum(simplify((x+sin(i))/x+(x-sin(i))/x) for i in range(100))" sum(simplify((x + sin(i))/x + (x - sin(i))/x) for i in range(100)) def bench_R7(): "[f.subs(x, random()) for _ in range(10**4)]" f = x**24 + 34*x**12 + 45*x**3 + 9*x**18 + 34*x**10 + 32*x**21 [f.subs(x, random()) for _ in range(10**4)] def bench_R8(): "right(x^2,0,5,10^4)" def right(f, a, b, n): a = sympify(a) b = sympify(b) n = sympify(n) x = f.atoms(Symbol).pop() Deltax = (b - a)/n c = a est = 0 for i in range(n): c += Deltax est += f.subs(x, c) return est*Deltax right(x**2, 0, 5, 10**4) def _bench_R9(): "factor(x^20 - pi^5*y^20)" factor(x**20 - pi**5*y**20) def bench_R10(): "v = [-pi,-pi+1/10..,pi]" def srange(min, max, step): v = [min] while (max - v[-1]).evalf() > 0: v.append(v[-1] + step) return v[:-1] srange(-pi, pi, sympify(1)/10) def bench_R11(): "a = [random() + random()*I for w in [0..1000]]" [random() + random()*I for w in range(1000)] def bench_S1(): "e=(x+y+z+1)**7;f=e*(e+1);f.expand()" e = (x + y + z + 1)**7 f = e*(e + 1) f.expand() if __name__ == '__main__': benchmarks = [ bench_R1, bench_R2, bench_R3, bench_R5, bench_R6, bench_R7, bench_R8, #_bench_R9, bench_R10, bench_R11, #bench_S1, ] report = [] for b in benchmarks: t = clock() b() t = clock() - t print("%s%65s: %f" % (b.__name__, b.__doc__, t))
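# Illustrative sketch (not part of this benchmark file): the harness above
# takes a single default_timer sample per benchmark; for more stable numbers
# the same callables can be fed to the standard-library timeit module.
# bench_R3 is used here only because it is cheap -- any function above works.
import timeit

if __name__ == '__main__':
    per_call = min(timeit.repeat(bench_R3, number=10, repeat=3)) / 10
    print("bench_R3 (timeit): %f s/call" % per_call)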
46c7cd479b889c02006313a37a61eac5d0b2d2c92df03aff8bd67f56149de954
''' This implementation is a heavily modified fixed point implementation of BBP_formula for calculating the nth position of pi. The original hosted at: http://en.literateprograms.org/Pi_with_the_BBP_formula_(Python) # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sub-license, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Modifications: 1.Once the nth digit and desired number of digits is selected, the number of digits of working precision is calculated to ensure that the hexadecimal digits returned are accurate. This is calculated as int(math.log(start + prec)/math.log(16) + prec + 3) --------------------------------------- -------- / / number of hex digits additional digits This was checked by the following code which completed without errors (and dig are the digits included in the test_bbp.py file): for i in range(0,1000): for j in range(1,1000): a, b = pi_hex_digits(i, j), dig[i:i+j] if a != b: print('%s\n%s'%(a,b)) Deceasing the additional digits by 1 generated errors, so '3' is the smallest additional precision needed to calculate the above loop without errors. The following trailing 10 digits were also checked to be accurate (and the times were slightly faster with some of the constant modifications that were made): >> from time import time >> t=time();pi_hex_digits(10**2-10 + 1, 10), time()-t ('e90c6cc0ac', 0.0) >> t=time();pi_hex_digits(10**4-10 + 1, 10), time()-t ('26aab49ec6', 0.17100000381469727) >> t=time();pi_hex_digits(10**5-10 + 1, 10), time()-t ('a22673c1a5', 4.7109999656677246) >> t=time();pi_hex_digits(10**6-10 + 1, 10), time()-t ('9ffd342362', 59.985999822616577) >> t=time();pi_hex_digits(10**7-10 + 1, 10), time()-t ('c1a42e06a1', 689.51800012588501) 2. The while loop to evaluate whether the series has converged quits when the addition amount `dt` has dropped to zero. 3. the formatting string to convert the decimal to hexadecimal is calculated for the given precision. 4. pi_hex_digits(n) changed to have coefficient to the formula in an array (perhaps just a matter of preference). ''' from __future__ import print_function, division import math from sympy.core.compatibility import as_int def _series(j, n, prec=14): # Left sum from the bbp algorithm s = 0 D = _dn(n, prec) D4 = 4 * D k = 0 d = 8 * k + j for k in range(n + 1): s += (pow(16, n - k, d) << D4) // d d += 8 # Right sum iterates to infinity for full precision, but we # stop at the point where one iteration is beyond the precision # specified. 
t = 0 k = n + 1 e = 4*(D + n - k) d = 8 * k + j while True: dt = (1 << e) // d if not dt: break t += dt # k += 1 e -= 4 d += 8 total = s + t return total def pi_hex_digits(n, prec=14): """Returns a string containing ``prec`` (default 14) digits starting at the nth digit of pi in hex. Counting of digits starts at 0 and the decimal is not counted, so for n = 0 the returned value starts with 3; n = 1 corresponds to the first digit past the decimal point (which in hex is 2). Examples ======== >>> from sympy.ntheory.bbp_pi import pi_hex_digits >>> pi_hex_digits(0) '3243f6a8885a30' >>> pi_hex_digits(0, 3) '324' References ========== .. [1] http://www.numberworld.org/digits/Pi/ """ n, prec = as_int(n), as_int(prec) if n < 0: raise ValueError('n cannot be negative') if prec == 0: return '' # main of implementation arrays holding formulae coefficients n -= 1 a = [4, 2, 1, 1] j = [1, 4, 5, 6] #formulae D = _dn(n, prec) x = + (a[0]*_series(j[0], n, prec) - a[1]*_series(j[1], n, prec) - a[2]*_series(j[2], n, prec) - a[3]*_series(j[3], n, prec)) & (16**D - 1) s = ("%0" + "%ix" % prec) % (x // 16**(D - prec)) return s def _dn(n, prec): # controller for n dependence on precision # n = starting digit index # prec = the number of total digits to compute n += 1 # because we subtract 1 for _series return int(math.log(n + prec)/math.log(16) + prec + 3)
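# Illustrative sketch (not part of this module): cross-checking pi_hex_digits
# against an independently computed hex expansion of pi. The check scales a
# high-precision decimal value of pi so that its integer part holds the first
# `prec` hex digits ('3' plus prec - 1 fractional digits), then renders it in
# hex with plain integer arithmetic. The choice prec = 10 is arbitrary.
from sympy import pi
from sympy.ntheory.bbp_pi import pi_hex_digits

prec = 10
scaled = int(pi.evalf(3 * prec) * 16**(prec - 1))   # ample working precision
independent = '%x' % scaled

assert pi_hex_digits(0, prec) == independent        # both give '3243f6a888'
print(independent)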
2a353873755f0ad8d28ffe6929d8780c8168241db4a772edab773fc95553311c
""" Number theory module (primes, etc) """ from .generate import nextprime, prevprime, prime, primepi, primerange, \ randprime, Sieve, sieve, primorial, cycle_length, composite, compositepi from .primetest import isprime, is_gaussian_prime from .factor_ import divisors, proper_divisors, factorint, multiplicity, \ multiplicity_in_factorial, perfect_power, pollard_pm1, pollard_rho, \ primefactors, totient, trailing, \ divisor_count, proper_divisor_count, divisor_sigma, factorrat, \ reduced_totient, primenu, primeomega, mersenne_prime_exponent, \ is_perfect, is_mersenne_prime, is_abundant, is_deficient, is_amicable, \ abundance, dra, drm from .partitions_ import npartitions from .residue_ntheory import is_primitive_root, is_quad_residue, \ legendre_symbol, jacobi_symbol, n_order, sqrt_mod, quadratic_residues, \ primitive_root, nthroot_mod, is_nthpow_residue, sqrt_mod_iter, mobius, \ discrete_log, quadratic_congruence, polynomial_congruence from .multinomial import binomial_coefficients, binomial_coefficients_list, \ multinomial_coefficients from .continued_fraction import continued_fraction_periodic, \ continued_fraction_iterator, continued_fraction_reduce, \ continued_fraction_convergents, continued_fraction from .egyptian_fraction import egyptian_fraction __all__ = [ 'nextprime', 'prevprime', 'prime', 'primepi', 'primerange', 'randprime', 'Sieve', 'sieve', 'primorial', 'cycle_length', 'composite', 'compositepi', 'isprime', 'is_gaussian_prime', 'divisors', 'proper_divisors', 'factorint', 'multiplicity', 'perfect_power', 'pollard_pm1', 'pollard_rho', 'primefactors', 'totient', 'trailing', 'divisor_count', 'proper_divisor_count', 'divisor_sigma', 'factorrat', 'reduced_totient', 'primenu', 'primeomega', 'mersenne_prime_exponent', 'is_perfect', 'is_mersenne_prime', 'is_abundant', 'is_deficient', 'is_amicable', 'abundance', 'dra', 'drm', 'multiplicity_in_factorial', 'npartitions', 'is_primitive_root', 'is_quad_residue', 'legendre_symbol', 'jacobi_symbol', 'n_order', 'sqrt_mod', 'quadratic_residues', 'primitive_root', 'nthroot_mod', 'is_nthpow_residue', 'sqrt_mod_iter', 'mobius', 'discrete_log', 'quadratic_congruence', 'polynomial_congruence', 'binomial_coefficients', 'binomial_coefficients_list', 'multinomial_coefficients', 'continued_fraction_periodic', 'continued_fraction_iterator', 'continued_fraction_reduce', 'continued_fraction_convergents', 'continued_fraction', 'egyptian_fraction', ]
84ddf4c8cf0c67157cff75604fd98f42dff8e86ae1e0bba9df657063794e8929
""" Generating and counting primes. """ from __future__ import print_function, division import random from bisect import bisect from itertools import count # Using arrays for sieving instead of lists greatly reduces # memory consumption from array import array as _array from sympy import Function, S from sympy.core.compatibility import as_int from .primetest import isprime def _azeros(n): return _array('l', [0]*n) def _aset(*v): return _array('l', v) def _arange(a, b): return _array('l', range(a, b)) class Sieve: """An infinite list of prime numbers, implemented as a dynamically growing sieve of Eratosthenes. When a lookup is requested involving an odd number that has not been sieved, the sieve is automatically extended up to that number. Examples ======== >>> from sympy import sieve >>> sieve._reset() # this line for doctest only >>> 25 in sieve False >>> sieve._list array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23]) """ # data shared (and updated) by all Sieve instances def __init__(self): self._n = 6 self._list = _aset(2, 3, 5, 7, 11, 13) # primes self._tlist = _aset(0, 1, 1, 2, 2, 4) # totient self._mlist = _aset(0, 1, -1, -1, 0, -1) # mobius assert all(len(i) == self._n for i in (self._list, self._tlist, self._mlist)) def __repr__(self): return ("<%s sieve (%i): %i, %i, %i, ... %i, %i\n" "%s sieve (%i): %i, %i, %i, ... %i, %i\n" "%s sieve (%i): %i, %i, %i, ... %i, %i>") % ( 'prime', len(self._list), self._list[0], self._list[1], self._list[2], self._list[-2], self._list[-1], 'totient', len(self._tlist), self._tlist[0], self._tlist[1], self._tlist[2], self._tlist[-2], self._tlist[-1], 'mobius', len(self._mlist), self._mlist[0], self._mlist[1], self._mlist[2], self._mlist[-2], self._mlist[-1]) def _reset(self, prime=None, totient=None, mobius=None): """Reset all caches (default). To reset one or more set the desired keyword to True.""" if all(i is None for i in (prime, totient, mobius)): prime = totient = mobius = True if prime: self._list = self._list[:self._n] if totient: self._tlist = self._tlist[:self._n] if mobius: self._mlist = self._mlist[:self._n] def extend(self, n): """Grow the sieve to cover all primes <= n (a real number). Examples ======== >>> from sympy import sieve >>> sieve._reset() # this line for doctest only >>> sieve.extend(30) >>> sieve[10] == 29 True """ n = int(n) if n <= self._list[-1]: return # We need to sieve against all bases up to sqrt(n). # This is a recursive call that will do nothing if there are enough # known bases already. maxbase = int(n**0.5) + 1 self.extend(maxbase) # Create a new sieve starting from sqrt(n) begin = self._list[-1] + 1 newsieve = _arange(begin, n + 1) # Now eliminate all multiples of primes in [2, sqrt(n)] for p in self.primerange(2, maxbase): # Start counting at a multiple of p, offsetting # the index to account for the new sieve's base index startindex = (-begin) % p for i in range(startindex, len(newsieve), p): newsieve[i] = 0 # Merge the sieves self._list += _array('l', [x for x in newsieve if x]) def extend_to_no(self, i): """Extend to include the ith prime number. Parameters ========== i : integer Examples ======== >>> from sympy import sieve >>> sieve._reset() # this line for doctest only >>> sieve.extend_to_no(9) >>> sieve._list array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23]) Notes ===== The list is extended by 50% if it is too short, so it is likely that it will be longer than requested. 
""" i = as_int(i) while len(self._list) < i: self.extend(int(self._list[-1] * 1.5)) def primerange(self, a, b): """Generate all prime numbers in the range [a, b). Examples ======== >>> from sympy import sieve >>> print([i for i in sieve.primerange(7, 18)]) [7, 11, 13, 17] """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in as_int will raise an error if there was a problem # determining whether the expression was exactly an integer or not a = max(2, as_int(ceiling(a))) b = as_int(ceiling(b)) if a >= b: return self.extend(b) i = self.search(a)[1] maxi = len(self._list) + 1 while i < maxi: p = self._list[i - 1] if p < b: yield p i += 1 else: return def totientrange(self, a, b): """Generate all totient numbers for the range [a, b). Examples ======== >>> from sympy import sieve >>> print([i for i in sieve.totientrange(7, 18)]) [6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16] """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in as_int will raise an error if there was a problem # determining whether the expression was exactly an integer or not a = max(1, as_int(ceiling(a))) b = as_int(ceiling(b)) n = len(self._tlist) if a >= b: return elif b <= n: for i in range(a, b): yield self._tlist[i] else: self._tlist += _arange(n, b) for i in range(1, n): ti = self._tlist[i] startindex = (n + i - 1) // i * i for j in range(startindex, b, i): self._tlist[j] -= ti if i >= a: yield ti for i in range(n, b): ti = self._tlist[i] for j in range(2 * i, b, i): self._tlist[j] -= ti if i >= a: yield ti def mobiusrange(self, a, b): """Generate all mobius numbers for the range [a, b). Parameters ========== a : integer First number in range b : integer First number outside of range Examples ======== >>> from sympy import sieve >>> print([i for i in sieve.mobiusrange(7, 18)]) [-1, 0, 0, 1, -1, 0, -1, 1, 1, 0, -1] """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in as_int will raise an error if there was a problem # determining whether the expression was exactly an integer or not a = max(1, as_int(ceiling(a))) b = as_int(ceiling(b)) n = len(self._mlist) if a >= b: return elif b <= n: for i in range(a, b): yield self._mlist[i] else: self._mlist += _azeros(b - n) for i in range(1, n): mi = self._mlist[i] startindex = (n + i - 1) // i * i for j in range(startindex, b, i): self._mlist[j] -= mi if i >= a: yield mi for i in range(n, b): mi = self._mlist[i] for j in range(2 * i, b, i): self._mlist[j] -= mi if i >= a: yield mi def search(self, n): """Return the indices i, j of the primes that bound n. If n is prime then i == j. Although n can be an expression, if ceiling cannot convert it to an integer then an n error will be raised. 
Examples ======== >>> from sympy import sieve >>> sieve.search(25) (9, 10) >>> sieve.search(23) (9, 9) """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in as_int will raise an error if there was a problem # determining whether the expression was exactly an integer or not test = as_int(ceiling(n)) n = as_int(n) if n < 2: raise ValueError("n should be >= 2 but got: %s" % n) if n > self._list[-1]: self.extend(n) b = bisect(self._list, n) if self._list[b - 1] == test: return b, b else: return b, b + 1 def __contains__(self, n): try: n = as_int(n) assert n >= 2 except (ValueError, AssertionError): return False if n % 2 == 0: return n == 2 a, b = self.search(n) return a == b def __iter__(self): for n in count(1): yield self[n] def __getitem__(self, n): """Return the nth prime number""" if isinstance(n, slice): self.extend_to_no(n.stop) # Python 2.7 slices have 0 instead of None for start, so # we can't default to 1. start = n.start if n.start is not None else 0 if start < 1: # sieve[:5] would be empty (starting at -1), let's # just be explicit and raise. raise IndexError("Sieve indices start at 1.") return self._list[start - 1:n.stop - 1:n.step] else: if n < 1: # offset is one, so forbid explicit access to sieve[0] # (would surprisingly return the last one). raise IndexError("Sieve indices start at 1.") n = as_int(n) self.extend_to_no(n) return self._list[n - 1] # Generate a global object for repeated use in trial division etc sieve = Sieve() def prime(nth): """ Return the nth prime, with the primes indexed as prime(1) = 2, prime(2) = 3, etc.... The nth prime is approximately n*log(n). Logarithmic integral of x is a pretty nice approximation for number of primes <= x, i.e. li(x) ~ pi(x) In fact, for the numbers we are concerned about( x<1e11 ), li(x) - pi(x) < 50000 Also, li(x) > pi(x) can be safely assumed for the numbers which can be evaluated by this function. Here, we find the least integer m such that li(m) > n using binary search. Now pi(m-1) < li(m-1) <= n, We find pi(m - 1) using primepi function. Starting from m, we have to find n - pi(m-1) more primes. For the inputs this implementation can handle, we will have to test primality for at max about 10**5 numbers, to get our answer. Examples ======== >>> from sympy import prime >>> prime(10) 29 >>> prime(1) 2 >>> prime(100000) 1299709 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n References ========== .. [1] https://en.wikipedia.org/wiki/Prime_number_theorem#Table_of_.CF.80.28x.29.2C_x_.2F_log_x.2C_and_li.28x.29 .. [2] https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number .. [3] https://en.wikipedia.org/wiki/Skewes%27_number """ n = as_int(nth) if n < 1: raise ValueError("nth must be a positive integer; prime(1) == 2") if n <= len(sieve._list): return sieve[n] from sympy.functions.special.error_functions import li from sympy.functions.elementary.exponential import log a = 2 # Lower bound for binary search b = int(n*(log(n) + log(log(n)))) # Upper bound for the search. while a < b: mid = (a + b) >> 1 if li(mid) > n: b = mid else: a = mid + 1 n_primes = primepi(a - 1) while n_primes < n: if isprime(a): n_primes += 1 a += 1 return a - 1 class primepi(Function): """ Represents the prime counting function pi(n) = the number of prime numbers less than or equal to n. 
Algorithm Description: In sieve method, we remove all multiples of prime p except p itself. Let phi(i,j) be the number of integers 2 <= k <= i which remain after sieving from primes less than or equal to j. Clearly, pi(n) = phi(n, sqrt(n)) If j is not a prime, phi(i,j) = phi(i, j - 1) if j is a prime, We remove all numbers(except j) whose smallest prime factor is j. Let x= j*a be such a number, where 2 <= a<= i / j Now, after sieving from primes <= j - 1, a must remain (because x, and hence a has no prime factor <= j - 1) Clearly, there are phi(i / j, j - 1) such a which remain on sieving from primes <= j - 1 Now, if a is a prime less than equal to j - 1, x= j*a has smallest prime factor = a, and has already been removed(by sieving from a). So, we don't need to remove it again. (Note: there will be pi(j - 1) such x) Thus, number of x, that will be removed are: phi(i / j, j - 1) - phi(j - 1, j - 1) (Note that pi(j - 1) = phi(j - 1, j - 1)) => phi(i,j) = phi(i, j - 1) - phi(i / j, j - 1) + phi(j - 1, j - 1) So,following recursion is used and implemented as dp: phi(a, b) = phi(a, b - 1), if b is not a prime phi(a, b) = phi(a, b-1)-phi(a / b, b-1) + phi(b-1, b-1), if b is prime Clearly a is always of the form floor(n / k), which can take at most 2*sqrt(n) values. Two arrays arr1,arr2 are maintained arr1[i] = phi(i, j), arr2[i] = phi(n // i, j) Finally the answer is arr2[1] Examples ======== >>> from sympy import primepi >>> primepi(25) 9 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range prime : Return the nth prime """ @classmethod def eval(cls, n): if n is S.Infinity: return S.Infinity if n is S.NegativeInfinity: return S.Zero try: n = int(n) except TypeError: if n.is_real == False or n is S.NaN: raise ValueError("n must be real") return if n < 2: return S.Zero if n <= sieve._list[-1]: return S(sieve.search(n)[0]) lim = int(n ** 0.5) lim -= 1 lim = max(lim, 0) while lim * lim <= n: lim += 1 lim -= 1 arr1 = [0] * (lim + 1) arr2 = [0] * (lim + 1) for i in range(1, lim + 1): arr1[i] = i - 1 arr2[i] = n // i - 1 for i in range(2, lim + 1): # Presently, arr1[k]=phi(k,i - 1), # arr2[k] = phi(n // k,i - 1) if arr1[i] == arr1[i - 1]: continue p = arr1[i - 1] for j in range(1, min(n // (i * i), lim) + 1): st = i * j if st <= lim: arr2[j] -= arr2[st] - p else: arr2[j] -= arr1[n // st] - p lim2 = min(lim, i * i - 1) for j in range(lim, lim2, -1): arr1[j] -= arr1[j // i] - p return S(arr2[1]) def nextprime(n, ith=1): """ Return the ith prime greater than n. i must be an integer. Notes ===== Potential primes are located at 6*j +/- 1. This property is used during searching. >>> from sympy import nextprime >>> [(i, nextprime(i)) for i in range(10, 15)] [(10, 11), (11, 13), (12, 13), (13, 17), (14, 17)] >>> nextprime(2, ith=2) # the 2nd prime after 2 5 See Also ======== prevprime : Return the largest prime smaller than n primerange : Generate all primes in a given range """ n = int(n) i = as_int(ith) if i > 1: pr = n j = 1 while 1: pr = nextprime(pr) j += 1 if j > i: break return pr if n < 2: return 2 if n < 7: return {2: 3, 3: 5, 4: 5, 5: 7, 6: 7}[n] if n <= sieve._list[-2]: l, u = sieve.search(n) if l == u: return sieve[u + 1] else: return sieve[u] nn = 6*(n//6) if nn == n: n += 1 if isprime(n): return n n += 4 elif n - nn == 5: n += 2 if isprime(n): return n n += 4 else: n = nn + 5 while 1: if isprime(n): return n n += 2 if isprime(n): return n n += 4 def prevprime(n): """ Return the largest prime smaller than n. 
Notes ===== Potential primes are located at 6*j +/- 1. This property is used during searching. >>> from sympy import prevprime >>> [(i, prevprime(i)) for i in range(10, 15)] [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)] See Also ======== nextprime : Return the ith prime greater than n primerange : Generates all primes in a given range """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in as_int will raise an error if there was a problem # determining whether the expression was exactly an integer or not n = as_int(ceiling(n)) if n < 3: raise ValueError("no preceding primes") if n < 8: return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n] if n <= sieve._list[-1]: l, u = sieve.search(n) if l == u: return sieve[l-1] else: return sieve[l] nn = 6*(n//6) if n - nn <= 1: n = nn - 1 if isprime(n): return n n -= 4 else: n = nn + 1 while 1: if isprime(n): return n n -= 2 if isprime(n): return n n -= 4 def primerange(a, b): """ Generate a list of all prime numbers in the range [a, b). If the range exists in the default sieve, the values will be returned from there; otherwise values will be returned but will not modify the sieve. Examples ======== >>> from sympy import primerange, sieve >>> print([i for i in primerange(1, 30)]) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] The Sieve method, primerange, is generally faster but it will occupy more memory as the sieve stores values. The default instance of Sieve, named sieve, can be used: >>> list(sieve.primerange(1, 30)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] Notes ===== Some famous conjectures about the occurrence of primes in a given range are [1]: - Twin primes: though often not, the following will give 2 primes an infinite number of times: primerange(6*n - 1, 6*n + 2) - Legendre's: the following always yields at least one prime primerange(n**2, (n+1)**2+1) - Bertrand's (proven): there is always a prime in the range primerange(n, 2*n) - Brocard's: there are at least four primes in the range primerange(prime(n)**2, prime(n+1)**2) The average gap between primes is log(n) [2]; the gap between primes can be arbitrarily large since sequences of composite numbers are arbitrarily large, e.g. the numbers in the sequence n! + 2, n! + 3 ... n! + n are all composite. See Also ======== nextprime : Return the ith prime greater than n prevprime : Return the largest prime smaller than n randprime : Returns a random prime in a given range primorial : Returns the product of primes based on condition Sieve.primerange : return range from already computed primes or extend the sieve to contain the requested range. References ========== .. [1] https://en.wikipedia.org/wiki/Prime_number .. [2] http://primes.utm.edu/notes/gaps.html """ from sympy.functions.elementary.integers import ceiling if a >= b: return # if we already have the range, return it if b <= sieve._list[-1]: for i in sieve.primerange(a, b): yield i return # otherwise compute, without storing, the desired range. # wrapping ceiling in as_int will raise an error if there was a problem # determining whether the expression was exactly an integer or not a = as_int(ceiling(a)) - 1 b = as_int(ceiling(b)) while 1: a = nextprime(a) if a < b: yield a else: return def randprime(a, b): """ Return a random prime number in the range [a, b). Bertrand's postulate assures that randprime(a, 2*a) will always succeed for a > 1. 
Examples ======== >>> from sympy import randprime, isprime >>> randprime(1, 30) #doctest: +SKIP 13 >>> isprime(randprime(1, 30)) True See Also ======== primerange : Generate all primes in a given range References ========== .. [1] https://en.wikipedia.org/wiki/Bertrand's_postulate """ if a >= b: return a, b = map(int, (a, b)) n = random.randint(a - 1, b) p = nextprime(n) if p >= b: p = prevprime(b) if p < a: raise ValueError("no primes exist in the specified range") return p def primorial(n, nth=True): """ Returns the product of the first n primes (default) or the primes less than or equal to n (when ``nth=False``). Examples ======== >>> from sympy.ntheory.generate import primorial, randprime, primerange >>> from sympy import factorint, Mul, primefactors, sqrt >>> primorial(4) # the first 4 primes are 2, 3, 5, 7 210 >>> primorial(4, nth=False) # primes <= 4 are 2 and 3 6 >>> primorial(1) 2 >>> primorial(1, nth=False) 1 >>> primorial(sqrt(101), nth=False) 210 One can argue that the primes are infinite since if you take a set of primes and multiply them together (e.g. the primorial) and then add or subtract 1, the result cannot be divided by any of the original factors, hence either 1 or more new primes must divide this product of primes. In this case, the number itself is a new prime: >>> factorint(primorial(4) + 1) {211: 1} In this case two new primes are the factors: >>> factorint(primorial(4) - 1) {11: 1, 19: 1} Here, some primes smaller and larger than the primes multiplied together are obtained: >>> p = list(primerange(10, 20)) >>> sorted(set(primefactors(Mul(*p) + 1)).difference(set(p))) [2, 5, 31, 149] See Also ======== primerange : Generate all primes in a given range """ if nth: n = as_int(n) else: n = int(n) if n < 1: raise ValueError("primorial argument must be >= 1") p = 1 if nth: for i in range(1, n + 1): p *= prime(i) else: for i in primerange(2, n + 1): p *= i return p def cycle_length(f, x0, nmax=None, values=False): """For a given iterated sequence, return a generator that gives the length of the iterated cycle (lambda) and the length of terms before the cycle begins (mu); if ``values`` is True then the terms of the sequence will be returned instead. The sequence is started with value ``x0``. Note: more than the first lambda + mu terms may be returned and this is the cost of cycle detection with Brent's method; there are, however, generally less terms calculated than would have been calculated if the proper ending point were determined, e.g. by using Floyd's method. >>> from sympy.ntheory.generate import cycle_length This will yield successive values of i <-- func(i): >>> def iter(func, i): ... while 1: ... ii = func(i) ... yield ii ... i = ii ... A function is defined: >>> func = lambda i: (i**2 + 1) % 51 and given a seed of 4 and the mu and lambda terms calculated: >>> next(cycle_length(func, 4)) (6, 2) We can see what is meant by looking at the output: >>> n = cycle_length(func, 4, values=True) >>> list(ni for ni in n) [17, 35, 2, 5, 26, 14, 44, 50, 2, 5, 26, 14] There are 6 repeating values after the first 2. If a sequence is suspected of being longer than you might wish, ``nmax`` can be used to exit early (and mu will be returned as None): >>> next(cycle_length(func, 4, nmax = 4)) (4, None) >>> [ni for ni in cycle_length(func, 4, nmax = 4, values=True)] [17, 35, 2, 5] Code modified from: https://en.wikipedia.org/wiki/Cycle_detection. 
""" nmax = int(nmax or 0) # main phase: search successive powers of two power = lam = 1 tortoise, hare = x0, f(x0) # f(x0) is the element/node next to x0. i = 0 while tortoise != hare and (not nmax or i < nmax): i += 1 if power == lam: # time to start a new power of two? tortoise = hare power *= 2 lam = 0 if values: yield hare hare = f(hare) lam += 1 if nmax and i == nmax: if values: return else: yield nmax, None return if not values: # Find the position of the first repetition of length lambda mu = 0 tortoise = hare = x0 for i in range(lam): hare = f(hare) while tortoise != hare: tortoise = f(tortoise) hare = f(hare) mu += 1 if mu: mu -= 1 yield lam, mu def composite(nth): """ Return the nth composite number, with the composite numbers indexed as composite(1) = 4, composite(2) = 6, etc.... Examples ======== >>> from sympy import composite >>> composite(36) 52 >>> composite(1) 4 >>> composite(17737) 20000 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n prime : Return the nth prime compositepi : Return the number of positive composite numbers less than or equal to n """ n = as_int(nth) if n < 1: raise ValueError("nth must be a positive integer; composite(1) == 4") composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18] if n <= 10: return composite_arr[n - 1] a, b = 4, sieve._list[-1] if n <= b - primepi(b) - 1: while a < b - 1: mid = (a + b) >> 1 if mid - primepi(mid) - 1 > n: b = mid else: a = mid if isprime(a): a -= 1 return a from sympy.functions.special.error_functions import li from sympy.functions.elementary.exponential import log a = 4 # Lower bound for binary search b = int(n*(log(n) + log(log(n)))) # Upper bound for the search. while a < b: mid = (a + b) >> 1 if mid - li(mid) - 1 > n: b = mid else: a = mid + 1 n_composites = a - primepi(a) - 1 while n_composites > n: if not isprime(a): n_composites -= 1 a -= 1 if isprime(a): a -= 1 return a def compositepi(n): """ Return the number of positive composite numbers less than or equal to n. The first positive composite is 4, i.e. compositepi(4) = 1. Examples ======== >>> from sympy import compositepi >>> compositepi(25) 15 >>> compositepi(1000) 831 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range prime : Return the nth prime primepi : Return the number of primes less than or equal to n composite : Return the nth composite number """ n = int(n) if n < 4: return 0 return n - primepi(n) - 1
from __future__ import print_function, division from sympy.core.compatibility import as_int from sympy.core.function import Function from sympy.utilities.iterables import cartes from sympy.core.numbers import igcd, igcdex, mod_inverse from sympy.core.power import isqrt from sympy.core.singleton import S from .primetest import isprime from .factor_ import factorint, trailing, totient, multiplicity from random import randint, Random def n_order(a, n): """Returns the order of ``a`` modulo ``n``. The order of ``a`` modulo ``n`` is the smallest integer ``k`` such that ``a**k`` leaves a remainder of 1 with ``n``. Examples ======== >>> from sympy.ntheory import n_order >>> n_order(3, 7) 6 >>> n_order(4, 7) 3 """ from collections import defaultdict a, n = as_int(a), as_int(n) if igcd(a, n) != 1: raise ValueError("The two numbers should be relatively prime") factors = defaultdict(int) f = factorint(n) for px, kx in f.items(): if kx > 1: factors[px] += kx - 1 fpx = factorint(px - 1) for py, ky in fpx.items(): factors[py] += ky group_order = 1 for px, kx in factors.items(): group_order *= px**kx order = 1 if a > n: a = a % n for p, e in factors.items(): exponent = group_order for f in range(e + 1): if pow(a, exponent, n) != 1: order *= p ** (e - f + 1) break exponent = exponent // p return order def _primitive_root_prime_iter(p): """ Generates the primitive roots for a prime ``p`` Examples ======== >>> from sympy.ntheory.residue_ntheory import _primitive_root_prime_iter >>> list(_primitive_root_prime_iter(19)) [2, 3, 10, 13, 14, 15] References ========== .. [1] W. Stein "Elementary Number Theory" (2011), page 44 """ # it is assumed that p is an int v = [(p - 1) // i for i in factorint(p - 1).keys()] a = 2 while a < p: for pw in v: # a TypeError below may indicate that p was not an int if pow(a, pw, p) == 1: break else: yield a a += 1 def primitive_root(p): """ Returns the smallest primitive root or None Parameters ========== p : positive integer Examples ======== >>> from sympy.ntheory.residue_ntheory import primitive_root >>> primitive_root(19) 2 References ========== .. [1] W. Stein "Elementary Number Theory" (2011), page 44 .. [2] P. Hackman "Elementary Number Theory" (2009), Chapter C """ p = as_int(p) if p < 1: raise ValueError('p is required to be positive') if p <= 2: return 1 f = factorint(p) if len(f) > 2: return None if len(f) == 2: if 2 not in f or f[2] > 1: return None # case p = 2*p1**k, p1 prime for p1, e1 in f.items(): if p1 != 2: break i = 1 while i < p: i += 2 if i % p1 == 0: continue if is_primitive_root(i, p): return i else: if 2 in f: if p == 4: return 3 return None p1, n = list(f.items())[0] if n > 1: # see Ref [2], page 81 g = primitive_root(p1) if is_primitive_root(g, p1**2): return g else: for i in range(2, g + p1 + 1): if igcd(i, p) == 1 and is_primitive_root(i, p): return i return next(_primitive_root_prime_iter(p)) def is_primitive_root(a, p): """ Returns True if ``a`` is a primitive root of ``p`` ``a`` is said to be the primitive root of ``p`` if gcd(a, p) == 1 and totient(p) is the smallest positive number s.t. 
a**totient(p) cong 1 mod(p) Examples ======== >>> from sympy.ntheory import is_primitive_root, n_order, totient >>> is_primitive_root(3, 10) True >>> is_primitive_root(9, 10) False >>> n_order(3, 10) == totient(10) True >>> n_order(9, 10) == totient(10) False """ a, p = as_int(a), as_int(p) if igcd(a, p) != 1: raise ValueError("The two numbers should be relatively prime") if a > p: a = a % p return n_order(a, p) == totient(p) def _sqrt_mod_tonelli_shanks(a, p): """ Returns the square root in the case of ``p`` prime with ``p == 1 (mod 8)`` References ========== .. [1] R. Crandall and C. Pomerance "Prime Numbers", 2nt Ed., page 101 """ s = trailing(p - 1) t = p >> s # find a non-quadratic residue while 1: d = randint(2, p - 1) r = legendre_symbol(d, p) if r == -1: break #assert legendre_symbol(d, p) == -1 A = pow(a, t, p) D = pow(d, t, p) m = 0 for i in range(s): adm = A*pow(D, m, p) % p adm = pow(adm, 2**(s - 1 - i), p) if adm % p == p - 1: m += 2**i #assert A*pow(D, m, p) % p == 1 x = pow(a, (t + 1)//2, p)*pow(D, m//2, p) % p return x def sqrt_mod(a, p, all_roots=False): """ Find a root of ``x**2 = a mod p`` Parameters ========== a : integer p : positive integer all_roots : if True the list of roots is returned or None Notes ===== If there is no root it is returned None; else the returned root is less or equal to ``p // 2``; in general is not the smallest one. It is returned ``p // 2`` only if it is the only root. Use ``all_roots`` only when it is expected that all the roots fit in memory; otherwise use ``sqrt_mod_iter``. Examples ======== >>> from sympy.ntheory import sqrt_mod >>> sqrt_mod(11, 43) 21 >>> sqrt_mod(17, 32, True) [7, 9, 23, 25] """ if all_roots: return sorted(list(sqrt_mod_iter(a, p))) try: p = abs(as_int(p)) it = sqrt_mod_iter(a, p) r = next(it) if r > p // 2: return p - r elif r < p // 2: return r else: try: r = next(it) if r > p // 2: return p - r except StopIteration: pass return r except StopIteration: return None def _product(*iters): """ Cartesian product generator Notes ===== Unlike itertools.product, it works also with iterables which do not fit in memory. 
See http://bugs.python.org/issue10109 Author: Fernando Sumudu with small changes """ import itertools inf_iters = tuple(itertools.cycle(enumerate(it)) for it in iters) num_iters = len(inf_iters) cur_val = [None]*num_iters first_v = True while True: i, p = 0, num_iters while p and not i: p -= 1 i, cur_val[p] = next(inf_iters[p]) if not p and not i: if first_v: first_v = False else: break yield cur_val def sqrt_mod_iter(a, p, domain=int): """ Iterate over solutions to ``x**2 = a mod p`` Parameters ========== a : integer p : positive integer domain : integer domain, ``int``, ``ZZ`` or ``Integer`` Examples ======== >>> from sympy.ntheory.residue_ntheory import sqrt_mod_iter >>> list(sqrt_mod_iter(11, 43)) [21, 22] """ from sympy.polys.galoistools import gf_crt1, gf_crt2 from sympy.polys.domains import ZZ a, p = as_int(a), abs(as_int(p)) if isprime(p): a = a % p if a == 0: res = _sqrt_mod1(a, p, 1) else: res = _sqrt_mod_prime_power(a, p, 1) if res: if domain is ZZ: for x in res: yield x else: for x in res: yield domain(x) else: f = factorint(p) v = [] pv = [] for px, ex in f.items(): if a % px == 0: rx = _sqrt_mod1(a, px, ex) if not rx: return else: rx = _sqrt_mod_prime_power(a, px, ex) if not rx: return v.append(rx) pv.append(px**ex) mm, e, s = gf_crt1(pv, ZZ) if domain is ZZ: for vx in _product(*v): r = gf_crt2(vx, pv, mm, e, s, ZZ) yield r else: for vx in _product(*v): r = gf_crt2(vx, pv, mm, e, s, ZZ) yield domain(r) def _sqrt_mod_prime_power(a, p, k): """ Find the solutions to ``x**2 = a mod p**k`` when ``a % p != 0`` Parameters ========== a : integer p : prime number k : positive integer Examples ======== >>> from sympy.ntheory.residue_ntheory import _sqrt_mod_prime_power >>> _sqrt_mod_prime_power(11, 43, 1) [21, 22] References ========== .. [1] P. Hackman "Elementary Number Theory" (2009), page 160 .. [2] http://www.numbertheory.org/php/squareroot.html .. 
[3] [Gathen99]_ """ from sympy.core.numbers import igcdex from sympy.polys.domains import ZZ pk = p**k a = a % pk if k == 1: if p == 2: return [ZZ(a)] if not (a % p < 2 or pow(a, (p - 1) // 2, p) == 1): return None if p % 4 == 3: res = pow(a, (p + 1) // 4, p) elif p % 8 == 5: sign = pow(a, (p - 1) // 4, p) if sign == 1: res = pow(a, (p + 3) // 8, p) else: b = pow(4*a, (p - 5) // 8, p) x = (2*a*b) % p if pow(x, 2, p) == a: res = x else: res = _sqrt_mod_tonelli_shanks(a, p) # ``_sqrt_mod_tonelli_shanks(a, p)`` is not deterministic; # sort to get always the same result return sorted([ZZ(res), ZZ(p - res)]) if k > 1: # see Ref.[2] if p == 2: if a % 8 != 1: return None if k <= 3: s = set() for i in range(0, pk, 4): s.add(1 + i) s.add(-1 + i) return list(s) # according to Ref.[2] for k > 2 there are two solutions # (mod 2**k-1), that is four solutions (mod 2**k), which can be # obtained from the roots of x**2 = 0 (mod 8) rv = [ZZ(1), ZZ(3), ZZ(5), ZZ(7)] # hensel lift them to solutions of x**2 = 0 (mod 2**k) # if r**2 - a = 0 mod 2**nx but not mod 2**(nx+1) # then r + 2**(nx - 1) is a root mod 2**(nx+1) n = 3 res = [] for r in rv: nx = n while nx < k: r1 = (r**2 - a) >> nx if r1 % 2: r = r + (1 << (nx - 1)) #assert (r**2 - a)% (1 << (nx + 1)) == 0 nx += 1 if r not in res: res.append(r) x = r + (1 << (k - 1)) #assert (x**2 - a) % pk == 0 if x < (1 << nx) and x not in res: if (x**2 - a) % pk == 0: res.append(x) return res rv = _sqrt_mod_prime_power(a, p, 1) if not rv: return None r = rv[0] fr = r**2 - a # hensel lifting with Newton iteration, see Ref.[3] chapter 9 # with f(x) = x**2 - a; one has f'(a) != 0 (mod p) for p != 2 n = 1 px = p while 1: n1 = n n1 *= 2 if n1 > k: break n = n1 px = px**2 frinv = igcdex(2*r, px)[0] r = (r - fr*frinv) % px fr = r**2 - a if n < k: px = p**k frinv = igcdex(2*r, px)[0] r = (r - fr*frinv) % px return [r, px - r] def _sqrt_mod1(a, p, n): """ Find solution to ``x**2 == a mod p**n`` when ``a % p == 0`` see http://www.numbertheory.org/php/squareroot.html """ pn = p**n a = a % pn if a == 0: # case gcd(a, p**k) = p**n m = n // 2 if n % 2 == 1: pm1 = p**(m + 1) def _iter0a(): i = 0 while i < pn: yield i i += pm1 return _iter0a() else: pm = p**m def _iter0b(): i = 0 while i < pn: yield i i += pm return _iter0b() # case gcd(a, p**k) = p**r, r < n f = factorint(a) r = f[p] if r % 2 == 1: return None m = r // 2 a1 = a >> r if p == 2: if n - r == 1: pnm1 = 1 << (n - m + 1) pm1 = 1 << (m + 1) def _iter1(): k = 1 << (m + 2) i = 1 << m while i < pnm1: j = i while j < pn: yield j j += k i += pm1 return _iter1() if n - r == 2: res = _sqrt_mod_prime_power(a1, p, n - r) if res is None: return None pnm = 1 << (n - m) def _iter2(): s = set() for r in res: i = 0 while i < pn: x = (r << m) + i if x not in s: s.add(x) yield x i += pnm return _iter2() if n - r > 2: res = _sqrt_mod_prime_power(a1, p, n - r) if res is None: return None pnm1 = 1 << (n - m - 1) def _iter3(): s = set() for r in res: i = 0 while i < pn: x = ((r << m) + i) % pn if x not in s: s.add(x) yield x i += pnm1 return _iter3() else: m = r // 2 a1 = a // p**r res1 = _sqrt_mod_prime_power(a1, p, n - r) if res1 is None: return None pm = p**m pnr = p**(n-r) pnm = p**(n-m) def _iter4(): s = set() pm = p**m for rx in res1: i = 0 while i < pnm: x = ((rx + i) % pn) if x not in s: s.add(x) yield x*pm i += pnr return _iter4() def is_quad_residue(a, p): """ Returns True if ``a`` (mod ``p``) is in the set of squares mod ``p``, i.e a % p in set([i**2 % p for i in range(p)]). 
If ``p`` is an odd prime, an iterative method is used to make the determination: >>> from sympy.ntheory import is_quad_residue >>> sorted(set([i**2 % 7 for i in range(7)])) [0, 1, 2, 4] >>> [j for j in range(7) if is_quad_residue(j, 7)] [0, 1, 2, 4] See Also ======== legendre_symbol, jacobi_symbol """ a, p = as_int(a), as_int(p) if p < 1: raise ValueError('p must be > 0') if a >= p or a < 0: a = a % p if a < 2 or p < 3: return True if not isprime(p): if p % 2 and jacobi_symbol(a, p) == -1: return False r = sqrt_mod(a, p) if r is None: return False else: return True return pow(a, (p - 1) // 2, p) == 1 def is_nthpow_residue(a, n, m): """ Returns True if ``x**n == a (mod m)`` has solutions. References ========== .. [1] P. Hackman "Elementary Number Theory" (2009), page 76 """ a = a % m a, n, m = as_int(a), as_int(n), as_int(m) if m <= 0: raise ValueError('m must be > 0') if n < 0: raise ValueError('n must be >= 0') if n == 0: if m == 1: return False return a == 1 if a == 0: return True if n == 1: return True if n == 2: return is_quad_residue(a, m) return _is_nthpow_residue_bign(a, n, m) def _is_nthpow_residue_bign(a, n, m): """Returns True if ``x**n == a (mod m)`` has solutions for n > 2.""" # assert n > 2 # assert a > 0 and m > 0 if primitive_root(m) is None or igcd(a, m) != 1: # assert m >= 8 for prime, power in factorint(m).items(): if not _is_nthpow_residue_bign_prime_power(a, n, prime, power): return False return True f = totient(m) k = f // igcd(f, n) return pow(a, k, m) == 1 def _is_nthpow_residue_bign_prime_power(a, n, p, k): """Returns True/False if a solution for ``x**n == a (mod(p**k))`` does/doesn't exist.""" # assert a > 0 # assert n > 2 # assert p is prime # assert k > 0 if a % p: if p != 2: return _is_nthpow_residue_bign(a, n, pow(p, k)) if n & 1: return True c = trailing(n) return a % pow(2, min(c + 2, k)) == 1 else: a %= pow(p, k) if not a: return True mu = multiplicity(p, a) if mu % n: return False pm = pow(p, mu) return _is_nthpow_residue_bign_prime_power(a//pm, n, p, k - mu) def _nthroot_mod2(s, q, p): f = factorint(q) v = [] for b, e in f.items(): v.extend([b]*e) for qx in v: s = _nthroot_mod1(s, qx, p, False) return s def _nthroot_mod1(s, q, p, all_roots): """ Root of ``x**q = s mod p``, ``p`` prime and ``q`` divides ``p - 1`` References ========== .. [1] A. M. Johnston "A Generalized qth Root Algorithm" """ g = primitive_root(p) if not isprime(q): r = _nthroot_mod2(s, q, p) else: f = p - 1 assert (p - 1) % q == 0 # determine k k = 0 while f % q == 0: k += 1 f = f // q # find z, x, r1 f1 = igcdex(-f, q)[0] % q z = f*f1 x = (1 + z) // q r1 = pow(s, x, p) s1 = pow(s, f, p) h = pow(g, f*q, p) t = discrete_log(p, s1, h) g2 = pow(g, z*t, p) g3 = igcdex(g2, p)[0] r = r1*g3 % p #assert pow(r, q, p) == s res = [r] h = pow(g, (p - 1) // q, p) #assert pow(h, q, p) == 1 hx = r for i in range(q - 1): hx = (hx*h) % p res.append(hx) if all_roots: res.sort() return res return min(res) def _help(m, prime_modulo_method, diff_method, expr_val): """ Helper function for _nthroot_mod_composite and polynomial_congruence. 
Parameters ========== m : positive integer prime_modulo_method : function to calculate the root of the congruence equation for the prime divisors of m diff_method : function to calculate derivative of expression at any given point expr_val : function to calculate value of the expression at any given point """ from sympy.ntheory.modular import crt f = factorint(m) dd = {} for p, e in f.items(): tot_roots = set() if e == 1: tot_roots.update(prime_modulo_method(p)) else: for root in prime_modulo_method(p): diff = diff_method(root, p) if diff != 0: ppow = p m_inv = mod_inverse(diff, p) for j in range(1, e): ppow *= p root = (root - expr_val(root, ppow) * m_inv) % ppow tot_roots.add(root) else: new_base = p roots_in_base = {root} while new_base < pow(p, e): new_base *= p new_roots = set() for k in roots_in_base: if expr_val(k, new_base)!= 0: continue while k not in new_roots: new_roots.add(k) k = (k + (new_base // p)) % new_base roots_in_base = new_roots tot_roots = tot_roots | roots_in_base if tot_roots == set(): return [] dd[pow(p, e)] = tot_roots a = [] m = [] for x, y in dd.items(): m.append(x) a.append(list(y)) return sorted(set(crt(m, list(i))[0] for i in cartes(*a))) def _nthroot_mod_composite(a, n, m): """ Find the solutions to ``x**n = a mod m`` when m is not prime. """ return _help(m, lambda p: nthroot_mod(a, n, p, True), lambda root, p: (pow(root, n - 1, p) * (n % p)) % p, lambda root, p: (pow(root, n, p) - a) % p) def nthroot_mod(a, n, p, all_roots=False): """ Find the solutions to ``x**n = a mod p`` Parameters ========== a : integer n : positive integer p : positive integer all_roots : if False returns the smallest root, else the list of roots Examples ======== >>> from sympy.ntheory.residue_ntheory import nthroot_mod >>> nthroot_mod(11, 4, 19) 8 >>> nthroot_mod(11, 4, 19, True) [8, 11] >>> nthroot_mod(68, 3, 109) 23 """ from sympy.core.numbers import igcdex a = a % p a, n, p = as_int(a), as_int(n), as_int(p) if n == 2: return sqrt_mod(a, p, all_roots) # see Hackman "Elementary Number Theory" (2009), page 76 if not isprime(p): return _nthroot_mod_composite(a, n, p) if a % p == 0: return [0] if not is_nthpow_residue(a, n, p): return [] if all_roots else None if (p - 1) % n == 0: return _nthroot_mod1(a, n, p, all_roots) # The roots of ``x**n - a = 0 (mod p)`` are roots of # ``gcd(x**n - a, x**(p - 1) - 1) = 0 (mod p)`` pa = n pb = p - 1 b = 1 if pa < pb: a, pa, b, pb = b, pb, a, pa while pb: # x**pa - a = 0; x**pb - b = 0 # x**pa - a = x**(q*pb + r) - a = (x**pb)**q * x**r - a = # b**q * x**r - a; x**r - c = 0; c = b**-q * a mod p q, r = divmod(pa, pb) c = pow(b, q, p) c = igcdex(c, p)[0] c = (c * a) % p pa, pb = pb, r a, b = b, c if pa == 1: if all_roots: res = [a] else: res = a elif pa == 2: return sqrt_mod(a, p , all_roots) else: res = _nthroot_mod1(a, pa, p, all_roots) return res def quadratic_residues(p): """ Returns the list of quadratic residues. Examples ======== >>> from sympy.ntheory.residue_ntheory import quadratic_residues >>> quadratic_residues(7) [0, 1, 2, 4] """ p = as_int(p) r = set() for i in range(p // 2 + 1): r.add(pow(i, 2, p)) return sorted(list(r)) def legendre_symbol(a, p): r""" Returns the Legendre symbol `(a / p)`. For an integer ``a`` and an odd prime ``p``, the Legendre symbol is defined as .. 
math :: \genfrac(){}{}{a}{p} = \begin{cases} 0 & \text{if } p \text{ divides } a\\ 1 & \text{if } a \text{ is a quadratic residue modulo } p\\ -1 & \text{if } a \text{ is a quadratic nonresidue modulo } p \end{cases} Parameters ========== a : integer p : odd prime Examples ======== >>> from sympy.ntheory import legendre_symbol >>> [legendre_symbol(i, 7) for i in range(7)] [0, 1, 1, -1, 1, -1, -1] >>> sorted(set([i**2 % 7 for i in range(7)])) [0, 1, 2, 4] See Also ======== is_quad_residue, jacobi_symbol """ a, p = as_int(a), as_int(p) if not isprime(p) or p == 2: raise ValueError("p should be an odd prime") a = a % p if not a: return 0 if pow(a, (p - 1) // 2, p) == 1: return 1 return -1 def jacobi_symbol(m, n): r""" Returns the Jacobi symbol `(m / n)`. For any integer ``m`` and any positive odd integer ``n`` the Jacobi symbol is defined as the product of the Legendre symbols corresponding to the prime factors of ``n``: .. math :: \genfrac(){}{}{m}{n} = \genfrac(){}{}{m}{p^{1}}^{\alpha_1} \genfrac(){}{}{m}{p^{2}}^{\alpha_2} ... \genfrac(){}{}{m}{p^{k}}^{\alpha_k} \text{ where } n = p_1^{\alpha_1} p_2^{\alpha_2} ... p_k^{\alpha_k} Like the Legendre symbol, if the Jacobi symbol `\genfrac(){}{}{m}{n} = -1` then ``m`` is a quadratic nonresidue modulo ``n``. But, unlike the Legendre symbol, if the Jacobi symbol `\genfrac(){}{}{m}{n} = 1` then ``m`` may or may not be a quadratic residue modulo ``n``. Parameters ========== m : integer n : odd positive integer Examples ======== >>> from sympy.ntheory import jacobi_symbol, legendre_symbol >>> from sympy import Mul, S >>> jacobi_symbol(45, 77) -1 >>> jacobi_symbol(60, 121) 1 The relationship between the ``jacobi_symbol`` and ``legendre_symbol`` can be demonstrated as follows: >>> L = legendre_symbol >>> S(45).factors() {3: 2, 5: 1} >>> jacobi_symbol(7, 45) == L(7, 3)**2 * L(7, 5)**1 True See Also ======== is_quad_residue, legendre_symbol """ m, n = as_int(m), as_int(n) if n < 0 or not n % 2: raise ValueError("n should be an odd positive integer") if m < 0 or m > n: m = m % n if not m: return int(n == 1) if n == 1 or m == 1: return 1 if igcd(m, n) != 1: return 0 j = 1 if m < 0: m = -m if n % 4 == 3: j = -j while m != 0: while m % 2 == 0 and m > 0: m >>= 1 if n % 8 in [3, 5]: j = -j m, n = n, m if m % 4 == 3 and n % 4 == 3: j = -j m %= n if n != 1: j = 0 return j class mobius(Function): """ Mobius function maps natural number to {-1, 0, 1} It is defined as follows: 1) `1` if `n = 1`. 2) `0` if `n` has a squared prime factor. 3) `(-1)^k` if `n` is a square-free positive integer with `k` number of prime factors. It is an important multiplicative function in number theory and combinatorics. It has applications in mathematical series, algebraic number theory and also physics (Fermion operator has very concrete realization with Mobius Function model). Parameters ========== n : positive integer Examples ======== >>> from sympy.ntheory import mobius >>> mobius(13*7) 1 >>> mobius(1) 1 >>> mobius(13*7*5) -1 >>> mobius(13**2) 0 References ========== .. [1] https://en.wikipedia.org/wiki/M%C3%B6bius_function .. 
[2] Thomas Koshy "Elementary Number Theory with Applications" """ @classmethod def eval(cls, n): if n.is_integer: if n.is_positive is not True: raise ValueError("n should be a positive integer") else: raise TypeError("n should be an integer") if n.is_prime: return S.NegativeOne elif n is S.One: return S.One elif n.is_Integer: a = factorint(n) if any(i > 1 for i in a.values()): return S.Zero return S.NegativeOne**len(a) def _discrete_log_trial_mul(n, a, b, order=None): """ Trial multiplication algorithm for computing the discrete logarithm of ``a`` to the base ``b`` modulo ``n``. The algorithm finds the discrete logarithm using exhaustive search. This naive method is used as fallback algorithm of ``discrete_log`` when the group order is very small. Examples ======== >>> from sympy.ntheory.residue_ntheory import _discrete_log_trial_mul >>> _discrete_log_trial_mul(41, 15, 7) 3 See Also ======== discrete_log References ========== .. [1] "Handbook of applied cryptography", Menezes, A. J., Van, O. P. C., & Vanstone, S. A. (1997). """ a %= n b %= n if order is None: order = n x = 1 for i in range(order): if x == a: return i x = x * b % n raise ValueError("Log does not exist") def _discrete_log_shanks_steps(n, a, b, order=None): """ Baby-step giant-step algorithm for computing the discrete logarithm of ``a`` to the base ``b`` modulo ``n``. The algorithm is a time-memory trade-off of the method of exhaustive search. It uses `O(sqrt(m))` memory, where `m` is the group order. Examples ======== >>> from sympy.ntheory.residue_ntheory import _discrete_log_shanks_steps >>> _discrete_log_shanks_steps(41, 15, 7) 3 See Also ======== discrete_log References ========== .. [1] "Handbook of applied cryptography", Menezes, A. J., Van, O. P. C., & Vanstone, S. A. (1997). """ a %= n b %= n if order is None: order = n_order(b, n) m = isqrt(order) + 1 T = dict() x = 1 for i in range(m): T[x] = i x = x * b % n z = mod_inverse(b, n) z = pow(z, m, n) x = a for i in range(m): if x in T: return i * m + T[x] x = x * z % n raise ValueError("Log does not exist") def _discrete_log_pollard_rho(n, a, b, order=None, retries=10, rseed=None): """ Pollard's Rho algorithm for computing the discrete logarithm of ``a`` to the base ``b`` modulo ``n``. It is a randomized algorithm with the same expected running time as ``_discrete_log_shanks_steps``, but requires a negligible amount of memory. Examples ======== >>> from sympy.ntheory.residue_ntheory import _discrete_log_pollard_rho >>> _discrete_log_pollard_rho(227, 3**7, 3) 7 See Also ======== discrete_log References ========== .. [1] "Handbook of applied cryptography", Menezes, A. J., Van, O. P. C., & Vanstone, S. A. (1997). 
""" a %= n b %= n if order is None: order = n_order(b, n) prng = Random() if rseed is not None: prng.seed(rseed) for i in range(retries): aa = prng.randint(1, order - 1) ba = prng.randint(1, order - 1) xa = pow(b, aa, n) * pow(a, ba, n) % n c = xa % 3 if c == 0: xb = a * xa % n ab = aa bb = (ba + 1) % order elif c == 1: xb = xa * xa % n ab = (aa + aa) % order bb = (ba + ba) % order else: xb = b * xa % n ab = (aa + 1) % order bb = ba for j in range(order): c = xa % 3 if c == 0: xa = a * xa % n ba = (ba + 1) % order elif c == 1: xa = xa * xa % n aa = (aa + aa) % order ba = (ba + ba) % order else: xa = b * xa % n aa = (aa + 1) % order c = xb % 3 if c == 0: xb = a * xb % n bb = (bb + 1) % order elif c == 1: xb = xb * xb % n ab = (ab + ab) % order bb = (bb + bb) % order else: xb = b * xb % n ab = (ab + 1) % order c = xb % 3 if c == 0: xb = a * xb % n bb = (bb + 1) % order elif c == 1: xb = xb * xb % n ab = (ab + ab) % order bb = (bb + bb) % order else: xb = b * xb % n ab = (ab + 1) % order if xa == xb: r = (ba - bb) % order try: e = mod_inverse(r, order) * (ab - aa) % order if (pow(b, e, n) - a) % n == 0: return e except ValueError: pass break raise ValueError("Pollard's Rho failed to find logarithm") def _discrete_log_pohlig_hellman(n, a, b, order=None): """ Pohlig-Hellman algorithm for computing the discrete logarithm of ``a`` to the base ``b`` modulo ``n``. In order to compute the discrete logarithm, the algorithm takes advantage of the factorization of the group order. It is more efficient when the group order factors into many small primes. Examples ======== >>> from sympy.ntheory.residue_ntheory import _discrete_log_pohlig_hellman >>> _discrete_log_pohlig_hellman(251, 210, 71) 197 See Also ======== discrete_log References ========== .. [1] "Handbook of applied cryptography", Menezes, A. J., Van, O. P. C., & Vanstone, S. A. (1997). """ from .modular import crt a %= n b %= n if order is None: order = n_order(b, n) f = factorint(order) l = [0] * len(f) for i, (pi, ri) in enumerate(f.items()): for j in range(ri): gj = pow(b, l[i], n) aj = pow(a * mod_inverse(gj, n), order // pi**(j + 1), n) bj = pow(b, order // pi, n) cj = discrete_log(n, aj, bj, pi, True) l[i] += cj * pi**j d, _ = crt([pi**ri for pi, ri in f.items()], l) return d def discrete_log(n, a, b, order=None, prime_order=None): """ Compute the discrete logarithm of ``a`` to the base ``b`` modulo ``n``. This is a recursive function to reduce the discrete logarithm problem in cyclic groups of composite order to the problem in cyclic groups of prime order. It employs different algorithms depending on the problem (subgroup order size, prime order or not): * Trial multiplication * Baby-step giant-step * Pollard's Rho * Pohlig-Hellman Examples ======== >>> from sympy.ntheory import discrete_log >>> discrete_log(41, 15, 7) 3 References ========== .. [1] http://mathworld.wolfram.com/DiscreteLogarithm.html .. [2] "Handbook of applied cryptography", Menezes, A. J., Van, O. P. C., & Vanstone, S. A. (1997). 
""" n, a, b = as_int(n), as_int(a), as_int(b) if order is None: order = n_order(b, n) if prime_order is None: prime_order = isprime(order) if order < 1000: return _discrete_log_trial_mul(n, a, b, order) elif prime_order: if order < 1000000000000: return _discrete_log_shanks_steps(n, a, b, order) return _discrete_log_pollard_rho(n, a, b, order) return _discrete_log_pohlig_hellman(n, a, b, order) def quadratic_congruence(a, b, c, p): """ Find the solutions to ``a x**2 + b x + c = 0 mod p a : integer b : integer c : integer p : positive integer """ from sympy.polys.galoistools import linear_congruence a = as_int(a) b = as_int(b) c = as_int(c) p = as_int(p) a = a % p b = b % p c = c % p if a == 0: return linear_congruence(b, -c, p) if p == 2: roots = [] if c % 2 == 0: roots.append(0) if (a + b + c) % 2 == 0: roots.append(1) return roots if isprime(p): inv_a = mod_inverse(a, p) b *= inv_a c *= inv_a if b % 2 == 1: b = b + p d = ((b * b) // 4 - c) % p y = sqrt_mod(d, p, all_roots=True) res = set() for i in y: res.add((i - b // 2) % p) return sorted(res) y = sqrt_mod(b * b - 4 * a * c , 4 * a * p, all_roots=True) res = set() for i in y: root = linear_congruence(2 * a, i - b, 4 * a * p) for j in root: res.add(j % p) return sorted(res) def _polynomial_congruence_prime(coefficients, p): """A helper function used by polynomial_congruence. It returns the root of a polynomial modulo prime number by naive search from [0, p). Parameters ========== coefficients : list of integers p : prime number """ roots = [] rank = len(coefficients) for i in range(0, p): f_val = 0 for coeff in range(0,rank - 1): f_val = (f_val + pow(i, int(rank - coeff - 1), p) * coefficients[coeff]) % p f_val = f_val + coefficients[-1] if f_val % p == 0: roots.append(i) return roots def _diff_poly(root, coefficients, p): """A helper function used by polynomial_congruence. It returns the derivative of the polynomial evaluated at the root (mod p). Parameters ========== coefficients : list of integers p : prime number root : integer """ diff = 0 rank = len(coefficients) for coeff in range(0, rank - 1): if not coefficients[coeff]: continue diff = (diff + pow(root, rank - coeff - 2, p)*(rank - coeff - 1)* coefficients[coeff]) % p return diff % p def _val_poly(root, coefficients, p): """A helper function used by polynomial_congruence. It returns value of the polynomial at root (mod p). Parameters ========== coefficients : list of integers p : prime number root : integer """ rank = len(coefficients) f_val = 0 for coeff in range(0, rank - 1): f_val = (f_val + pow(root, rank - coeff - 1, p)* coefficients[coeff]) % p f_val = f_val + coefficients[-1] return f_val % p def _valid_expr(expr): """ return coefficients of expr if it is a univariate polynomial with integer coefficients else raise a ValueError. """ from sympy import Poly from sympy.polys.domains import ZZ if not expr.is_polynomial(): raise ValueError("The expression should be a polynomial") polynomial = Poly(expr) if not polynomial.is_univariate: raise ValueError("The expression should be univariate") if not polynomial.domain == ZZ: raise ValueError("The expression should should have integer coefficients") return polynomial.all_coeffs() def polynomial_congruence(expr, m): """ Find the solutions to a polynomial congruence equation modulo m. 
Parameters ========== coefficients : Coefficients of the Polynomial m : positive integer Examples ======== >>> from sympy.ntheory import polynomial_congruence >>> from sympy import Poly >>> from sympy.abc import x >>> expr = x**6 - 2*x**5 -35 >>> polynomial_congruence(expr, 6125) [3257] """ coefficients = _valid_expr(expr) coefficients = [num % m for num in coefficients] rank = len(coefficients) if rank == 3: return quadratic_congruence(*coefficients, m) if rank == 2: return quadratic_congruence(0, *coefficients, m) if coefficients[0] == 1 and 1 + coefficients[-1] == sum(coefficients): return nthroot_mod(-coefficients[-1], rank - 1, m, True) if isprime(m): return _polynomial_congruence_prime(coefficients, m) return _help(m, lambda p: _polynomial_congruence_prime(coefficients, p), lambda root, p: _diff_poly(root, coefficients, p), lambda root, p: _val_poly(root, coefficients, p))
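# --- Illustrative usage sketch (added for exposition; not part of the original
# module). It runs only when the module itself is executed, e.g. via
# ``python -m sympy.ntheory.residue_ntheory``, and cross-checks the modular
# root and discrete logarithm helpers defined above on small known cases.
if __name__ == "__main__":
    # every root returned by sqrt_mod squares back to the residue
    for r in sqrt_mod(11, 43, all_roots=True):
        assert pow(r, 2, 43) == 11
    # nthroot_mod solutions satisfy x**4 = 11 (mod 19)
    for x in nthroot_mod(11, 4, 19, all_roots=True):
        assert pow(x, 4, 19) == 11
    # discrete_log inverts modular exponentiation: 7**3 = 15 (mod 41)
    assert pow(7, discrete_log(41, 15, 7), 41) == 15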
from __future__ import print_function, division from sympy.core.compatibility import as_int def binomial_coefficients(n): """Return a dictionary containing pairs :math:`{(k1,k2) : C_kn}` where :math:`C_kn` are binomial coefficients and :math:`n=k1+k2`. Examples ======== >>> from sympy.ntheory import binomial_coefficients >>> binomial_coefficients(9) {(0, 9): 1, (1, 8): 9, (2, 7): 36, (3, 6): 84, (4, 5): 126, (5, 4): 126, (6, 3): 84, (7, 2): 36, (8, 1): 9, (9, 0): 1} See Also ======== binomial_coefficients_list, multinomial_coefficients """ n = as_int(n) d = {(0, n): 1, (n, 0): 1} a = 1 for k in range(1, n//2 + 1): a = (a * (n - k + 1))//k d[k, n - k] = d[n - k, k] = a return d def binomial_coefficients_list(n): """ Return a list of binomial coefficients as rows of the Pascal's triangle. Examples ======== >>> from sympy.ntheory import binomial_coefficients_list >>> binomial_coefficients_list(9) [1, 9, 36, 84, 126, 126, 84, 36, 9, 1] See Also ======== binomial_coefficients, multinomial_coefficients """ n = as_int(n) d = [1] * (n + 1) a = 1 for k in range(1, n//2 + 1): a = (a * (n - k + 1))//k d[k] = d[n - k] = a return d def multinomial_coefficients(m, n): r"""Return a dictionary containing pairs ``{(k1,k2,..,km) : C_kn}`` where ``C_kn`` are multinomial coefficients such that ``n=k1+k2+..+km``. Examples ======== >>> from sympy.ntheory import multinomial_coefficients >>> multinomial_coefficients(2, 5) # indirect doctest {(0, 5): 1, (1, 4): 5, (2, 3): 10, (3, 2): 10, (4, 1): 5, (5, 0): 1} Notes ===== The algorithm is based on the following result: .. math:: \binom{n}{k_1, \ldots, k_m} = \frac{k_1 + 1}{n - k_1} \sum_{i=2}^m \binom{n}{k_1 + 1, \ldots, k_i - 1, \ldots} Code contributed to Sage by Yann Laigle-Chapuy, copied with permission of the author. See Also ======== binomial_coefficients_list, binomial_coefficients """ m = as_int(m) n = as_int(n) if not m: if n: return {} return {(): 1} if m == 2: return binomial_coefficients(n) if m >= 2*n and n > 1: return dict(multinomial_coefficients_iterator(m, n)) t = [n] + [0] * (m - 1) r = {tuple(t): 1} if n: j = 0 # j will be the leftmost nonzero position else: j = m # enumerate tuples in co-lex order while j < m - 1: # compute next tuple tj = t[j] if j: t[j] = 0 t[0] = tj if tj > 1: t[j + 1] += 1 j = 0 start = 1 v = 0 else: j += 1 start = j + 1 v = r[tuple(t)] t[j] += 1 # compute the value # NB: the initialization of v was done above for k in range(start, m): if t[k]: t[k] -= 1 v += r[tuple(t)] t[k] += 1 t[0] -= 1 r[tuple(t)] = (v * tj) // (n - t[0]) return r def multinomial_coefficients_iterator(m, n, _tuple=tuple): """multinomial coefficient iterator This routine has been optimized for `m` large with respect to `n` by taking advantage of the fact that when the monomial tuples `t` are stripped of zeros, their coefficient is the same as that of the monomial tuples from ``multinomial_coefficients(n, n)``. Therefore, the latter coefficients are precomputed to save memory and time. 
>>> from sympy.ntheory.multinomial import multinomial_coefficients >>> m53, m33 = multinomial_coefficients(5,3), multinomial_coefficients(3,3) >>> m53[(0,0,0,1,2)] == m53[(0,0,1,0,2)] == m53[(1,0,2,0,0)] == m33[(0,1,2)] True Examples ======== >>> from sympy.ntheory.multinomial import multinomial_coefficients_iterator >>> it = multinomial_coefficients_iterator(20,3) >>> next(it) ((3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 1) """ m = as_int(m) n = as_int(n) if m < 2*n or n == 1: mc = multinomial_coefficients(m, n) for k, v in mc.items(): yield(k, v) else: mc = multinomial_coefficients(n, n) mc1 = {} for k, v in mc.items(): mc1[_tuple(filter(None, k))] = v mc = mc1 t = [n] + [0] * (m - 1) t1 = _tuple(t) b = _tuple(filter(None, t1)) yield (t1, mc[b]) if n: j = 0 # j will be the leftmost nonzero position else: j = m # enumerate tuples in co-lex order while j < m - 1: # compute next tuple tj = t[j] if j: t[j] = 0 t[0] = tj if tj > 1: t[j + 1] += 1 j = 0 else: j += 1 t[j] += 1 t[0] -= 1 t1 = _tuple(t) b = _tuple(filter(None, t1)) yield (t1, mc[b])
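# --- Illustrative usage sketch (added for exposition; not part of the original
# module). It runs only when the module itself is executed, e.g. via
# ``python -m sympy.ntheory.multinomial``, and checks the multinomial theorem
# at x_1 = ... = x_m = 1 (the coefficients must sum to m**n) as well as the
# consistency of the dictionary and iterator interfaces defined above.
if __name__ == "__main__":
    for m, n in [(2, 5), (3, 4), (5, 3)]:
        mc = multinomial_coefficients(m, n)
        assert sum(mc.values()) == m**n
        assert dict(multinomial_coefficients_iterator(m, n)) == mc
    # Pascal's triangle row 9 sums to 2**9
    assert sum(binomial_coefficients_list(9)) == 2**9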
""" Primality testing """ from __future__ import print_function, division from sympy.core.compatibility import as_int from mpmath.libmp import bitcount as _bitlength def _int_tuple(*i): return tuple(int(_) for _ in i) def is_euler_pseudoprime(n, b): """Returns True if n is prime or an Euler pseudoprime to base b, else False. Euler Pseudoprime : In arithmetic, an odd composite integer n is called an euler pseudoprime to base a, if a and n are coprime and satisfy the modular arithmetic congruence relation : a ^ (n-1)/2 = + 1(mod n) or a ^ (n-1)/2 = - 1(mod n) (where mod refers to the modulo operation). Examples ======== >>> from sympy.ntheory.primetest import is_euler_pseudoprime >>> is_euler_pseudoprime(2, 5) True References ========== .. [1] https://en.wikipedia.org/wiki/Euler_pseudoprime """ from sympy.ntheory.factor_ import trailing if not mr(n, [b]): return False n = as_int(n) r = n - 1 c = pow(b, r >> trailing(r), n) if c == 1: return True while True: if c == n - 1: return True c = pow(c, 2, n) if c == 1: return False def is_square(n, prep=True): """Return True if n == a * a for some integer a, else False. If n is suspected of *not* being a square then this is a quick method of confirming that it is not. Examples ======== >>> from sympy.ntheory.primetest import is_square >>> is_square(25) True >>> is_square(2) False References ========== [1] http://mersenneforum.org/showpost.php?p=110896 See Also ======== sympy.core.power.integer_nthroot """ if prep: n = as_int(n) if n < 0: return False if n in [0, 1]: return True m = n & 127 if not ((m*0x8bc40d7d) & (m*0xa1e2f5d1) & 0x14020a): m = n % 63 if not ((m*0x3d491df7) & (m*0xc824a9f9) & 0x10f14008): from sympy.core.power import integer_nthroot return integer_nthroot(n, 2)[1] return False def _test(n, base, s, t): """Miller-Rabin strong pseudoprime test for one base. Return False if n is definitely composite, True if n is probably prime, with a probability greater than 3/4. """ # do the Fermat test b = pow(base, t, n) if b == 1 or b == n - 1: return True else: for j in range(1, s): b = pow(b, 2, n) if b == n - 1: return True # see I. Niven et al. "An Introduction to Theory of Numbers", page 78 if b == 1: return False return False def mr(n, bases): """Perform a Miller-Rabin strong pseudoprime test on n using a given list of bases/witnesses. References ========== - Richard Crandall & Carl Pomerance (2005), "Prime Numbers: A Computational Perspective", Springer, 2nd edition, 135-138 A list of thresholds and the bases they require are here: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test#Deterministic_variants_of_the_test Examples ======== >>> from sympy.ntheory.primetest import mr >>> mr(1373651, [2, 3]) False >>> mr(479001599, [31, 73]) True """ from sympy.ntheory.factor_ import trailing from sympy.polys.domains import ZZ n = as_int(n) if n < 2: return False # remove powers of 2 from n-1 (= t * 2**s) s = trailing(n - 1) t = n >> s for base in bases: # Bases >= n are wrapped, bases < 2 are invalid if base >= n: base %= n if base >= 2: base = ZZ(base) if not _test(n, base, s, t): return False return True def _lucas_sequence(n, P, Q, k): """Return the modular Lucas sequence (U_k, V_k, Q_k). Given a Lucas sequence defined by P, Q, returns the kth values for U and V, along with Q^k, all modulo n. This is intended for use with possibly very large values of n and k, where the combinatorial functions would be completely unusable. 
The modular Lucas sequences are used in numerous places in number theory, especially in the Lucas compositeness tests and the various n + 1 proofs. Examples ======== >>> from sympy.ntheory.primetest import _lucas_sequence >>> N = 10**2000 + 4561 >>> sol = U, V, Qk = _lucas_sequence(N, 3, 1, N//2); sol (0, 2, 1) """ D = P*P - 4*Q if n < 2: raise ValueError("n must be >= 2") if k < 0: raise ValueError("k must be >= 0") if D == 0: raise ValueError("D must not be zero") if k == 0: return _int_tuple(0, 2, Q) U = 1 V = P Qk = Q b = _bitlength(k) if Q == 1: # Optimization for extra strong tests. while b > 1: U = (U*V) % n V = (V*V - 2) % n b -= 1 if (k >> (b - 1)) & 1: U, V = U*P + V, V*P + U*D if U & 1: U += n if V & 1: V += n U, V = U >> 1, V >> 1 elif P == 1 and Q == -1: # Small optimization for 50% of Selfridge parameters. while b > 1: U = (U*V) % n if Qk == 1: V = (V*V - 2) % n else: V = (V*V + 2) % n Qk = 1 b -= 1 if (k >> (b-1)) & 1: U, V = U + V, V + U*D if U & 1: U += n if V & 1: V += n U, V = U >> 1, V >> 1 Qk = -1 else: # The general case with any P and Q. while b > 1: U = (U*V) % n V = (V*V - 2*Qk) % n Qk *= Qk b -= 1 if (k >> (b - 1)) & 1: U, V = U*P + V, V*P + U*D if U & 1: U += n if V & 1: V += n U, V = U >> 1, V >> 1 Qk *= Q Qk %= n return _int_tuple(U % n, V % n, Qk) def _lucas_selfridge_params(n): """Calculates the Selfridge parameters (D, P, Q) for n. This is method A from page 1401 of Baillie and Wagstaff. References ========== - "Lucas Pseudoprimes", Baillie and Wagstaff, 1980. http://mpqs.free.fr/LucasPseudoprimes.pdf """ from sympy.core import igcd from sympy.ntheory.residue_ntheory import jacobi_symbol D = 5 while True: g = igcd(abs(D), n) if g > 1 and g != n: return (0, 0, 0) if jacobi_symbol(D, n) == -1: break if D > 0: D = -D - 2 else: D = -D + 2 return _int_tuple(D, 1, (1 - D)/4) def _lucas_extrastrong_params(n): """Calculates the "extra strong" parameters (D, P, Q) for n. References ========== - OEIS A217719: Extra Strong Lucas Pseudoprimes https://oeis.org/A217719 - https://en.wikipedia.org/wiki/Lucas_pseudoprime """ from sympy.core import igcd from sympy.ntheory.residue_ntheory import jacobi_symbol P, Q, D = 3, 1, 5 while True: g = igcd(D, n) if g > 1 and g != n: return (0, 0, 0) if jacobi_symbol(D, n) == -1: break P += 1 D = P*P - 4 return _int_tuple(D, P, Q) def is_lucas_prp(n): """Standard Lucas compositeness test with Selfridge parameters. Returns False if n is definitely composite, and True if n is a Lucas probable prime. This is typically used in combination with the Miller-Rabin test. References ========== - "Lucas Pseudoprimes", Baillie and Wagstaff, 1980. http://mpqs.free.fr/LucasPseudoprimes.pdf - OEIS A217120: Lucas Pseudoprimes https://oeis.org/A217120 - https://en.wikipedia.org/wiki/Lucas_pseudoprime Examples ======== >>> from sympy.ntheory.primetest import isprime, is_lucas_prp >>> for i in range(10000): ... if is_lucas_prp(i) and not isprime(i): ... print(i) 323 377 1159 1829 3827 5459 5777 9071 9179 """ n = as_int(n) if n == 2: return True if n < 2 or (n % 2) == 0: return False if is_square(n, False): return False D, P, Q = _lucas_selfridge_params(n) if D == 0: return False U, V, Qk = _lucas_sequence(n, P, Q, n+1) return U == 0 def is_strong_lucas_prp(n): """Strong Lucas compositeness test with Selfridge parameters. Returns False if n is definitely composite, and True if n is a strong Lucas probable prime. This is often used in combination with the Miller-Rabin test, and in particular, when combined with M-R base 2 creates the strong BPSW test. 
References ========== - "Lucas Pseudoprimes", Baillie and Wagstaff, 1980. http://mpqs.free.fr/LucasPseudoprimes.pdf - OEIS A217255: Strong Lucas Pseudoprimes https://oeis.org/A217255 - https://en.wikipedia.org/wiki/Lucas_pseudoprime - https://en.wikipedia.org/wiki/Baillie-PSW_primality_test Examples ======== >>> from sympy.ntheory.primetest import isprime, is_strong_lucas_prp >>> for i in range(20000): ... if is_strong_lucas_prp(i) and not isprime(i): ... print(i) 5459 5777 10877 16109 18971 """ from sympy.ntheory.factor_ import trailing n = as_int(n) if n == 2: return True if n < 2 or (n % 2) == 0: return False if is_square(n, False): return False D, P, Q = _lucas_selfridge_params(n) if D == 0: return False # remove powers of 2 from n+1 (= k * 2**s) s = trailing(n + 1) k = (n+1) >> s U, V, Qk = _lucas_sequence(n, P, Q, k) if U == 0 or V == 0: return True for r in range(1, s): V = (V*V - 2*Qk) % n if V == 0: return True Qk = pow(Qk, 2, n) return False def is_extra_strong_lucas_prp(n): """Extra Strong Lucas compositeness test. Returns False if n is definitely composite, and True if n is a "extra strong" Lucas probable prime. The parameters are selected using P = 3, Q = 1, then incrementing P until (D|n) == -1. The test itself is as defined in Grantham 2000, from the Mo and Jones preprint. The parameter selection and test are the same as used in OEIS A217719, Perl's Math::Prime::Util, and the Lucas pseudoprime page on Wikipedia. With these parameters, there are no counterexamples below 2^64 nor any known above that range. It is 20-50% faster than the strong test. Because of the different parameters selected, there is no relationship between the strong Lucas pseudoprimes and extra strong Lucas pseudoprimes. In particular, one is not a subset of the other. References ========== - "Frobenius Pseudoprimes", Jon Grantham, 2000. http://www.ams.org/journals/mcom/2001-70-234/S0025-5718-00-01197-2/ - OEIS A217719: Extra Strong Lucas Pseudoprimes https://oeis.org/A217719 - https://en.wikipedia.org/wiki/Lucas_pseudoprime Examples ======== >>> from sympy.ntheory.primetest import isprime, is_extra_strong_lucas_prp >>> for i in range(20000): ... if is_extra_strong_lucas_prp(i) and not isprime(i): ... print(i) 989 3239 5777 10877 """ # Implementation notes: # 1) the parameters differ from Thomas R. Nicely's. His parameter # selection leads to pseudoprimes that overlap M-R tests, and # contradict Baillie and Wagstaff's suggestion of (D|n) = -1. # 2) The MathWorld page as of June 2013 specifies Q=-1. The Lucas # sequence must have Q=1. See Grantham theorem 2.3, any of the # references on the MathWorld page, or run it and see Q=-1 is wrong. from sympy.ntheory.factor_ import trailing n = as_int(n) if n == 2: return True if n < 2 or (n % 2) == 0: return False if is_square(n, False): return False D, P, Q = _lucas_extrastrong_params(n) if D == 0: return False # remove powers of 2 from n+1 (= k * 2**s) s = trailing(n + 1) k = (n+1) >> s U, V, Qk = _lucas_sequence(n, P, Q, k) if U == 0 and (V == 2 or V == n - 2): return True if V == 0: return True for r in range(1, s): V = (V*V - 2) % n if V == 0: return True return False def isprime(n): """ Test if n is a prime number (True) or not (False). For n < 2^64 the answer is definitive; larger n values have a small probability of actually being pseudoprimes. Negative numbers (e.g. -2) are not considered prime. The first step is looking for trivial factors, which if found enables a quick return. Next, if the sieve is large enough, use bisection search on the sieve. 
For small numbers, a set of deterministic Miller-Rabin tests are performed with bases that are known to have no counterexamples in their range. Finally if the number is larger than 2^64, a strong BPSW test is performed. While this is a probable prime test and we believe counterexamples exist, there are no known counterexamples. Examples ======== >>> from sympy.ntheory import isprime >>> isprime(13) True >>> isprime(13.0) # limited precision False >>> isprime(15) False Notes ===== This routine is intended only for integer input, not numerical expressions which may represent numbers. Floats are also rejected as input because they represent numbers of limited precision. While it is tempting to permit 7.0 to represent an integer there are errors that may "pass silently" if this is allowed: >>> from sympy import Float, S >>> int(1e3) == 1e3 == 10**3 True >>> int(1e23) == 1e23 True >>> int(1e23) == 10**23 False >>> near_int = 1 + S(1)/10**19 >>> near_int == int(near_int) False >>> n = Float(near_int, 10) # truncated by precision >>> n == int(n) True >>> n = Float(near_int, 20) >>> n == int(n) False See Also ======== sympy.ntheory.generate.primerange : Generates all primes in a given range sympy.ntheory.generate.primepi : Return the number of primes less than or equal to n sympy.ntheory.generate.prime : Return the nth prime References ========== - https://en.wikipedia.org/wiki/Strong_pseudoprime - "Lucas Pseudoprimes", Baillie and Wagstaff, 1980. http://mpqs.free.fr/LucasPseudoprimes.pdf - https://en.wikipedia.org/wiki/Baillie-PSW_primality_test """ try: n = as_int(n) except ValueError: return False # Step 1, do quick composite testing via trial division. The individual # modulo tests benchmark faster than one or two primorial igcds for me. # The point here is just to speedily handle small numbers and many # composites. Step 2 only requires that n <= 2 get handled here. if n in [2, 3, 5]: return True if n < 2 or (n % 2) == 0 or (n % 3) == 0 or (n % 5) == 0: return False if n < 49: return True if (n % 7) == 0 or (n % 11) == 0 or (n % 13) == 0 or (n % 17) == 0 or \ (n % 19) == 0 or (n % 23) == 0 or (n % 29) == 0 or (n % 31) == 0 or \ (n % 37) == 0 or (n % 41) == 0 or (n % 43) == 0 or (n % 47) == 0: return False if n < 2809: return True if n <= 23001: return pow(2, n, n) == 2 and n not in [7957, 8321, 13747, 18721, 19951] # bisection search on the sieve if the sieve is large enough from sympy.ntheory.generate import sieve as s if n <= s._list[-1]: l, u = s.search(n) return l == u # If we have GMPY2, skip straight to step 3 and do a strong BPSW test. # This should be a bit faster than our step 2, and for large values will # be a lot faster than our step 3 (C+GMP vs. Python). from sympy.core.compatibility import HAS_GMPY if HAS_GMPY == 2: from gmpy2 import is_strong_prp, is_strong_selfridge_prp return is_strong_prp(n, 2) and is_strong_selfridge_prp(n) # Step 2: deterministic Miller-Rabin testing for numbers < 2^64. See: # https://miller-rabin.appspot.com/ # for lists. We have made sure the M-R routine will successfully handle # bases larger than n, so we can use the minimal set. 
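    # Each ``if n < bound`` branch below uses a witness set that has been
    # verified (see the list referenced above) to leave no strong pseudoprimes
    # below that bound, so the answer is deterministic for all n < 2**64.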
if n < 341531: return mr(n, [9345883071009581737]) if n < 885594169: return mr(n, [725270293939359937, 3569819667048198375]) if n < 350269456337: return mr(n, [4230279247111683200, 14694767155120705706, 16641139526367750375]) if n < 55245642489451: return mr(n, [2, 141889084524735, 1199124725622454117, 11096072698276303650]) if n < 7999252175582851: return mr(n, [2, 4130806001517, 149795463772692060, 186635894390467037, 3967304179347715805]) if n < 585226005592931977: return mr(n, [2, 123635709730000, 9233062284813009, 43835965440333360, 761179012939631437, 1263739024124850375]) if n < 18446744073709551616: return mr(n, [2, 325, 9375, 28178, 450775, 9780504, 1795265022]) # We could do this instead at any point: #if n < 18446744073709551616: # return mr(n, [2]) and is_extra_strong_lucas_prp(n) # Here are tests that are safe for MR routines that don't understand # large bases. #if n < 9080191: # return mr(n, [31, 73]) #if n < 19471033: # return mr(n, [2, 299417]) #if n < 38010307: # return mr(n, [2, 9332593]) #if n < 316349281: # return mr(n, [11000544, 31481107]) #if n < 4759123141: # return mr(n, [2, 7, 61]) #if n < 105936894253: # return mr(n, [2, 1005905886, 1340600841]) #if n < 31858317218647: # return mr(n, [2, 642735, 553174392, 3046413974]) #if n < 3071837692357849: # return mr(n, [2, 75088, 642735, 203659041, 3613982119]) #if n < 18446744073709551616: # return mr(n, [2, 325, 9375, 28178, 450775, 9780504, 1795265022]) # Step 3: BPSW. # # Time for isprime(10**2000 + 4561), no gmpy or gmpy2 installed # 44.0s old isprime using 46 bases # 5.3s strong BPSW + one random base # 4.3s extra strong BPSW + one random base # 4.1s strong BPSW # 3.2s extra strong BPSW # Classic BPSW from page 1401 of the paper. See alternate ideas below. return mr(n, [2]) and is_strong_lucas_prp(n) # Using extra strong test, which is somewhat faster #return mr(n, [2]) and is_extra_strong_lucas_prp(n) # Add a random M-R base #import random #return mr(n, [2, random.randint(3, n-1)]) and is_strong_lucas_prp(n) def is_gaussian_prime(num): r"""Test if num is a Gaussian prime number. References ========== .. [1] https://oeis.org/wiki/Gaussian_primes """ from sympy import sympify num = sympify(num) a, b = num.as_real_imag() a = as_int(a) b = as_int(b) if a == 0: b = abs(b) return isprime(b) and b % 4 == 3 elif b == 0: a = abs(a) return isprime(a) and a % 4 == 3 return isprime(a**2 + b**2)
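# --- Illustrative usage sketch (added for exposition; not part of the original
# module). It runs only when the module itself is executed, e.g. via
# ``python -m sympy.ntheory.primetest``, and exercises the probable-prime
# building blocks above together with the composed isprime() routine.
if __name__ == "__main__":
    # 2047 = 23*89 is the smallest strong pseudoprime to base 2; adding the
    # witness 3 exposes it
    assert mr(2047, [2]) and not mr(2047, [2, 3])
    # 5777 = 53*109 is a strong Lucas pseudoprime but not a prime
    assert is_strong_lucas_prp(5777) and not isprime(5777)
    # a Mersenne prime below 2**64 is certified deterministically
    assert isprime(2**61 - 1)
    # rational primes are Gaussian primes only when congruent to 3 mod 4
    assert is_gaussian_prime(3) and not is_gaussian_prime(5)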
from __future__ import print_function, division from sympy import Integer import sympy.polys import sys if sys.version_info < (3,5): from fractions import gcd else: from math import gcd def egyptian_fraction(r, algorithm="Greedy"): """ Return the list of denominators of an Egyptian fraction expansion [1]_ of the said rational `r`. Parameters ========== r : Rational a positive rational number. algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional Denotes the algorithm to be used (the default is "Greedy"). Examples ======== >>> from sympy import Rational >>> from sympy.ntheory.egyptian_fraction import egyptian_fraction >>> egyptian_fraction(Rational(3, 7)) [3, 11, 231] >>> egyptian_fraction(Rational(3, 7), "Graham Jewett") [7, 8, 9, 56, 57, 72, 3192] >>> egyptian_fraction(Rational(3, 7), "Takenouchi") [4, 7, 28] >>> egyptian_fraction(Rational(3, 7), "Golomb") [3, 15, 35] >>> egyptian_fraction(Rational(11, 5), "Golomb") [1, 2, 3, 4, 9, 234, 1118, 2580] See Also ======== sympy.core.numbers.Rational Notes ===== Currently the following algorithms are supported: 1) Greedy Algorithm Also called the Fibonacci-Sylvester algorithm [2]_. At each step, extract the largest unit fraction less than the target and replace the target with the remainder. It has some distinct properties: a) Given `p/q` in lowest terms, generates an expansion of maximum length `p`. Even as the numerators get large, the number of terms is seldom more than a handful. b) Uses minimal memory. c) The terms can blow up (standard examples of this are 5/121 and 31/311). The denominator is at most squared at each step (doubly-exponential growth) and typically exhibits singly-exponential growth. 2) Graham Jewett Algorithm The algorithm suggested by the result of Graham and Jewett. Note that this has a tendency to blow up: the length of the resulting expansion is always ``2**(x/gcd(x, y)) - 1``. See [3]_. 3) Takenouchi Algorithm The algorithm suggested by Takenouchi (1921). Differs from the Graham-Jewett algorithm only in the handling of duplicates. See [3]_. 4) Golomb's Algorithm A method given by Golumb (1962), using modular arithmetic and inverses. It yields the same results as a method using continued fractions proposed by Bleicher (1972). See [4]_. If the given rational is greater than or equal to 1, a greedy algorithm of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking all the unit fractions of this sequence until adding one more would be greater than the given number. This list of denominators is prefixed to the result from the requested algorithm used on the remainder. For example, if r is 8/3, using the Greedy algorithm, we get [1, 2, 3, 4, 5, 6, 7, 14, 420], where the beginning of the sequence, [1, 2, 3, 4, 5, 6, 7] is part of the harmonic sequence summing to 363/140, leaving a remainder of 31/420, which yields [14, 420] by the Greedy algorithm. The result of egyptian_fraction(Rational(8, 3), "Golomb") is [1, 2, 3, 4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820], and so on. References ========== .. [1] https://en.wikipedia.org/wiki/Egyptian_fraction .. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions .. [3] https://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html .. 
[4] http://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf """ if r <= 0: raise ValueError("Value must be positive") prefix, rem = egypt_harmonic(r) if rem == 0: return prefix x, y = rem.as_numer_denom() if algorithm == "Greedy": return prefix + egypt_greedy(x, y) elif algorithm == "Graham Jewett": return prefix + egypt_graham_jewett(x, y) elif algorithm == "Takenouchi": return prefix + egypt_takenouchi(x, y) elif algorithm == "Golomb": return prefix + egypt_golomb(x, y) else: raise ValueError("Entered invalid algorithm") def egypt_greedy(x, y): if x == 1: return [y] else: a = (-y) % (x) b = y*(y//x + 1) c = gcd(a, b) if c > 1: num, denom = a//c, b//c else: num, denom = a, b return [y//x + 1] + egypt_greedy(num, denom) def egypt_graham_jewett(x, y): l = [y] * x # l is now a list of integers whose reciprocals sum to x/y. # we shall now proceed to manipulate the elements of l without # changing the reciprocated sum until all elements are unique. while len(l) != len(set(l)): l.sort() # so the list has duplicates. find a smallest pair for i in range(len(l) - 1): if l[i] == l[i + 1]: break # we have now identified a pair of identical # elements: l[i] and l[i + 1]. # now comes the application of the result of graham and jewett: l[i + 1] = l[i] + 1 # and we just iterate that until the list has no duplicates. l.append(l[i]*(l[i] + 1)) return sorted(l) def egypt_takenouchi(x, y): l = [y] * x while len(l) != len(set(l)): l.sort() for i in range(len(l) - 1): if l[i] == l[i + 1]: break k = l[i] if k % 2 == 0: l[i] = l[i] // 2 del l[i + 1] else: l[i], l[i + 1] = (k + 1)//2, k*(k + 1)//2 return sorted(l) def egypt_golomb(x, y): if x == 1: return [y] xp = sympy.polys.ZZ.invert(int(x), int(y)) rv = [Integer(xp*y)] rv.extend(egypt_golomb((x*xp - 1)//y, xp)) return sorted(rv) def egypt_harmonic(r): rv = [] d = Integer(1) acc = Integer(0) while acc + 1/d <= r: acc += 1/d rv.append(d) d += 1 return (rv, r - acc)
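# --- Illustrative usage sketch (added for exposition; not part of the original
# module). It runs only when the module itself is executed, e.g. via
# ``python -m sympy.ntheory.egyptian_fraction``, and verifies that every
# supported algorithm returns distinct unit fractions summing back to the
# input rational.
if __name__ == "__main__":
    from sympy import Rational
    r = Rational(3, 7)
    for algo in ("Greedy", "Graham Jewett", "Takenouchi", "Golomb"):
        denoms = egyptian_fraction(r, algo)
        assert sum(Rational(1, d) for d in denoms) == r
        assert len(denoms) == len(set(denoms))  # denominators are distinct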
bb4ae599ad5707f3fe14e42cd64559528d5448e3f284ae307ece1352d9693887
""" Integer factorization """ from __future__ import print_function, division from collections import defaultdict import random import math from sympy.core import sympify from sympy.core.compatibility import as_int, SYMPY_INTS from sympy.core.containers import Dict from sympy.core.evalf import bitcount from sympy.core.expr import Expr from sympy.core.function import Function from sympy.core.logic import fuzzy_and from sympy.core.mul import Mul, prod from sympy.core.numbers import igcd, ilcm, Rational, Integer from sympy.core.power import integer_nthroot, Pow from sympy.core.singleton import S from .primetest import isprime from .generate import sieve, primerange, nextprime from sympy.utilities.misc import filldedent # Note: This list should be updated whenever new Mersenne primes are found. # Refer: https://www.mersenne.org/ MERSENNE_PRIME_EXPONENTS = (2, 3, 5, 7, 13, 17, 19, 31, 61, 89, 107, 127, 521, 607, 1279, 2203, 2281, 3217, 4253, 4423, 9689, 9941, 11213, 19937, 21701, 23209, 44497, 86243, 110503, 132049, 216091, 756839, 859433, 1257787, 1398269, 2976221, 3021377, 6972593, 13466917, 20996011, 24036583, 25964951, 30402457, 32582657, 37156667, 42643801, 43112609, 57885161, 74207281, 77232917, 82589933) # compute more when needed for i in Mersenne prime exponents PERFECT = [6] # 2**(i-1)*(2**i-1) MERSENNES = [3] # 2**i - 1 def _ismersenneprime(n): global MERSENNES j = len(MERSENNES) while n > MERSENNES[-1] and j < len(MERSENNE_PRIME_EXPONENTS): # conservatively grow the list MERSENNES.append(2**MERSENNE_PRIME_EXPONENTS[j] - 1) j += 1 return n in MERSENNES def _isperfect(n): global PERFECT if n % 2 == 0: j = len(PERFECT) while n > PERFECT[-1] and j < len(MERSENNE_PRIME_EXPONENTS): # conservatively grow the list t = 2**(MERSENNE_PRIME_EXPONENTS[j] - 1) PERFECT.append(t*(2*t - 1)) j += 1 return n in PERFECT small_trailing = [0] * 256 for j in range(1,8): small_trailing[1<<j::1<<(j+1)] = [j] * (1<<(7-j)) def smoothness(n): """ Return the B-smooth and B-power smooth values of n. The smoothness of n is the largest prime factor of n; the power- smoothness is the largest divisor raised to its multiplicity. Examples ======== >>> from sympy.ntheory.factor_ import smoothness >>> smoothness(2**7*3**2) (3, 128) >>> smoothness(2**4*13) (13, 16) >>> smoothness(2) (2, 2) See Also ======== factorint, smoothness_p """ if n == 1: return (1, 1) # not prime, but otherwise this causes headaches facs = factorint(n) return max(facs), max(m**facs[m] for m in facs) def smoothness_p(n, m=-1, power=0, visual=None): """ Return a list of [m, (p, (M, sm(p + m), psm(p + m)))...] where: 1. p**M is the base-p divisor of n 2. sm(p + m) is the smoothness of p + m (m = -1 by default) 3. psm(p + m) is the power smoothness of p + m The list is sorted according to smoothness (default) or by power smoothness if power=1. The smoothness of the numbers to the left (m = -1) or right (m = 1) of a factor govern the results that are obtained from the p +/- 1 type factoring methods. 
>>> from sympy.ntheory.factor_ import smoothness_p, factorint >>> smoothness_p(10431, m=1) (1, [(3, (2, 2, 4)), (19, (1, 5, 5)), (61, (1, 31, 31))]) >>> smoothness_p(10431) (-1, [(3, (2, 2, 2)), (19, (1, 3, 9)), (61, (1, 5, 5))]) >>> smoothness_p(10431, power=1) (-1, [(3, (2, 2, 2)), (61, (1, 5, 5)), (19, (1, 3, 9))]) If visual=True then an annotated string will be returned: >>> print(smoothness_p(21477639576571, visual=1)) p**i=4410317**1 has p-1 B=1787, B-pow=1787 p**i=4869863**1 has p-1 B=2434931, B-pow=2434931 This string can also be generated directly from a factorization dictionary and vice versa: >>> factorint(17*9) {3: 2, 17: 1} >>> smoothness_p(_) 'p**i=3**2 has p-1 B=2, B-pow=2\\np**i=17**1 has p-1 B=2, B-pow=16' >>> smoothness_p(_) {3: 2, 17: 1} The table of the output logic is: ====== ====== ======= ======= | Visual ------ ---------------------- Input True False other ====== ====== ======= ======= dict str tuple str str str tuple dict tuple str tuple str n str tuple tuple mul str tuple tuple ====== ====== ======= ======= See Also ======== factorint, smoothness """ from sympy.utilities import flatten # visual must be True, False or other (stored as None) if visual in (1, 0): visual = bool(visual) elif visual not in (True, False): visual = None if isinstance(n, str): if visual: return n d = {} for li in n.splitlines(): k, v = [int(i) for i in li.split('has')[0].split('=')[1].split('**')] d[k] = v if visual is not True and visual is not False: return d return smoothness_p(d, visual=False) elif type(n) is not tuple: facs = factorint(n, visual=False) if power: k = -1 else: k = 1 if type(n) is not tuple: rv = (m, sorted([(f, tuple([M] + list(smoothness(f + m)))) for f, M in [i for i in facs.items()]], key=lambda x: (x[1][k], x[0]))) else: rv = n if visual is False or (visual is not True) and (type(n) in [int, Mul]): return rv lines = [] for dat in rv[1]: dat = flatten(dat) dat.insert(2, m) lines.append('p**i=%i**%i has p%+i B=%i, B-pow=%i' % tuple(dat)) return '\n'.join(lines) def trailing(n): """Count the number of trailing zero digits in the binary representation of n, i.e. determine the largest power of 2 that divides n. Examples ======== >>> from sympy import trailing >>> trailing(128) 7 >>> trailing(63) 0 """ n = abs(int(n)) if not n: return 0 low_byte = n & 0xff if low_byte: return small_trailing[low_byte] # 2**m is quick for z up through 2**30 z = bitcount(n) - 1 if isinstance(z, SYMPY_INTS): if n == 1 << z: return z if z < 300: # fixed 8-byte reduction t = 8 n >>= 8 while not n & 0xff: n >>= 8 t += 8 return t + small_trailing[n & 0xff] # binary reduction important when there might be a large # number of trailing 0s t = 0 p = 8 while not n & 1: while not n & ((1 << p) - 1): n >>= p t += p p *= 2 p //= 2 return t def multiplicity(p, n): """ Find the greatest integer m such that p**m divides n. 
Examples ======== >>> from sympy.ntheory import multiplicity >>> from sympy.core.numbers import Rational as R >>> [multiplicity(5, n) for n in [8, 5, 25, 125, 250]] [0, 1, 2, 3, 3] >>> multiplicity(3, R(1, 9)) -2 Note: when checking for the multiplicity of a number in a large factorial it is most efficient to send it as an unevaluated factorial or to call ``multiplicity_in_factorial`` directly: >>> from sympy.ntheory import multiplicity_in_factorial >>> from sympy import factorial >>> p = factorial(25) >>> n = 2**100 >>> nfac = factorial(n, evaluate=False) >>> multiplicity(p, nfac) 52818775009509558395695966887 >>> _ == multiplicity_in_factorial(p, n) True """ from sympy.functions.combinatorial.factorials import factorial try: p, n = as_int(p), as_int(n) except ValueError: if all(isinstance(i, (SYMPY_INTS, Rational)) for i in (p, n)): p = Rational(p) n = Rational(n) if p.q == 1: if n.p == 1: return -multiplicity(p.p, n.q) return multiplicity(p.p, n.p) - multiplicity(p.p, n.q) elif p.p == 1: return multiplicity(p.q, n.q) else: like = min( multiplicity(p.p, n.p), multiplicity(p.q, n.q)) cross = min( multiplicity(p.q, n.p), multiplicity(p.p, n.q)) return like - cross elif (isinstance(p, (SYMPY_INTS, Integer)) and isinstance(n, factorial) and isinstance(n.args[0], Integer) and n.args[0] >= 0): return multiplicity_in_factorial(p, n.args[0]) raise ValueError('expecting ints or fractions, got %s and %s' % (p, n)) if n == 0: raise ValueError('no such integer exists: multiplicity of %s is not-defined' %(n)) if p == 2: return trailing(n) if p < 2: raise ValueError('p must be an integer, 2 or larger, but got %s' % p) if p == n: return 1 m = 0 n, rem = divmod(n, p) while not rem: m += 1 if m > 5: # The multiplicity could be very large. Better # to increment in powers of two e = 2 while 1: ppow = p**e if ppow < n: nnew, rem = divmod(n, ppow) if not rem: m += e e *= 2 n = nnew continue return m + multiplicity(p, n) n, rem = divmod(n, p) return m def multiplicity_in_factorial(p, n): """return the largest integer ``m`` such that ``p**m`` divides ``n!`` without calculating the factorial of ``n``. Examples ======== >>> from sympy.ntheory import multiplicity_in_factorial >>> from sympy import factorial >>> multiplicity_in_factorial(2, 3) 1 An instructive use of this is to tell how many trailing zeros a given factorial has. For example, there are 6 in 25!: >>> factorial(25) 15511210043330985984000000 >>> multiplicity_in_factorial(10, 25) 6 For large factorials, it is much faster/feasible to use this function rather than computing the actual factorial: >>> multiplicity_in_factorial(factorial(25), 2**100) 52818775009509558395695966887 """ p, n = as_int(p), as_int(n) if p <= 0: raise ValueError('expecting positive integer got %s' % p ) if n < 0: raise ValueError('expecting non-negative integer got %s' % n ) factors = factorint(p) # keep only the largest of a given multiplicity since those # of a given multiplicity will be goverened by the behavior # of the largest factor test = defaultdict(int) for k, v in factors.items(): test[v] = max(k, test[v]) keep = set(test.values()) # remove others from factors for k in list(factors.keys()): if k not in keep: factors.pop(k) mp = S.Infinity for i in factors: # multiplicity of i in n! is mi = (n - (sum(digits(n, i)) - i))//(i - 1) # multiplicity of p in n! 
depends on multiplicity # of prime `i` in p, so we floor divide by factors[i] # and keep it if smaller than the multiplicity of p # seen so far mp = min(mp, mi//factors[i]) return mp def perfect_power(n, candidates=None, big=True, factor=True): """ Return ``(b, e)`` such that ``n`` == ``b**e`` if ``n`` is a perfect power with ``e > 1``, else ``False``. A ValueError is raised if ``n`` is not an integer or is not positive. By default, the base is recursively decomposed and the exponents collected so the largest possible ``e`` is sought. If ``big=False`` then the smallest possible ``e`` (thus prime) will be chosen. If ``factor=True`` then simultaneous factorization of ``n`` is attempted since finding a factor indicates the only possible root for ``n``. This is True by default since only a few small factors will be tested in the course of searching for the perfect power. The use of ``candidates`` is primarily for internal use; if provided, False will be returned if ``n`` cannot be written as a power with one of the candidates as an exponent and factoring (beyond testing for a factor of 2) will not be attempted. Examples ======== >>> from sympy import perfect_power >>> perfect_power(16) (2, 4) >>> perfect_power(16, big=False) (4, 2) Notes ===== To know whether an integer is a perfect power of 2 use >>> is2pow = lambda n: bool(n and not n & (n - 1)) >>> [(i, is2pow(i)) for i in range(5)] [(0, False), (1, True), (2, True), (3, False), (4, True)] It is not necessary to provide ``candidates``. When provided it will be assumed that they are ints. The first one that is larger than the computed maximum possible exponent will signal failure for the routine. >>> perfect_power(3**8, [9]) False >>> perfect_power(3**8, [2, 4, 8]) (3, 8) >>> perfect_power(3**8, [4, 8], big=False) (9, 4) See Also ======== sympy.core.power.integer_nthroot sympy.ntheory.primetest.is_square """ from sympy.core.power import integer_nthroot n = as_int(n) if n < 3: if n < 1: raise ValueError('expecting positive n') return False logn = math.log(n, 2) max_possible = int(logn) + 2 # only check values less than this not_square = n % 10 in [2, 3, 7, 8] # squares cannot end in 2, 3, 7, 8 min_possible = 2 + not_square if not candidates: candidates = primerange(min_possible, max_possible) else: candidates = sorted([i for i in candidates if min_possible <= i < max_possible]) if n%2 == 0: e = trailing(n) candidates = [i for i in candidates if e%i == 0] if big: candidates = reversed(candidates) for e in candidates: r, ok = integer_nthroot(n, e) if ok: return (r, e) return False def _factors(): rv = 2 + n % 2 while True: yield rv rv = nextprime(rv) for fac, e in zip(_factors(), candidates): # see if there is a factor present if factor and n % fac == 0: # find what the potential power is if fac == 2: e = trailing(n) else: e = multiplicity(fac, n) # if it's a trivial power we are done if e == 1: return False # maybe the e-th root of n is exact r, exact = integer_nthroot(n, e) if not exact: # Having a factor, we know that e is the maximal # possible value for a root of n. 
# If n = fac**e*m can be written as a perfect # power then see if m can be written as r**E where # gcd(e, E) != 1 so n = (fac**(e//E)*r)**E m = n//fac**e rE = perfect_power(m, candidates=divisors(e, generator=True)) if not rE: return False else: r, E = rE r, e = fac**(e//E)*r, E if not big: e0 = primefactors(e) if e0[0] != e: r, e = r**(e//e0[0]), e0[0] return r, e # Weed out downright impossible candidates if logn/e < 40: b = 2.0**(logn/e) if abs(int(b + 0.5) - b) > 0.01: continue # now see if the plausible e makes a perfect power r, exact = integer_nthroot(n, e) if exact: if big: m = perfect_power(r, big=big, factor=factor) if m: r, e = m[0], e*m[1] return int(r), e return False def pollard_rho(n, s=2, a=1, retries=5, seed=1234, max_steps=None, F=None): r""" Use Pollard's rho method to try to extract a nontrivial factor of ``n``. The returned factor may be a composite number. If no factor is found, ``None`` is returned. The algorithm generates pseudo-random values of x with a generator function, replacing x with F(x). If F is not supplied then the function x**2 + ``a`` is used. The first value supplied to F(x) is ``s``. Upon failure (if ``retries`` is > 0) a new ``a`` and ``s`` will be supplied; the ``a`` will be ignored if F was supplied. The sequence of numbers generated by such functions generally have a a lead-up to some number and then loop around back to that number and begin to repeat the sequence, e.g. 1, 2, 3, 4, 5, 3, 4, 5 -- this leader and loop look a bit like the Greek letter rho, and thus the name, 'rho'. For a given function, very different leader-loop values can be obtained so it is a good idea to allow for retries: >>> from sympy.ntheory.generate import cycle_length >>> n = 16843009 >>> F = lambda x:(2048*pow(x, 2, n) + 32767) % n >>> for s in range(5): ... print('loop length = %4i; leader length = %3i' % next(cycle_length(F, s))) ... loop length = 2489; leader length = 42 loop length = 78; leader length = 120 loop length = 1482; leader length = 99 loop length = 1482; leader length = 285 loop length = 1482; leader length = 100 Here is an explicit example where there is a two element leadup to a sequence of 3 numbers (11, 14, 4) that then repeat: >>> x=2 >>> for i in range(9): ... x=(x**2+12)%17 ... print(x) ... 16 13 11 14 4 11 14 4 11 >>> next(cycle_length(lambda x: (x**2+12)%17, 2)) (3, 2) >>> list(cycle_length(lambda x: (x**2+12)%17, 2, values=True)) [16, 13, 11, 14, 4] Instead of checking the differences of all generated values for a gcd with n, only the kth and 2*kth numbers are checked, e.g. 1st and 2nd, 2nd and 4th, 3rd and 6th until it has been detected that the loop has been traversed. Loops may be many thousands of steps long before rho finds a factor or reports failure. If ``max_steps`` is specified, the iteration is cancelled with a failure after the specified number of steps. Examples ======== >>> from sympy import pollard_rho >>> n=16843009 >>> F=lambda x:(2048*pow(x,2,n) + 32767) % n >>> pollard_rho(n, F=F) 257 Use the default setting with a bad value of ``a`` and no retries: >>> pollard_rho(n, a=n-2, retries=0) If retries is > 0 then perhaps the problem will correct itself when new values are generated for a: >>> pollard_rho(n, a=n-2, retries=1) 257 References ========== .. 
[1] Richard Crandall & Carl Pomerance (2005), "Prime Numbers: A Computational Perspective", Springer, 2nd edition, 229-231 """ n = int(n) if n < 5: raise ValueError('pollard_rho should receive n > 4') prng = random.Random(seed + retries) V = s for i in range(retries + 1): U = V if not F: F = lambda x: (pow(x, 2, n) + a) % n j = 0 while 1: if max_steps and (j > max_steps): break j += 1 U = F(U) V = F(F(V)) # V is 2x further along than U g = igcd(U - V, n) if g == 1: continue if g == n: break return int(g) V = prng.randint(0, n - 1) a = prng.randint(1, n - 3) # for x**2 + a, a%n should not be 0 or -2 F = None return None def pollard_pm1(n, B=10, a=2, retries=0, seed=1234): """ Use Pollard's p-1 method to try to extract a nontrivial factor of ``n``. Either a divisor (perhaps composite) or ``None`` is returned. The value of ``a`` is the base that is used in the test gcd(a**M - 1, n). The default is 2. If ``retries`` > 0 then if no factor is found after the first attempt, a new ``a`` will be generated randomly (using the ``seed``) and the process repeated. Note: the value of M is lcm(1..B) = reduce(ilcm, range(2, B + 1)). A search is made for factors next to even numbers having a power smoothness less than ``B``. Choosing a larger B increases the likelihood of finding a larger factor but takes longer. Whether a factor of n is found or not depends on ``a`` and the power smoothness of the even number just less than the factor p (hence the name p - 1). Although some discussion of what constitutes a good ``a`` some descriptions are hard to interpret. At the modular.math site referenced below it is stated that if gcd(a**M - 1, n) = N then a**M % q**r is 1 for every prime power divisor of N. But consider the following: >>> from sympy.ntheory.factor_ import smoothness_p, pollard_pm1 >>> n=257*1009 >>> smoothness_p(n) (-1, [(257, (1, 2, 256)), (1009, (1, 7, 16))]) So we should (and can) find a root with B=16: >>> pollard_pm1(n, B=16, a=3) 1009 If we attempt to increase B to 256 we find that it doesn't work: >>> pollard_pm1(n, B=256) >>> But if the value of ``a`` is changed we find that only multiples of 257 work, e.g.: >>> pollard_pm1(n, B=256, a=257) 1009 Checking different ``a`` values shows that all the ones that didn't work had a gcd value not equal to ``n`` but equal to one of the factors: >>> from sympy.core.numbers import ilcm, igcd >>> from sympy import factorint, Pow >>> M = 1 >>> for i in range(2, 256): ... M = ilcm(M, i) ... >>> set([igcd(pow(a, M, n) - 1, n) for a in range(2, 256) if ... igcd(pow(a, M, n) - 1, n) != n]) {1009} But does aM % d for every divisor of n give 1? >>> aM = pow(255, M, n) >>> [(d, aM%Pow(*d.args)) for d in factorint(n, visual=True).args] [(257**1, 1), (1009**1, 1)] No, only one of them. So perhaps the principle is that a root will be found for a given value of B provided that: 1) the power smoothness of the p - 1 value next to the root does not exceed B 2) a**M % p != 1 for any of the divisors of n. By trying more than one ``a`` it is possible that one of them will yield a factor. 
Examples ======== With the default smoothness bound, this number can't be cracked: >>> from sympy.ntheory import pollard_pm1, primefactors >>> pollard_pm1(21477639576571) Increasing the smoothness bound helps: >>> pollard_pm1(21477639576571, B=2000) 4410317 Looking at the smoothness of the factors of this number we find: >>> from sympy.utilities import flatten >>> from sympy.ntheory.factor_ import smoothness_p, factorint >>> print(smoothness_p(21477639576571, visual=1)) p**i=4410317**1 has p-1 B=1787, B-pow=1787 p**i=4869863**1 has p-1 B=2434931, B-pow=2434931 The B and B-pow are the same for the p - 1 factorizations of the divisors because those factorizations had a very large prime factor: >>> factorint(4410317 - 1) {2: 2, 617: 1, 1787: 1} >>> factorint(4869863-1) {2: 1, 2434931: 1} Note that until B reaches the B-pow value of 1787, the number is not cracked; >>> pollard_pm1(21477639576571, B=1786) >>> pollard_pm1(21477639576571, B=1787) 4410317 The B value has to do with the factors of the number next to the divisor, not the divisors themselves. A worst case scenario is that the number next to the factor p has a large prime divisisor or is a perfect power. If these conditions apply then the power-smoothness will be about p/2 or p. The more realistic is that there will be a large prime factor next to p requiring a B value on the order of p/2. Although primes may have been searched for up to this level, the p/2 is a factor of p - 1, something that we don't know. The modular.math reference below states that 15% of numbers in the range of 10**15 to 15**15 + 10**4 are 10**6 power smooth so a B of 10**6 will fail 85% of the time in that range. From 10**8 to 10**8 + 10**3 the percentages are nearly reversed...but in that range the simple trial division is quite fast. References ========== .. [1] Richard Crandall & Carl Pomerance (2005), "Prime Numbers: A Computational Perspective", Springer, 2nd edition, 236-238 .. [2] http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node81.html .. [3] https://www.cs.toronto.edu/~yuvalf/Factorization.pdf """ n = int(n) if n < 4 or B < 3: raise ValueError('pollard_pm1 should receive n > 3 and B > 2') prng = random.Random(seed + B) # computing a**lcm(1,2,3,..B) % n for B > 2 # it looks weird, but it's right: primes run [2, B] # and the answer's not right until the loop is done. for i in range(retries + 1): aM = a for p in sieve.primerange(2, B + 1): e = int(math.log(B, p)) aM = pow(aM, pow(p, e), n) g = igcd(aM - 1, n) if 1 < g < n: return int(g) # get a new a: # since the exponent, lcm(1..B), is even, if we allow 'a' to be 'n-1' # then (n - 1)**even % n will be 1 which will give a g of 0 and 1 will # give a zero, too, so we set the range as [2, n-2]. Some references # say 'a' should be coprime to n, but either will detect factors. a = prng.randint(2, n - 2) def _trial(factors, n, candidates, verbose=False): """ Helper function for integer factorization. Trial factors ``n` against all integers given in the sequence ``candidates`` and updates the dict ``factors`` in-place. Returns the reduced value of ``n`` and a flag indicating whether any factors were found. 
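    Examples
    ========

    A minimal sketch of how the helper updates ``factors`` in-place
    (the values here are chosen only for illustration):

    >>> from sympy.ntheory.factor_ import _trial
    >>> facs = {}
    >>> _trial(facs, 36, [2, 3, 5])
    (1, True)
    >>> facs
    {2: 2, 3: 2}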
""" if verbose: factors0 = list(factors.keys()) nfactors = len(factors) for d in candidates: if n % d == 0: m = multiplicity(d, n) n //= d**m factors[d] = m if verbose: for k in sorted(set(factors).difference(set(factors0))): print(factor_msg % (k, factors[k])) return int(n), len(factors) != nfactors def _check_termination(factors, n, limitp1, use_trial, use_rho, use_pm1, verbose): """ Helper function for integer factorization. Checks if ``n`` is a prime or a perfect power, and in those cases updates the factorization and raises ``StopIteration``. """ if verbose: print('Check for termination') # since we've already been factoring there is no need to do # simultaneous factoring with the power check p = perfect_power(n, factor=False) if p is not False: base, exp = p if limitp1: limit = limitp1 - 1 else: limit = limitp1 facs = factorint(base, limit, use_trial, use_rho, use_pm1, verbose=False) for b, e in facs.items(): if verbose: print(factor_msg % (b, e)) factors[b] = exp*e raise StopIteration if isprime(n): factors[int(n)] = 1 raise StopIteration if n == 1: raise StopIteration trial_int_msg = "Trial division with ints [%i ... %i] and fail_max=%i" trial_msg = "Trial division with primes [%i ... %i]" rho_msg = "Pollard's rho with retries %i, max_steps %i and seed %i" pm1_msg = "Pollard's p-1 with smoothness bound %i and seed %i" factor_msg = '\t%i ** %i' fermat_msg = 'Close factors satisying Fermat condition found.' complete_msg = 'Factorization is complete.' def _factorint_small(factors, n, limit, fail_max): """ Return the value of n and either a 0 (indicating that factorization up to the limit was complete) or else the next near-prime that would have been tested. Factoring stops if there are fail_max unsuccessful tests in a row. If factors of n were found they will be in the factors dictionary as {factor: multiplicity} and the returned value of n will have had those factors removed. The factors dictionary is modified in-place. """ def done(n, d): """return n, d if the sqrt(n) wasn't reached yet, else n, 0 indicating that factoring is done. """ if d*d <= n: return n, d return n, 0 d = 2 m = trailing(n) if m: factors[d] = m n >>= m d = 3 if limit < d: if n > 1: factors[n] = 1 return done(n, d) # reduce m = 0 while n % d == 0: n //= d m += 1 if m == 20: mm = multiplicity(d, n) m += mm n //= d**mm break if m: factors[d] = m # when d*d exceeds maxx or n we are done; if limit**2 is greater # than n then maxx is set to zero so the value of n will flag the finish if limit*limit > n: maxx = 0 else: maxx = limit*limit dd = maxx or n d = 5 fails = 0 while fails < fail_max: if d*d > dd: break # d = 6*i - 1 # reduce m = 0 while n % d == 0: n //= d m += 1 if m == 20: mm = multiplicity(d, n) m += mm n //= d**mm break if m: factors[d] = m dd = maxx or n fails = 0 else: fails += 1 d += 2 if d*d > dd: break # d = 6*i - 1 # reduce m = 0 while n % d == 0: n //= d m += 1 if m == 20: mm = multiplicity(d, n) m += mm n //= d**mm break if m: factors[d] = m dd = maxx or n fails = 0 else: fails += 1 # d = 6*(i + 1) - 1 d += 4 return done(n, d) def factorint(n, limit=None, use_trial=True, use_rho=True, use_pm1=True, verbose=False, visual=None, multiple=False): r""" Given a positive integer ``n``, ``factorint(n)`` returns a dict containing the prime factors of ``n`` as keys and their respective multiplicities as values. 
For example: >>> from sympy.ntheory import factorint >>> factorint(2000) # 2000 = (2**4) * (5**3) {2: 4, 5: 3} >>> factorint(65537) # This number is prime {65537: 1} For input less than 2, factorint behaves as follows: - ``factorint(1)`` returns the empty factorization, ``{}`` - ``factorint(0)`` returns ``{0:1}`` - ``factorint(-n)`` adds ``-1:1`` to the factors and then factors ``n`` Partial Factorization: If ``limit`` (> 3) is specified, the search is stopped after performing trial division up to (and including) the limit (or taking a corresponding number of rho/p-1 steps). This is useful if one has a large number and only is interested in finding small factors (if any). Note that setting a limit does not prevent larger factors from being found early; it simply means that the largest factor may be composite. Since checking for perfect power is relatively cheap, it is done regardless of the limit setting. This number, for example, has two small factors and a huge semi-prime factor that cannot be reduced easily: >>> from sympy.ntheory import isprime >>> a = 1407633717262338957430697921446883 >>> f = factorint(a, limit=10000) >>> f == {991: 1, int(202916782076162456022877024859): 1, 7: 1} True >>> isprime(max(f)) False This number has a small factor and a residual perfect power whose base is greater than the limit: >>> factorint(3*101**7, limit=5) {3: 1, 101: 7} List of Factors: If ``multiple`` is set to ``True`` then a list containing the prime factors including multiplicities is returned. >>> factorint(24, multiple=True) [2, 2, 2, 3] Visual Factorization: If ``visual`` is set to ``True``, then it will return a visual factorization of the integer. For example: >>> from sympy import pprint >>> pprint(factorint(4200, visual=True)) 3 1 2 1 2 *3 *5 *7 Note that this is achieved by using the evaluate=False flag in Mul and Pow. If you do other manipulations with an expression where evaluate=False, it may evaluate. Therefore, you should use the visual option only for visualization, and use the normal dictionary returned by visual=False if you want to perform operations on the factors. You can easily switch between the two forms by sending them back to factorint: >>> from sympy import Mul, Pow >>> regular = factorint(1764); regular {2: 2, 3: 2, 7: 2} >>> pprint(factorint(regular)) 2 2 2 2 *3 *7 >>> visual = factorint(1764, visual=True); pprint(visual) 2 2 2 2 *3 *7 >>> print(factorint(visual)) {2: 2, 3: 2, 7: 2} If you want to send a number to be factored in a partially factored form you can do so with a dictionary or unevaluated expression: >>> factorint(factorint({4: 2, 12: 3})) # twice to toggle to dict form {2: 10, 3: 3} >>> factorint(Mul(4, 12, evaluate=False)) {2: 4, 3: 1} The table of the output logic is: ====== ====== ======= ======= Visual ------ ---------------------- Input True False other ====== ====== ======= ======= dict mul dict mul n mul dict dict mul mul dict dict ====== ====== ======= ======= Notes ===== Algorithm: The function switches between multiple algorithms. Trial division quickly finds small factors (of the order 1-5 digits), and finds all large factors if given enough time. The Pollard rho and p-1 algorithms are used to find large factors ahead of time; they will often find factors of the order of 10 digits within a few seconds: >>> factors = factorint(12345678910111213141516) >>> for base, exp in sorted(factors.items()): ... print('%s %s' % (base, exp)) ... 
2 2 2507191691 1 1231026625769 1 Any of these methods can optionally be disabled with the following boolean parameters: - ``use_trial``: Toggle use of trial division - ``use_rho``: Toggle use of Pollard's rho method - ``use_pm1``: Toggle use of Pollard's p-1 method ``factorint`` also periodically checks if the remaining part is a prime number or a perfect power, and in those cases stops. For unevaluated factorial, it uses Legendre's formula(theorem). If ``verbose`` is set to ``True``, detailed progress is printed. See Also ======== smoothness, smoothness_p, divisors """ if isinstance(n, Dict): n = dict(n) if multiple: fac = factorint(n, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose, visual=False, multiple=False) factorlist = sum(([p] * fac[p] if fac[p] > 0 else [S.One/p]*(-fac[p]) for p in sorted(fac)), []) return factorlist factordict = {} if visual and not isinstance(n, Mul) and not isinstance(n, dict): factordict = factorint(n, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose, visual=False) elif isinstance(n, Mul): factordict = {int(k): int(v) for k, v in n.as_powers_dict().items()} elif isinstance(n, dict): factordict = n if factordict and (isinstance(n, Mul) or isinstance(n, dict)): # check it for key in list(factordict.keys()): if isprime(key): continue e = factordict.pop(key) d = factorint(key, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose, visual=False) for k, v in d.items(): if k in factordict: factordict[k] += v*e else: factordict[k] = v*e if visual or (type(n) is dict and visual is not True and visual is not False): if factordict == {}: return S.One if -1 in factordict: factordict.pop(-1) args = [S.NegativeOne] else: args = [] args.extend([Pow(*i, evaluate=False) for i in sorted(factordict.items())]) return Mul(*args, evaluate=False) elif isinstance(n, dict) or isinstance(n, Mul): return factordict assert use_trial or use_rho or use_pm1 from sympy.functions.combinatorial.factorials import factorial if isinstance(n, factorial): x = as_int(n.args[0]) if x >= 20: factors = {} m = 2 # to initialize the if condition below for p in sieve.primerange(2, x + 1): if m > 1: m, q = 0, x // p while q != 0: m += q q //= p factors[p] = m if factors and verbose: for k in sorted(factors): print(factor_msg % (k, factors[k])) if verbose: print(complete_msg) return factors else: # if n < 20!, direct computation is faster # since it uses a lookup table n = n.func(x) n = as_int(n) if limit: limit = int(limit) # special cases if n < 0: factors = factorint( -n, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose, visual=False) factors[-1] = 1 return factors if limit and limit < 2: if n == 1: return {} return {n: 1} elif n < 10: # doing this we are assured of getting a limit > 2 # when we have to compute it later return [{0: 1}, {}, {2: 1}, {3: 1}, {2: 2}, {5: 1}, {2: 1, 3: 1}, {7: 1}, {2: 3}, {3: 2}][n] factors = {} # do simplistic factorization if verbose: sn = str(n) if len(sn) > 50: print('Factoring %s' % sn[:5] + \ '..(%i other digits)..' 
% (len(sn) - 10) + sn[-5:]) else: print('Factoring', n) if use_trial: # this is the preliminary factorization for small factors small = 2**15 fail_max = 600 small = min(small, limit or small) if verbose: print(trial_int_msg % (2, small, fail_max)) n, next_p = _factorint_small(factors, n, small, fail_max) else: next_p = 2 if factors and verbose: for k in sorted(factors): print(factor_msg % (k, factors[k])) if next_p == 0: if n > 1: factors[int(n)] = 1 if verbose: print(complete_msg) return factors # continue with more advanced factorization methods # first check if the simplistic run didn't finish # because of the limit and check for a perfect # power before exiting try: if limit and next_p > limit: if verbose: print('Exceeded limit:', limit) _check_termination(factors, n, limit, use_trial, use_rho, use_pm1, verbose) if n > 1: factors[int(n)] = 1 return factors else: # Before quitting (or continuing on)... # ...do a Fermat test since it's so easy and we need the # square root anyway. Finding 2 factors is easy if they are # "close enough." This is the big root equivalent of dividing by # 2, 3, 5. sqrt_n = integer_nthroot(n, 2)[0] a = sqrt_n + 1 a2 = a**2 b2 = a2 - n for i in range(3): b, fermat = integer_nthroot(b2, 2) if fermat: break b2 += 2*a + 1 # equiv to (a + 1)**2 - n a += 1 if fermat: if verbose: print(fermat_msg) if limit: limit -= 1 for r in [a - b, a + b]: facs = factorint(r, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose) for k, v in facs.items(): factors[k] = factors.get(k, 0) + v raise StopIteration # ...see if factorization can be terminated _check_termination(factors, n, limit, use_trial, use_rho, use_pm1, verbose) except StopIteration: if verbose: print(complete_msg) return factors # these are the limits for trial division which will # be attempted in parallel with pollard methods low, high = next_p, 2*next_p limit = limit or sqrt_n # add 1 to make sure limit is reached in primerange calls limit += 1 while 1: try: high_ = high if limit < high_: high_ = limit # Trial division if use_trial: if verbose: print(trial_msg % (low, high_)) ps = sieve.primerange(low, high_) n, found_trial = _trial(factors, n, ps, verbose) if found_trial: _check_termination(factors, n, limit, use_trial, use_rho, use_pm1, verbose) else: found_trial = False if high > limit: if verbose: print('Exceeded limit:', limit) if n > 1: factors[int(n)] = 1 raise StopIteration # Only used advanced methods when no small factors were found if not found_trial: if (use_pm1 or use_rho): high_root = max(int(math.log(high_**0.7)), low, 3) # Pollard p-1 if use_pm1: if verbose: print(pm1_msg % (high_root, high_)) c = pollard_pm1(n, B=high_root, seed=high_) if c: # factor it and let _trial do the update ps = factorint(c, limit=limit - 1, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose) n, _ = _trial(factors, n, ps, verbose=False) _check_termination(factors, n, limit, use_trial, use_rho, use_pm1, verbose) # Pollard rho if use_rho: max_steps = high_root if verbose: print(rho_msg % (1, max_steps, high_)) c = pollard_rho(n, retries=1, max_steps=max_steps, seed=high_) if c: # factor it and let _trial do the update ps = factorint(c, limit=limit - 1, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose) n, _ = _trial(factors, n, ps, verbose=False) _check_termination(factors, n, limit, use_trial, use_rho, use_pm1, verbose) except StopIteration: if verbose: print(complete_msg) return factors low, high = high, high*2 def factorrat(rat, limit=None, 
use_trial=True, use_rho=True, use_pm1=True, verbose=False, visual=None, multiple=False): r""" Given a Rational ``r``, ``factorrat(r)`` returns a dict containing the prime factors of ``r`` as keys and their respective multiplicities as values. For example: >>> from sympy.ntheory import factorrat >>> from sympy.core.symbol import S >>> factorrat(S(8)/9) # 8/9 = (2**3) * (3**-2) {2: 3, 3: -2} >>> factorrat(S(-1)/987) # -1/789 = -1 * (3**-1) * (7**-1) * (47**-1) {-1: 1, 3: -1, 7: -1, 47: -1} Please see the docstring for ``factorint`` for detailed explanations and examples of the following keywords: - ``limit``: Integer limit up to which trial division is done - ``use_trial``: Toggle use of trial division - ``use_rho``: Toggle use of Pollard's rho method - ``use_pm1``: Toggle use of Pollard's p-1 method - ``verbose``: Toggle detailed printing of progress - ``multiple``: Toggle returning a list of factors or dict - ``visual``: Toggle product form of output """ from collections import defaultdict if multiple: fac = factorrat(rat, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose, visual=False, multiple=False) factorlist = sum(([p] * fac[p] if fac[p] > 0 else [S.One/p]*(-fac[p]) for p, _ in sorted(fac.items(), key=lambda elem: elem[0] if elem[1] > 0 else 1/elem[0])), []) return factorlist f = factorint(rat.p, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose).copy() f = defaultdict(int, f) for p, e in factorint(rat.q, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose).items(): f[p] += -e if len(f) > 1 and 1 in f: del f[1] if not visual: return dict(f) else: if -1 in f: f.pop(-1) args = [S.NegativeOne] else: args = [] args.extend([Pow(*i, evaluate=False) for i in sorted(f.items())]) return Mul(*args, evaluate=False) def primefactors(n, limit=None, verbose=False): """Return a sorted list of n's prime factors, ignoring multiplicity and any composite factor that remains if the limit was set too low for complete factorization. Unlike factorint(), primefactors() does not return -1 or 0. Examples ======== >>> from sympy.ntheory import primefactors, factorint, isprime >>> primefactors(6) [2, 3] >>> primefactors(-5) [5] >>> sorted(factorint(123456).items()) [(2, 6), (3, 1), (643, 1)] >>> primefactors(123456) [2, 3, 643] >>> sorted(factorint(10000000001, limit=200).items()) [(101, 1), (99009901, 1)] >>> isprime(99009901) False >>> primefactors(10000000001, limit=300) [101] See Also ======== divisors """ n = int(n) factors = sorted(factorint(n, limit=limit, verbose=verbose).keys()) s = [f for f in factors[:-1:] if f not in [-1, 0, 1]] if factors and isprime(factors[-1]): s += [factors[-1]] return s def _divisors(n, proper=False): """Helper function for divisors which generates the divisors.""" factordict = factorint(n) ps = sorted(factordict.keys()) def rec_gen(n=0): if n == len(ps): yield 1 else: pows = [1] for j in range(factordict[ps[n]]): pows.append(pows[-1] * ps[n]) for q in rec_gen(n + 1): for p in pows: yield p * q if proper: for p in rec_gen(): if p != n: yield p else: for p in rec_gen(): yield p def divisors(n, generator=False, proper=False): r""" Return all divisors of n sorted from 1..n by default. If generator is ``True`` an unordered generator is returned. The number of divisors of n can be quite large if there are many prime factors (counting repeated factors). If only the number of factors is desired use divisor_count(n). 
Examples ======== >>> from sympy import divisors, divisor_count >>> divisors(24) [1, 2, 3, 4, 6, 8, 12, 24] >>> divisor_count(24) 8 >>> list(divisors(120, generator=True)) [1, 2, 4, 8, 3, 6, 12, 24, 5, 10, 20, 40, 15, 30, 60, 120] Notes ===== This is a slightly modified version of Tim Peters referenced at: https://stackoverflow.com/questions/1010381/python-factorization See Also ======== primefactors, factorint, divisor_count """ n = as_int(abs(n)) if isprime(n): if proper: return [1] return [1, n] if n == 1: if proper: return [] return [1] if n == 0: return [] rv = _divisors(n, proper) if not generator: return sorted(rv) return rv def divisor_count(n, modulus=1, proper=False): """ Return the number of divisors of ``n``. If ``modulus`` is not 1 then only those that are divisible by ``modulus`` are counted. If ``proper`` is True then the divisor of ``n`` will not be counted. Examples ======== >>> from sympy import divisor_count >>> divisor_count(6) 4 >>> divisor_count(6, 2) 2 >>> divisor_count(6, proper=True) 3 See Also ======== factorint, divisors, totient, proper_divisor_count """ if not modulus: return 0 elif modulus != 1: n, r = divmod(n, modulus) if r: return 0 if n == 0: return 0 n = Mul(*[v + 1 for k, v in factorint(n).items() if k > 1]) if n and proper: n -= 1 return n def proper_divisors(n, generator=False): """ Return all divisors of n except n, sorted by default. If generator is ``True`` an unordered generator is returned. Examples ======== >>> from sympy import proper_divisors, proper_divisor_count >>> proper_divisors(24) [1, 2, 3, 4, 6, 8, 12] >>> proper_divisor_count(24) 7 >>> list(proper_divisors(120, generator=True)) [1, 2, 4, 8, 3, 6, 12, 24, 5, 10, 20, 40, 15, 30, 60] See Also ======== factorint, divisors, proper_divisor_count """ return divisors(n, generator=generator, proper=True) def proper_divisor_count(n, modulus=1): """ Return the number of proper divisors of ``n``. Examples ======== >>> from sympy import proper_divisor_count >>> proper_divisor_count(6) 3 >>> proper_divisor_count(6, modulus=2) 1 See Also ======== divisors, proper_divisors, divisor_count """ return divisor_count(n, modulus=modulus, proper=True) def _udivisors(n): """Helper function for udivisors which generates the unitary divisors.""" factorpows = [p**e for p, e in factorint(n).items()] for i in range(2**len(factorpows)): d, j, k = 1, i, 0 while j: if (j & 1): d *= factorpows[k] j >>= 1 k += 1 yield d def udivisors(n, generator=False): r""" Return all unitary divisors of n sorted from 1..n by default. If generator is ``True`` an unordered generator is returned. The number of unitary divisors of n can be quite large if there are many prime factors. If only the number of unitary divisors is desired use udivisor_count(n). Examples ======== >>> from sympy.ntheory.factor_ import udivisors, udivisor_count >>> udivisors(15) [1, 3, 5, 15] >>> udivisor_count(15) 4 >>> sorted(udivisors(120, generator=True)) [1, 3, 5, 8, 15, 24, 40, 120] See Also ======== primefactors, factorint, divisors, divisor_count, udivisor_count References ========== .. [1] https://en.wikipedia.org/wiki/Unitary_divisor .. [2] http://mathworld.wolfram.com/UnitaryDivisor.html """ n = as_int(abs(n)) if isprime(n): return [1, n] if n == 1: return [1] if n == 0: return [] rv = _udivisors(n) if not generator: return sorted(rv) return rv def udivisor_count(n): """ Return the number of unitary divisors of ``n``. 
Parameters ========== n : integer Examples ======== >>> from sympy.ntheory.factor_ import udivisor_count >>> udivisor_count(120) 8 See Also ======== factorint, divisors, udivisors, divisor_count, totient References ========== .. [1] http://mathworld.wolfram.com/UnitaryDivisorFunction.html """ if n == 0: return 0 return 2**len([p for p in factorint(n) if p > 1]) def _antidivisors(n): """Helper function for antidivisors which generates the antidivisors.""" for d in _divisors(n): y = 2*d if n > y and n % y: yield y for d in _divisors(2*n-1): if n > d >= 2 and n % d: yield d for d in _divisors(2*n+1): if n > d >= 2 and n % d: yield d def antidivisors(n, generator=False): r""" Return all antidivisors of n sorted from 1..n by default. Antidivisors [1]_ of n are numbers that do not divide n by the largest possible margin. If generator is True an unordered generator is returned. Examples ======== >>> from sympy.ntheory.factor_ import antidivisors >>> antidivisors(24) [7, 16] >>> sorted(antidivisors(128, generator=True)) [3, 5, 15, 17, 51, 85] See Also ======== primefactors, factorint, divisors, divisor_count, antidivisor_count References ========== .. [1] definition is described in https://oeis.org/A066272/a066272a.html """ n = as_int(abs(n)) if n <= 2: return [] rv = _antidivisors(n) if not generator: return sorted(rv) return rv def antidivisor_count(n): """ Return the number of antidivisors [1]_ of ``n``. Parameters ========== n : integer Examples ======== >>> from sympy.ntheory.factor_ import antidivisor_count >>> antidivisor_count(13) 4 >>> antidivisor_count(27) 5 See Also ======== factorint, divisors, antidivisors, divisor_count, totient References ========== .. [1] formula from https://oeis.org/A066272 """ n = as_int(abs(n)) if n <= 2: return 0 return divisor_count(2*n - 1) + divisor_count(2*n + 1) + \ divisor_count(n) - divisor_count(n, 2) - 5 class totient(Function): r""" Calculate the Euler totient function phi(n) ``totient(n)`` or `\phi(n)` is the number of positive integers `\leq` n that are relatively prime to n. Parameters ========== n : integer Examples ======== >>> from sympy.ntheory import totient >>> totient(1) 1 >>> totient(25) 20 >>> totient(45) == totient(5)*totient(9) True See Also ======== divisor_count References ========== .. [1] https://en.wikipedia.org/wiki/Euler%27s_totient_function .. 
[2] http://mathworld.wolfram.com/TotientFunction.html """ @classmethod def eval(cls, n): n = sympify(n) if n.is_Integer: if n < 1: raise ValueError("n must be a positive integer") factors = factorint(n) return cls._from_factors(factors) elif not isinstance(n, Expr) or (n.is_integer is False) or (n.is_positive is False): raise ValueError("n must be a positive integer") def _eval_is_integer(self): return fuzzy_and([self.args[0].is_integer, self.args[0].is_positive]) @classmethod def _from_distinct_primes(self, *args): """Subroutine to compute totient from the list of assumed distinct primes Examples ======== >>> from sympy.ntheory.factor_ import totient >>> totient._from_distinct_primes(5, 7) 24 """ from functools import reduce return reduce(lambda i, j: i * (j-1), args, 1) @classmethod def _from_factors(self, factors): """Subroutine to compute totient from already-computed factors Examples ======== >>> from sympy.ntheory.factor_ import totient >>> totient._from_factors({5: 2}) 20 """ t = 1 for p, k in factors.items(): t *= (p - 1) * p**(k - 1) return t class reduced_totient(Function): r""" Calculate the Carmichael reduced totient function lambda(n) ``reduced_totient(n)`` or `\lambda(n)` is the smallest m > 0 such that `k^m \equiv 1 \mod n` for all k relatively prime to n. Examples ======== >>> from sympy.ntheory import reduced_totient >>> reduced_totient(1) 1 >>> reduced_totient(8) 2 >>> reduced_totient(30) 4 See Also ======== totient References ========== .. [1] https://en.wikipedia.org/wiki/Carmichael_function .. [2] http://mathworld.wolfram.com/CarmichaelFunction.html """ @classmethod def eval(cls, n): n = sympify(n) if n.is_Integer: if n < 1: raise ValueError("n must be a positive integer") factors = factorint(n) return cls._from_factors(factors) @classmethod def _from_factors(self, factors): """Subroutine to compute totient from already-computed factors """ t = 1 for p, k in factors.items(): if p == 2 and k > 2: t = ilcm(t, 2**(k - 2)) else: t = ilcm(t, (p - 1) * p**(k - 1)) return t @classmethod def _from_distinct_primes(self, *args): """Subroutine to compute totient from the list of assumed distinct primes """ args = [p - 1 for p in args] return ilcm(*args) def _eval_is_integer(self): return fuzzy_and([self.args[0].is_integer, self.args[0].is_positive]) class divisor_sigma(Function): r""" Calculate the divisor function `\sigma_k(n)` for positive integer n ``divisor_sigma(n, k)`` is equal to ``sum([x**k for x in divisors(n)])`` If n's prime factorization is: .. math :: n = \prod_{i=1}^\omega p_i^{m_i}, then .. math :: \sigma_k(n) = \prod_{i=1}^\omega (1+p_i^k+p_i^{2k}+\cdots + p_i^{m_ik}). Parameters ========== n : integer k : integer, optional power of divisors in the sum for k = 0, 1: ``divisor_sigma(n, 0)`` is equal to ``divisor_count(n)`` ``divisor_sigma(n, 1)`` is equal to ``sum(divisors(n))`` Default for k is 1. Examples ======== >>> from sympy.ntheory import divisor_sigma >>> divisor_sigma(18, 0) 6 >>> divisor_sigma(39, 1) 56 >>> divisor_sigma(12, 2) 210 >>> divisor_sigma(37) 38 See Also ======== divisor_count, totient, divisors, factorint References ========== .. 
[1] https://en.wikipedia.org/wiki/Divisor_function """ @classmethod def eval(cls, n, k=1): n = sympify(n) k = sympify(k) if n.is_prime: return 1 + n**k if n.is_Integer: if n <= 0: raise ValueError("n must be a positive integer") elif k.is_Integer: k = int(k) return Integer(prod( (p**(k*(e + 1)) - 1)//(p**k - 1) if k != 0 else e + 1 for p, e in factorint(n).items())) else: return Mul(*[(p**(k*(e + 1)) - 1)/(p**k - 1) if k != 0 else e + 1 for p, e in factorint(n).items()]) if n.is_integer: # symbolic case args = [] for p, e in (_.as_base_exp() for _ in Mul.make_args(n)): if p.is_prime and e.is_positive: args.append((p**(k*(e + 1)) - 1)/(p**k - 1) if k != 0 else e + 1) else: return return Mul(*args) def core(n, t=2): r""" Calculate core(n, t) = `core_t(n)` of a positive integer n ``core_2(n)`` is equal to the squarefree part of n If n's prime factorization is: .. math :: n = \prod_{i=1}^\omega p_i^{m_i}, then .. math :: core_t(n) = \prod_{i=1}^\omega p_i^{m_i \mod t}. Parameters ========== n : integer t : integer core(n, t) calculates the t-th power free part of n ``core(n, 2)`` is the squarefree part of ``n`` ``core(n, 3)`` is the cubefree part of ``n`` Default for t is 2. Examples ======== >>> from sympy.ntheory.factor_ import core >>> core(24, 2) 6 >>> core(9424, 3) 1178 >>> core(379238) 379238 >>> core(15**11, 10) 15 See Also ======== factorint, sympy.solvers.diophantine.diophantine.square_factor References ========== .. [1] https://en.wikipedia.org/wiki/Square-free_integer#Squarefree_core """ n = as_int(n) t = as_int(t) if n <= 0: raise ValueError("n must be a positive integer") elif t <= 1: raise ValueError("t must be >= 2") else: y = 1 for p, e in factorint(n).items(): y *= p**(e % t) return y def digits(n, b=10): """ Return a list of the digits of n in base b. The first element in the list is b (or -b if n is negative). Examples ======== >>> from sympy.ntheory.factor_ import digits >>> digits(35) [10, 3, 5] >>> digits(27, 2) [2, 1, 1, 0, 1, 1] >>> digits(65536, 256) [256, 1, 0, 0] >>> digits(-3958, 27) [-27, 5, 11, 16] """ b = as_int(b) n = as_int(n) if b <= 1: raise ValueError("b must be >= 2") else: x, y = abs(n), [] while x >= b: x, r = divmod(x, b) y.append(r) y.append(x) y.append(-b if n < 0 else b) y.reverse() return y class udivisor_sigma(Function): r""" Calculate the unitary divisor function `\sigma_k^*(n)` for positive integer n ``udivisor_sigma(n, k)`` is equal to ``sum([x**k for x in udivisors(n)])`` If n's prime factorization is: .. math :: n = \prod_{i=1}^\omega p_i^{m_i}, then .. math :: \sigma_k^*(n) = \prod_{i=1}^\omega (1+ p_i^{m_ik}). Parameters ========== k : power of divisors in the sum for k = 0, 1: ``udivisor_sigma(n, 0)`` is equal to ``udivisor_count(n)`` ``udivisor_sigma(n, 1)`` is equal to ``sum(udivisors(n))`` Default for k is 1. Examples ======== >>> from sympy.ntheory.factor_ import udivisor_sigma >>> udivisor_sigma(18, 0) 4 >>> udivisor_sigma(74, 1) 114 >>> udivisor_sigma(36, 3) 47450 >>> udivisor_sigma(111) 152 See Also ======== divisor_count, totient, divisors, udivisors, udivisor_count, divisor_sigma, factorint References ========== .. [1] http://mathworld.wolfram.com/UnitaryDivisorFunction.html """ @classmethod def eval(cls, n, k=1): n = sympify(n) k = sympify(k) if n.is_prime: return 1 + n**k if n.is_Integer: if n <= 0: raise ValueError("n must be a positive integer") else: return Mul(*[1+p**(k*e) for p, e in factorint(n).items()]) class primenu(Function): r""" Calculate the number of distinct prime factors for a positive integer n. 
If n's prime factorization is: .. math :: n = \prod_{i=1}^k p_i^{m_i}, then ``primenu(n)`` or `\nu(n)` is: .. math :: \nu(n) = k. Examples ======== >>> from sympy.ntheory.factor_ import primenu >>> primenu(1) 0 >>> primenu(30) 3 See Also ======== factorint References ========== .. [1] http://mathworld.wolfram.com/PrimeFactor.html """ @classmethod def eval(cls, n): n = sympify(n) if n.is_Integer: if n <= 0: raise ValueError("n must be a positive integer") else: return len(factorint(n).keys()) class primeomega(Function): r""" Calculate the number of prime factors counting multiplicities for a positive integer n. If n's prime factorization is: .. math :: n = \prod_{i=1}^k p_i^{m_i}, then ``primeomega(n)`` or `\Omega(n)` is: .. math :: \Omega(n) = \sum_{i=1}^k m_i. Examples ======== >>> from sympy.ntheory.factor_ import primeomega >>> primeomega(1) 0 >>> primeomega(20) 3 See Also ======== factorint References ========== .. [1] http://mathworld.wolfram.com/PrimeFactor.html """ @classmethod def eval(cls, n): n = sympify(n) if n.is_Integer: if n <= 0: raise ValueError("n must be a positive integer") else: return sum(factorint(n).values()) def mersenne_prime_exponent(nth): """Returns the exponent ``i`` for the nth Mersenne prime (which has the form `2^i - 1`). Examples ======== >>> from sympy.ntheory.factor_ import mersenne_prime_exponent >>> mersenne_prime_exponent(1) 2 >>> mersenne_prime_exponent(20) 4423 """ n = as_int(nth) if n < 1: raise ValueError("nth must be a positive integer; mersenne_prime_exponent(1) == 2") if n > 51: raise ValueError("There are only 51 perfect numbers; nth must be less than or equal to 51") return MERSENNE_PRIME_EXPONENTS[n - 1] def is_perfect(n): """Returns True if ``n`` is a perfect number, else False. A perfect number is equal to the sum of its positive, proper divisors. Examples ======== >>> from sympy.ntheory.factor_ import is_perfect, divisors, divisor_sigma >>> is_perfect(20) False >>> is_perfect(6) True >>> 6 == divisor_sigma(6) - 6 == sum(divisors(6)[:-1]) True References ========== .. [1] http://mathworld.wolfram.com/PerfectNumber.html .. [2] https://en.wikipedia.org/wiki/Perfect_number """ from sympy.core.power import integer_log n = as_int(n) if _isperfect(n): return True # all perfect numbers for Mersenne primes with exponents # less than or equal to 43112609 are known iknow = MERSENNE_PRIME_EXPONENTS.index(43112609) if iknow <= len(PERFECT) - 1 and n <= PERFECT[iknow]: # there may be gaps between this and larger known values # so only conclude in the range for which all values # are known return False if n%2 == 0: last2 = n % 100 if last2 != 28 and last2 % 10 != 6: return False r, b = integer_nthroot(1 + 8*n, 2) if not b: return False m, x = divmod(1 + r, 4) if x: return False e, b = integer_log(m, 2) if not b: return False else: if n < 10**2000: # http://www.lirmm.fr/~ochem/opn/ return False if n % 105 == 0: # not divis by 105 return False if not any(n%m == r for m, r in [(12, 1), (468, 117), (324, 81)]): return False # there are many criteria that the factor structure of n # must meet; since we will have to factor it to test the # structure we will have the factors and can then check # to see whether it is a perfect number or not. So we # skip the structure checks and go straight to the final # test below. rv = divisor_sigma(n) - n if rv == n: if n%2 == 0: raise ValueError(filldedent(''' This even number is perfect and is associated with a Mersenne Prime, 2^%s - 1. 
It should be added to SymPy.''' % (e + 1))) else: raise ValueError(filldedent('''In 1888, Sylvester stated: " ...a prolonged meditation on the subject has satisfied me that the existence of any one such [odd perfect number] -- its escape, so to say, from the complex web of conditions which hem it in on all sides -- would be little short of a miracle." I guess SymPy just found that miracle and it factors like this: %s''' % factorint(n))) def is_mersenne_prime(n): """Returns True if ``n`` is a Mersenne prime, else False. A Mersenne prime is a prime number having the form `2^i - 1`. Examples ======== >>> from sympy.ntheory.factor_ import is_mersenne_prime >>> is_mersenne_prime(6) False >>> is_mersenne_prime(127) True References ========== .. [1] http://mathworld.wolfram.com/MersennePrime.html """ from sympy.core.power import integer_log n = as_int(n) if _ismersenneprime(n): return True if not isprime(n): return False r, b = integer_log(n + 1, 2) if not b: return False raise ValueError(filldedent(''' This Mersenne Prime, 2^%s - 1, should be added to SymPy's known values.''' % r)) def abundance(n): """Returns the difference between the sum of the positive proper divisors of a number and the number. Examples ======== >>> from sympy.ntheory import abundance, is_perfect, is_abundant >>> abundance(6) 0 >>> is_perfect(6) True >>> abundance(10) -2 >>> is_abundant(10) False """ return divisor_sigma(n, 1) - 2 * n def is_abundant(n): """Returns True if ``n`` is an abundant number, else False. A abundant number is smaller than the sum of its positive proper divisors. Examples ======== >>> from sympy.ntheory.factor_ import is_abundant >>> is_abundant(20) True >>> is_abundant(15) False References ========== .. [1] http://mathworld.wolfram.com/AbundantNumber.html """ n = as_int(n) if is_perfect(n): return False return n % 6 == 0 or bool(abundance(n) > 0) def is_deficient(n): """Returns True if ``n`` is a deficient number, else False. A deficient number is greater than the sum of its positive proper divisors. Examples ======== >>> from sympy.ntheory.factor_ import is_deficient >>> is_deficient(20) False >>> is_deficient(15) True References ========== .. [1] http://mathworld.wolfram.com/DeficientNumber.html """ n = as_int(n) if is_perfect(n): return False return bool(abundance(n) < 0) def is_amicable(m, n): """Returns True if the numbers `m` and `n` are "amicable", else False. Amicable numbers are two different numbers so related that the sum of the proper divisors of each is equal to that of the other. Examples ======== >>> from sympy.ntheory.factor_ import is_amicable, divisor_sigma >>> is_amicable(220, 284) True >>> divisor_sigma(220) == divisor_sigma(284) True References ========== .. [1] https://en.wikipedia.org/wiki/Amicable_numbers """ if m == n: return False a, b = map(lambda i: divisor_sigma(i), (m, n)) return a == b == (m + n) def dra(n, b): """ Returns the additive digital root of a natural number ``n`` in base ``b`` which is a single digit value obtained by an iterative process of summing digits, on each iteration using the result from the previous iteration to compute a digit sum. Examples ======== >>> from sympy.ntheory.factor_ import dra >>> dra(3110, 12) 8 References ========== .. 
[1] https://en.wikipedia.org/wiki/Digital_root """ num = abs(as_int(n)) b = as_int(b) if b <= 1: raise ValueError("Base should be an integer greater than 1") if num == 0: return 0 return (1 + (num - 1) % (b - 1)) def drm(n, b): """ Returns the multiplicative digital root of a natural number ``n`` in a given base ``b`` which is a single digit value obtained by an iterative process of multiplying digits, on each iteration using the result from the previous iteration to compute the digit multiplication. Examples ======== >>> from sympy.ntheory.factor_ import drm >>> drm(9876, 10) 0 >>> drm(49, 10) 8 References ========== .. [1] http://mathworld.wolfram.com/MultiplicativeDigitalRoot.html """ n = abs(as_int(n)) b = as_int(b) if b <= 1: raise ValueError("Base should be an integer greater than 1") while n > b: mul = 1 while n > 1: n, r = divmod(n, b) if r == 0: return 0 mul *= r n = mul return n
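# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal,
# hedged example of how the classification helpers above fit together,
# assuming this file is importable as ``sympy.ntheory.factor_``: abundance(n)
# is negative, zero or positive according to whether n is deficient, perfect
# or abundant, and dra/drm compute the additive and multiplicative digital
# roots described in their docstrings.
if __name__ == '__main__':
    from sympy.ntheory.factor_ import (abundance, dra, drm, is_abundant,
                                       is_deficient, is_perfect)
    for n in (6, 10, 12):
        # exactly one of the three classifications holds for each n
        flags = [is_deficient(n), is_perfect(n), is_abundant(n)]
        assert flags.count(True) == 1
        print(n, abundance(n))        # 6 -> 0, 10 -> -2, 12 -> 4
    print(dra(3110, 10))              # 3 + 1 + 1 + 0 = 5, already a digit
    print(drm(3110, 10))              # a zero digit forces the result to 0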
f136108a2187b4fb366a555dbb3efe3a66bb31fc1ef30043c5838641a90c5ffe
from __future__ import print_function, division from mpmath.libmp import (fzero, from_int, from_rational, fone, fhalf, bitcount, to_int, to_str, mpf_mul, mpf_div, mpf_sub, mpf_add, mpf_sqrt, mpf_pi, mpf_cosh_sinh, mpf_cos, mpf_sin) from sympy.core.numbers import igcd from .residue_ntheory import (_sqrt_mod_prime_power, legendre_symbol, jacobi_symbol, is_quad_residue) import math def _pre(): maxn = 10**5 global _factor global _totient _factor = [0]*maxn _totient = [1]*maxn lim = int(maxn**0.5) + 5 for i in range(2, lim): if _factor[i] == 0: for j in range(i*i, maxn, i): if _factor[j] == 0: _factor[j] = i for i in range(2, maxn): if _factor[i] == 0: _factor[i] = i _totient[i] = i-1 continue x = _factor[i] y = i//x if y % x == 0: _totient[i] = _totient[y]*x else: _totient[i] = _totient[y]*(x - 1) def _a(n, k, prec): """ Compute the inner sum in HRR formula [1]_ References ========== .. [1] http://msp.org/pjm/1956/6-1/pjm-v6-n1-p18-p.pdf """ if k == 1: return fone k1 = k e = 0 p = _factor[k] while k1 % p == 0: k1 //= p e += 1 k2 = k//k1 # k2 = p^e v = 1 - 24*n pi = mpf_pi(prec) if k1 == 1: # k = p^e if p == 2: mod = 8*k v = mod + v % mod v = (v*pow(9, k - 1, mod)) % mod m = _sqrt_mod_prime_power(v, 2, e + 3)[0] arg = mpf_div(mpf_mul( from_int(4*m), pi, prec), from_int(mod), prec) return mpf_mul(mpf_mul( from_int((-1)**e*jacobi_symbol(m - 1, m)), mpf_sqrt(from_int(k), prec), prec), mpf_sin(arg, prec), prec) if p == 3: mod = 3*k v = mod + v % mod if e > 1: v = (v*pow(64, k//3 - 1, mod)) % mod m = _sqrt_mod_prime_power(v, 3, e + 1)[0] arg = mpf_div(mpf_mul(from_int(4*m), pi, prec), from_int(mod), prec) return mpf_mul(mpf_mul( from_int(2*(-1)**(e + 1)*legendre_symbol(m, 3)), mpf_sqrt(from_int(k//3), prec), prec), mpf_sin(arg, prec), prec) v = k + v % k if v % p == 0: if e == 1: return mpf_mul( from_int(jacobi_symbol(3, k)), mpf_sqrt(from_int(k), prec), prec) return fzero if not is_quad_residue(v, p): return fzero _phi = p**(e - 1)*(p - 1) v = (v*pow(576, _phi - 1, k)) m = _sqrt_mod_prime_power(v, p, e)[0] arg = mpf_div( mpf_mul(from_int(4*m), pi, prec), from_int(k), prec) return mpf_mul(mpf_mul( from_int(2*jacobi_symbol(3, k)), mpf_sqrt(from_int(k), prec), prec), mpf_cos(arg, prec), prec) if p != 2 or e >= 3: d1, d2 = igcd(k1, 24), igcd(k2, 24) e = 24//(d1*d2) n1 = ((d2*e*n + (k2**2 - 1)//d1)* pow(e*k2*k2*d2, _totient[k1] - 1, k1)) % k1 n2 = ((d1*e*n + (k1**2 - 1)//d2)* pow(e*k1*k1*d1, _totient[k2] - 1, k2)) % k2 return mpf_mul(_a(n1, k1, prec), _a(n2, k2, prec), prec) if e == 2: n1 = ((8*n + 5)*pow(128, _totient[k1] - 1, k1)) % k1 n2 = (4 + ((n - 2 - (k1**2 - 1)//8)*(k1**2)) % 4) % 4 return mpf_mul(mpf_mul( from_int(-1), _a(n1, k1, prec), prec), _a(n2, k2, prec)) n1 = ((8*n + 1)*pow(32, _totient[k1] - 1, k1)) % k1 n2 = (2 + (n - (k1**2 - 1)//8) % 2) % 2 return mpf_mul(_a(n1, k1, prec), _a(n2, k2, prec), prec) def _d(n, j, prec, sq23pi, sqrt8): """ Compute the sinh term in the outer sum of the HRR formula. The constants sqrt(2/3*pi) and sqrt(8) must be precomputed. """ j = from_int(j) pi = mpf_pi(prec) a = mpf_div(sq23pi, j, prec) b = mpf_sub(from_int(n), from_rational(1, 24, prec), prec) c = mpf_sqrt(b, prec) ch, sh = mpf_cosh_sinh(mpf_mul(a, c), prec) D = mpf_div( mpf_sqrt(j, prec), mpf_mul(mpf_mul(sqrt8, b), pi), prec) E = mpf_sub(mpf_mul(a, ch), mpf_div(sh, c, prec), prec) return mpf_mul(D, E) def npartitions(n, verbose=False): """ Calculate the partition function P(n), i.e. the number of ways that n can be written as a sum of positive integers. 
P(n) is computed using the Hardy-Ramanujan-Rademacher formula [1]_. The correctness of this implementation has been tested through 10**10. Examples ======== >>> from sympy.ntheory import npartitions >>> npartitions(25) 1958 References ========== .. [1] http://mathworld.wolfram.com/PartitionFunctionP.html """ n = int(n) if n < 0: return 0 if n <= 5: return [1, 1, 2, 3, 5, 7][n] if '_factor' not in globals(): _pre() # Estimate number of bits in p(n). This formula could be tidied pbits = int(( math.pi*(2*n/3.)**0.5 - math.log(4*n))/math.log(10) + 1) * \ math.log(10, 2) prec = p = int(pbits*1.1 + 100) s = fzero M = max(6, int(0.24*n**0.5 + 4)) if M > 10**5: raise ValueError("Input too big") # Corresponds to n > 1.7e11 sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p) sqrt8 = mpf_sqrt(from_int(8), p) for q in range(1, M): a = _a(n, q, p) d = _d(n, q, p, sq23pi, sqrt8) s = mpf_add(s, mpf_mul(a, d), prec) if verbose: print("step", q, "of", M, to_str(a, 10), to_str(d, 10)) # On average, the terms decrease rapidly in magnitude. # Dynamically reducing the precision greatly improves # performance. p = bitcount(abs(to_int(d))) + 50 return int(to_int(mpf_add(s, fhalf, prec))) __all__ = ['npartitions']
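# ---------------------------------------------------------------------------
# Illustrative cross-check (not part of the original module). A minimal
# sketch, assuming this file is importable as ``sympy.ntheory.partitions_``:
# it compares npartitions(n) for small n against a straightforward O(n**2)
# dynamic-programming count of partitions, which is independent of the
# Hardy-Ramanujan-Rademacher machinery implemented above. The helper name
# _npartitions_dp is introduced here purely for the example.
def _npartitions_dp(n):
    # table[k] counts partitions of k into the parts considered so far
    table = [1] + [0]*n
    for part in range(1, n + 1):
        for k in range(part, n + 1):
            table[k] += table[k - part]
    return table[n]


if __name__ == '__main__':
    from sympy.ntheory.partitions_ import npartitions
    for n in range(60):
        assert npartitions(n) == _npartitions_dp(n), n
    print(npartitions(25))  # 1958, matching the docstring example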
c52d492f2bf482b14066e36fcac68ee8cc9a984f481c7dcd72e7ad5cf0b2279a
from sympy.combinatorics import Permutation from sympy.combinatorics.util import _distribute_gens_by_base rmul = Permutation.rmul def _cmp_perm_lists(first, second): """ Compare two lists of permutations as sets. This is used for testing purposes. Since the array form of a permutation is currently a list, Permutation is not hashable and cannot be put into a set. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.testutil import _cmp_perm_lists >>> a = Permutation([0, 2, 3, 4, 1]) >>> b = Permutation([1, 2, 0, 4, 3]) >>> c = Permutation([3, 4, 0, 1, 2]) >>> ls1 = [a, b, c] >>> ls2 = [b, c, a] >>> _cmp_perm_lists(ls1, ls2) True """ return {tuple(a) for a in first} == \ {tuple(a) for a in second} def _naive_list_centralizer(self, other, af=False): from sympy.combinatorics.perm_groups import PermutationGroup """ Return a list of elements for the centralizer of a subgroup/set/element. This is a brute force implementation that goes over all elements of the group and checks for membership in the centralizer. It is used to test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``. Examples ======== >>> from sympy.combinatorics.testutil import _naive_list_centralizer >>> from sympy.combinatorics.named_groups import DihedralGroup >>> D = DihedralGroup(4) >>> _naive_list_centralizer(D, D) [Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])] See Also ======== sympy.combinatorics.perm_groups.centralizer """ from sympy.combinatorics.permutations import _af_commutes_with if hasattr(other, 'generators'): elements = list(self.generate_dimino(af=True)) gens = [x._array_form for x in other.generators] commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens) centralizer_list = [] if not af: for element in elements: if commutes_with_gens(element): centralizer_list.append(Permutation._af_new(element)) else: for element in elements: if commutes_with_gens(element): centralizer_list.append(element) return centralizer_list elif hasattr(other, 'getitem'): return _naive_list_centralizer(self, PermutationGroup(other), af) elif hasattr(other, 'array_form'): return _naive_list_centralizer(self, PermutationGroup([other]), af) def _verify_bsgs(group, base, gens): """ Verify the correctness of a base and strong generating set. This is a naive implementation using the definition of a base and a strong generating set relative to it. There are other procedures for verifying a base and strong generating set, but this one will serve for more robust testing. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> A = AlternatingGroup(4) >>> A.schreier_sims() >>> _verify_bsgs(A, A.base, A.strong_gens) True See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims """ from sympy.combinatorics.perm_groups import PermutationGroup strong_gens_distr = _distribute_gens_by_base(base, gens) current_stabilizer = group for i in range(len(base)): candidate = PermutationGroup(strong_gens_distr[i]) if current_stabilizer.order() != candidate.order(): return False current_stabilizer = current_stabilizer.stabilizer(base[i]) if current_stabilizer.order() != 1: return False return True def _verify_centralizer(group, arg, centr=None): """ Verify the centralizer of a group/set/element inside another group. 
This is used for testing ``.centralizer()`` from ``sympy.combinatorics.perm_groups`` Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... AlternatingGroup) >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.testutil import _verify_centralizer >>> S = SymmetricGroup(5) >>> A = AlternatingGroup(5) >>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])]) >>> _verify_centralizer(S, A, centr) True See Also ======== _naive_list_centralizer, sympy.combinatorics.perm_groups.PermutationGroup.centralizer, _cmp_perm_lists """ if centr is None: centr = group.centralizer(arg) centr_list = list(centr.generate_dimino(af=True)) centr_list_naive = _naive_list_centralizer(group, arg, af=True) return _cmp_perm_lists(centr_list, centr_list_naive) def _verify_normal_closure(group, arg, closure=None): from sympy.combinatorics.perm_groups import PermutationGroup """ Verify the normal closure of a subgroup/subset/element in a group. This is used to test sympy.combinatorics.perm_groups.PermutationGroup.normal_closure Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... AlternatingGroup) >>> from sympy.combinatorics.testutil import _verify_normal_closure >>> S = SymmetricGroup(3) >>> A = AlternatingGroup(3) >>> _verify_normal_closure(S, A, closure=A) True See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.normal_closure """ if closure is None: closure = group.normal_closure(arg) conjugates = set() if hasattr(arg, 'generators'): subgr_gens = arg.generators elif hasattr(arg, '__getitem__'): subgr_gens = arg elif hasattr(arg, 'array_form'): subgr_gens = [arg] for el in group.generate_dimino(): for gen in subgr_gens: conjugates.add(gen ^ el) naive_closure = PermutationGroup(list(conjugates)) return closure.is_subgroup(naive_closure) def canonicalize_naive(g, dummies, sym, *v): """ Canonicalize tensor formed by tensors of the different types g permutation representing the tensor dummies list of dummy indices msym symmetry of the metric v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i` base_i, gens_i BSGS for tensors of this type n_i number ot tensors of type `i` sym_i symmetry under exchange of two component tensors of type `i` None no symmetry 0 commuting 1 anticommuting Return 0 if the tensor is zero, else return the array form of the permutation representing the canonical form of the tensor. 
Examples ======== >>> from sympy.combinatorics.testutil import canonicalize_naive >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs >>> from sympy.combinatorics import Permutation, PermutationGroup >>> g = Permutation([1, 3, 2, 0, 4, 5]) >>> base2, gens2 = get_symmetric_group_sgs(2) >>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0)) [0, 2, 1, 3, 4, 5] """ from sympy.combinatorics.perm_groups import PermutationGroup from sympy.combinatorics.tensor_can import gens_products, dummy_sgs from sympy.combinatorics.permutations import Permutation, _af_rmul v1 = [] for i in range(len(v)): base_i, gens_i, n_i, sym_i = v[i] v1.append((base_i, gens_i, [[]]*n_i, sym_i)) size, sbase, sgens = gens_products(*v1) dgens = dummy_sgs(dummies, sym, size-2) if isinstance(sym, int): num_types = 1 dummies = [dummies] sym = [sym] else: num_types = len(sym) dgens = [] for i in range(num_types): dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2)) S = PermutationGroup(sgens) D = PermutationGroup([Permutation(x) for x in dgens]) dlist = list(D.generate(af=True)) g = g.array_form st = set() for s in S.generate(af=True): h = _af_rmul(g, s) for d in dlist: q = tuple(_af_rmul(d, h)) st.add(q) a = list(st) a.sort() prev = (0,)*size for h in a: if h[:-2] == prev[:-2]: if h[-1] != prev[-1]: return 0 prev = h return list(a[0]) def graph_certificate(gr): """ Return a certificate for the graph gr adjacency list The graph is assumed to be unoriented and without external lines. Associate to each vertex of the graph a symmetric tensor with number of indices equal to the degree of the vertex; indices are contracted when they correspond to the same line of the graph. The canonical form of the tensor gives a certificate for the graph. This is not an efficient algorithm to get the certificate of a graph. Examples ======== >>> from sympy.combinatorics.testutil import graph_certificate >>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]} >>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]} >>> c1 = graph_certificate(gr1) >>> c2 = graph_certificate(gr2) >>> c1 [0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21] >>> c1 == c2 True """ from sympy.combinatorics.permutations import _af_invert from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize items = list(gr.items()) items.sort(key=lambda x: len(x[1]), reverse=True) pvert = [x[0] for x in items] pvert = _af_invert(pvert) # the indices of the tensor are twice the number of lines of the graph num_indices = 0 for v, neigh in items: num_indices += len(neigh) # associate to each vertex its indices; for each line # between two vertices assign the # even index to the vertex which comes first in items, # the odd index to the other vertex vertices = [[] for i in items] i = 0 for v, neigh in items: for v2 in neigh: if pvert[v] < pvert[v2]: vertices[pvert[v]].append(i) vertices[pvert[v2]].append(i+1) i += 2 g = [] for v in vertices: g.extend(v) assert len(g) == num_indices g += [num_indices, num_indices + 1] size = num_indices + 2 assert sorted(g) == list(range(size)) g = Permutation(g) vlen = [0]*(len(vertices[0])+1) for neigh in vertices: vlen[len(neigh)] += 1 v = [] for i in range(len(vlen)): n = vlen[i] if n: base, gens = get_symmetric_group_sgs(i) v.append((base, gens, n, 0)) v.reverse() dummies = list(range(num_indices)) can = canonicalize(g, dummies, 0, *v) return can
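# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal,
# hedged example of the testing helpers above, assuming they are importable
# as ``sympy.combinatorics.testutil``: _verify_bsgs checks a base and strong
# generating set produced by schreier_sims(), and graph_certificate is
# expected to assign the same certificate to two relabelled copies of the
# same 4-cycle, since they are isomorphic graphs.
if __name__ == '__main__':
    from sympy.combinatorics.named_groups import SymmetricGroup
    from sympy.combinatorics.testutil import _verify_bsgs, graph_certificate

    S = SymmetricGroup(5)
    S.schreier_sims()
    assert _verify_bsgs(S, S.base, S.strong_gens)

    # the 4-cycle 0-1-2-3-0 and a relabelled copy 0-2-1-3-0
    gr1 = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
    gr2 = {0: [2, 3], 1: [2, 3], 2: [0, 1], 3: [0, 1]}
    assert graph_certificate(gr1) == graph_certificate(gr2)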
83c58d38acf9b397bd4d91093d125ac6dd16ad31f26caa1d1a410faaace6213e
from __future__ import print_function, division from random import randrange, choice from math import log from sympy.ntheory import primefactors from sympy import multiplicity, factorint from sympy.combinatorics import Permutation from sympy.combinatorics.permutations import (_af_commutes_with, _af_invert, _af_rmul, _af_rmuln, _af_pow, Cycle) from sympy.combinatorics.util import (_check_cycles_alt_sym, _distribute_gens_by_base, _orbits_transversals_from_bsgs, _handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr, _strip, _strip_af) from sympy.core import Basic from sympy.functions.combinatorial.factorials import factorial from sympy.ntheory import sieve from sympy.utilities.iterables import has_variety, is_sequence, uniq from sympy.testing.randtest import _randrange from itertools import islice rmul = Permutation.rmul_with_af _af_new = Permutation._af_new class PermutationGroup(Basic): """The class defining a Permutation group. PermutationGroup([p1, p2, ..., pn]) returns the permutation group generated by the list of permutations. This group can be supplied to Polyhedron if one desires to decorate the elements to which the indices of the permutation refer. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.permutations import Cycle >>> from sympy.combinatorics.polyhedron import Polyhedron >>> from sympy.combinatorics.perm_groups import PermutationGroup The permutations corresponding to motion of the front, right and bottom face of a 2x2 Rubik's cube are defined: >>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5) >>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9) >>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21) These are passed as permutations to PermutationGroup: >>> G = PermutationGroup(F, R, D) >>> G.order() 3674160 The group can be supplied to a Polyhedron in order to track the objects being moved. An example involving the 2x2 Rubik's cube is given there, but here is a simple demonstration: >>> a = Permutation(2, 1) >>> b = Permutation(1, 0) >>> G = PermutationGroup(a, b) >>> P = Polyhedron(list('ABC'), pgroup=G) >>> P.corners (A, B, C) >>> P.rotate(0) # apply permutation 0 >>> P.corners (A, C, B) >>> P.reset() >>> P.corners (A, B, C) Or one can make a permutation as a product of selected permutations and apply them to an iterable directly: >>> P10 = G.make_perm([0, 1]) >>> P10('ABC') ['C', 'A', 'B'] See Also ======== sympy.combinatorics.polyhedron.Polyhedron, sympy.combinatorics.permutations.Permutation References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of Computational Group Theory" .. [2] Seress, A. "Permutation Group Algorithms" .. [3] https://en.wikipedia.org/wiki/Schreier_vector .. [4] https://en.wikipedia.org/wiki/Nielsen_transformation#Product_replacement_algorithm .. [5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray, Alice C.Niemeyer, and E.A.O'Brien. "Generating Random Elements of a Finite Group" .. [6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29 .. [7] http://www.algorithmist.com/index.php/Union_Find .. [8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups .. [9] https://en.wikipedia.org/wiki/Center_%28group_theory%29 .. [10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer .. [11] http://groupprops.subwiki.org/wiki/Derived_subgroup .. [12] https://en.wikipedia.org/wiki/Nilpotent_group .. [13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf .. 
[14] https://www.gap-system.org/Manuals/doc/ref/manual.pdf """ is_group = True def __new__(cls, *args, **kwargs): """The default constructor. Accepts Cycle and Permutation forms. Removes duplicates unless ``dups`` keyword is ``False``. """ if not args: args = [Permutation()] else: args = list(args[0] if is_sequence(args[0]) else args) if not args: args = [Permutation()] if any(isinstance(a, Cycle) for a in args): args = [Permutation(a) for a in args] if has_variety(a.size for a in args): degree = kwargs.pop('degree', None) if degree is None: degree = max(a.size for a in args) for i in range(len(args)): if args[i].size != degree: args[i] = Permutation(args[i], size=degree) if kwargs.pop('dups', True): args = list(uniq([_af_new(list(a)) for a in args])) if len(args) > 1: args = [g for g in args if not g.is_identity] obj = Basic.__new__(cls, *args, **kwargs) obj._generators = args obj._order = None obj._center = [] obj._is_abelian = None obj._is_transitive = None obj._is_sym = None obj._is_alt = None obj._is_primitive = None obj._is_nilpotent = None obj._is_solvable = None obj._is_trivial = None obj._transitivity_degree = None obj._max_div = None obj._is_perfect = None obj._is_cyclic = None obj._r = len(obj._generators) obj._degree = obj._generators[0].size # these attributes are assigned after running schreier_sims obj._base = [] obj._strong_gens = [] obj._strong_gens_slp = [] obj._basic_orbits = [] obj._transversals = [] obj._transversal_slp = [] # these attributes are assigned after running _random_pr_init obj._random_gens = [] # finite presentation of the group as an instance of `FpGroup` obj._fp_presentation = None return obj def __getitem__(self, i): return self._generators[i] def __contains__(self, i): """Return ``True`` if *i* is contained in PermutationGroup. Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> p = Permutation(1, 2, 3) >>> Permutation(3) in PermutationGroup(p) True """ if not isinstance(i, Permutation): raise TypeError("A PermutationGroup contains only Permutations as " "elements, not elements of type %s" % type(i)) return self.contains(i) def __len__(self): return len(self._generators) def __eq__(self, other): """Return ``True`` if PermutationGroup generated by elements in the group are same i.e they represent the same PermutationGroup. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> p = Permutation(0, 1, 2, 3, 4, 5) >>> G = PermutationGroup([p, p**2]) >>> H = PermutationGroup([p**2, p]) >>> G.generators == H.generators False >>> G == H True """ if not isinstance(other, PermutationGroup): return False set_self_gens = set(self.generators) set_other_gens = set(other.generators) # before reaching the general case there are also certain # optimisation and obvious cases requiring less or no actual # computation. if set_self_gens == set_other_gens: return True # in the most general case it will check that each generator of # one group belongs to the other PermutationGroup and vice-versa for gen1 in set_self_gens: if not other.contains(gen1): return False for gen2 in set_other_gens: if not self.contains(gen2): return False return True def __hash__(self): return super(PermutationGroup, self).__hash__() def __mul__(self, other): """ Return the direct product of two permutation groups as a permutation group. 
This implementation realizes the direct product by shifting the index set for the generators of the second group: so if we have ``G`` acting on ``n1`` points and ``H`` acting on ``n2`` points, ``G*H`` acts on ``n1 + n2`` points. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import CyclicGroup >>> G = CyclicGroup(5) >>> H = G*G >>> H PermutationGroup([ (9)(0 1 2 3 4), (5 6 7 8 9)]) >>> H.order() 25 """ gens1 = [perm._array_form for perm in self.generators] gens2 = [perm._array_form for perm in other.generators] n1 = self._degree n2 = other._degree start = list(range(n1)) end = list(range(n1, n1 + n2)) for i in range(len(gens2)): gens2[i] = [x + n1 for x in gens2[i]] gens2 = [start + gen for gen in gens2] gens1 = [gen + end for gen in gens1] together = gens1 + gens2 gens = [_af_new(x) for x in together] return PermutationGroup(gens) def _random_pr_init(self, r, n, _random_prec_n=None): r"""Initialize random generators for the product replacement algorithm. The implementation uses a modification of the original product replacement algorithm due to Leedham-Green, as described in [1], pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical analysis of the original product replacement algorithm, and [4]. The product replacement algorithm is used for producing random, uniformly distributed elements of a group `G` with a set of generators `S`. For the initialization ``_random_pr_init``, a list ``R`` of `\max\{r, |S|\}` group generators is created as the attribute ``G._random_gens``, repeating elements of `S` if necessary, and the identity element of `G` is appended to ``R`` - we shall refer to this last element as the accumulator. Then the function ``random_pr()`` is called ``n`` times, randomizing the list ``R`` while preserving the generation of `G` by ``R``. The function ``random_pr()`` itself takes two random elements ``g, h`` among all elements of ``R`` but the accumulator and replaces ``g`` with a randomly chosen element from `\{gh, g(~h), hg, (~h)g\}`. Then the accumulator is multiplied by whatever ``g`` was replaced by. The new value of the accumulator is then returned by ``random_pr()``. The elements returned will eventually (for ``n`` large enough) become uniformly distributed across `G` ([5]). For practical purposes however, the values ``n = 50, r = 11`` are suggested in [1]. Notes ===== THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute self._random_gens See Also ======== random_pr """ deg = self.degree random_gens = [x._array_form for x in self.generators] k = len(random_gens) if k < r: for i in range(k, r): random_gens.append(random_gens[i - k]) acc = list(range(deg)) random_gens.append(acc) self._random_gens = random_gens # handle randomized input for testing purposes if _random_prec_n is None: for i in range(n): self.random_pr() else: for i in range(n): self.random_pr(_random_prec=_random_prec_n[i]) def _union_find_merge(self, first, second, ranks, parents, not_rep): """Merges two classes in a union-find data structure. Used in the implementation of Atkinson's algorithm as suggested in [1], pp. 83-87. The class merging process uses union by rank as an optimization. ([7]) Notes ===== THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives, ``parents``, the list of class sizes, ``ranks``, and the list of elements that are not representatives, ``not_rep``, are changed due to class merging. See Also ======== minimal_block, _union_find_rep References ========== .. 
[1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" .. [7] http://www.algorithmist.com/index.php/Union_Find """ rep_first = self._union_find_rep(first, parents) rep_second = self._union_find_rep(second, parents) if rep_first != rep_second: # union by rank if ranks[rep_first] >= ranks[rep_second]: new_1, new_2 = rep_first, rep_second else: new_1, new_2 = rep_second, rep_first total_rank = ranks[new_1] + ranks[new_2] if total_rank > self.max_div: return -1 parents[new_2] = new_1 ranks[new_1] = total_rank not_rep.append(new_2) return 1 return 0 def _union_find_rep(self, num, parents): """Find representative of a class in a union-find data structure. Used in the implementation of Atkinson's algorithm as suggested in [1], pp. 83-87. After the representative of the class to which ``num`` belongs is found, path compression is performed as an optimization ([7]). Notes ===== THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives, ``parents``, is altered due to path compression. See Also ======== minimal_block, _union_find_merge References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" .. [7] http://www.algorithmist.com/index.php/Union_Find """ rep, parent = num, parents[num] while parent != rep: rep = parent parent = parents[rep] # path compression temp, parent = num, parents[num] while parent != rep: parents[temp] = rep temp = parent parent = parents[temp] return rep @property def base(self): """Return a base from the Schreier-Sims algorithm. For a permutation group `G`, a base is a sequence of points `B = (b_1, b_2, ..., b_k)` such that no element of `G` apart from the identity fixes all the points in `B`. The concepts of a base and strong generating set and their applications are discussed in depth in [1], pp. 87-89 and [2], pp. 55-57. An alternative way to think of `B` is that it gives the indices of the stabilizer cosets that contain more than the identity permutation. Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)]) >>> G.base [0, 2] See Also ======== strong_gens, basic_transversals, basic_orbits, basic_stabilizers """ if self._base == []: self.schreier_sims() return self._base def baseswap(self, base, strong_gens, pos, randomized=False, transversals=None, basic_orbits=None, strong_gens_distr=None): r"""Swap two consecutive base points in base and strong generating set. If a base for a group `G` is given by `(b_1, b_2, ..., b_k)`, this function returns a base `(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)`, where `i` is given by ``pos``, and a strong generating set relative to that base. The original base and strong generating set are not modified. The randomized version (default) is of Las Vegas type. Parameters ========== base, strong_gens The base and strong generating set. pos The position at which swapping is performed. randomized A switch between randomized and deterministic version. transversals The transversals for the basic orbits, if known. basic_orbits The basic orbits, if known. strong_gens_distr The strong generators distributed by basic stabilizers, if known. Returns ======= (base, strong_gens) ``base`` is the new base, and ``strong_gens`` is a generating set relative to it. 
Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> S = SymmetricGroup(4) >>> S.schreier_sims() >>> S.base [0, 1, 2] >>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False) >>> base, gens ([0, 2, 1], [(0 1 2 3), (3)(0 1), (1 3 2), (2 3), (1 3)]) check that base, gens is a BSGS >>> S1 = PermutationGroup(gens) >>> _verify_bsgs(S1, base, gens) True See Also ======== schreier_sims Notes ===== The deterministic version of the algorithm is discussed in [1], pp. 102-103; the randomized version is discussed in [1], p.103, and [2], p.98. It is of Las Vegas type. Notice that [1] contains a mistake in the pseudocode and discussion of BASESWAP: on line 3 of the pseudocode, `|\beta_{i+1}^{\left\langle T\right\rangle}|` should be replaced by `|\beta_{i}^{\left\langle T\right\rangle}|`, and the same for the discussion of the algorithm. """ # construct the basic orbits, generators for the stabilizer chain # and transversal elements from whatever was provided transversals, basic_orbits, strong_gens_distr = \ _handle_precomputed_bsgs(base, strong_gens, transversals, basic_orbits, strong_gens_distr) base_len = len(base) degree = self.degree # size of orbit of base[pos] under the stabilizer we seek to insert # in the stabilizer chain at position pos + 1 size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \ //len(_orbit(degree, strong_gens_distr[pos], base[pos + 1])) # initialize the wanted stabilizer by a subgroup if pos + 2 > base_len - 1: T = [] else: T = strong_gens_distr[pos + 2][:] # randomized version if randomized is True: stab_pos = PermutationGroup(strong_gens_distr[pos]) schreier_vector = stab_pos.schreier_vector(base[pos + 1]) # add random elements of the stabilizer until they generate it while len(_orbit(degree, T, base[pos])) != size: new = stab_pos.random_stab(base[pos + 1], schreier_vector=schreier_vector) T.append(new) # deterministic version else: Gamma = set(basic_orbits[pos]) Gamma.remove(base[pos]) if base[pos + 1] in Gamma: Gamma.remove(base[pos + 1]) # add elements of the stabilizer until they generate it by # ruling out member of the basic orbit of base[pos] along the way while len(_orbit(degree, T, base[pos])) != size: gamma = next(iter(Gamma)) x = transversals[pos][gamma] temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1]) if temp not in basic_orbits[pos + 1]: Gamma = Gamma - _orbit(degree, T, gamma) else: y = transversals[pos + 1][temp] el = rmul(x, y) if el(base[pos]) not in _orbit(degree, T, base[pos]): T.append(el) Gamma = Gamma - _orbit(degree, T, base[pos]) # build the new base and strong generating set strong_gens_new_distr = strong_gens_distr[:] strong_gens_new_distr[pos + 1] = T base_new = base[:] base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos] strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr) for gen in T: if gen not in strong_gens_new: strong_gens_new.append(gen) return base_new, strong_gens_new @property def basic_orbits(self): """ Return the basic orbits relative to a base and strong generating set. If `(b_1, b_2, ..., b_k)` is a base for a group `G`, and `G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}` is the ``i``-th basic stabilizer (so that `G^{(1)} = G`), the ``i``-th basic orbit relative to this base is the orbit of `b_i` under `G^{(i)}`. See [1], pp. 87-89 for more information. 
Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> S = SymmetricGroup(4) >>> S.basic_orbits [[0, 1, 2, 3], [1, 2, 3], [2, 3]] See Also ======== base, strong_gens, basic_transversals, basic_stabilizers """ if self._basic_orbits == []: self.schreier_sims() return self._basic_orbits @property def basic_stabilizers(self): """ Return a chain of stabilizers relative to a base and strong generating set. The ``i``-th basic stabilizer `G^{(i)}` relative to a base `(b_1, b_2, ..., b_k)` is `G_{b_1, b_2, ..., b_{i-1}}`. For more information, see [1], pp. 87-89. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> A = AlternatingGroup(4) >>> A.schreier_sims() >>> A.base [0, 1] >>> for g in A.basic_stabilizers: ... print(g) ... PermutationGroup([ (3)(0 1 2), (1 2 3)]) PermutationGroup([ (1 2 3)]) See Also ======== base, strong_gens, basic_orbits, basic_transversals """ if self._transversals == []: self.schreier_sims() strong_gens = self._strong_gens base = self._base if not base: # e.g. if self is trivial return [] strong_gens_distr = _distribute_gens_by_base(base, strong_gens) basic_stabilizers = [] for gens in strong_gens_distr: basic_stabilizers.append(PermutationGroup(gens)) return basic_stabilizers @property def basic_transversals(self): """ Return basic transversals relative to a base and strong generating set. The basic transversals are transversals of the basic orbits. They are provided as a list of dictionaries, each dictionary having keys - the elements of one of the basic orbits, and values - the corresponding transversal elements. See [1], pp. 87-89 for more information. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> A = AlternatingGroup(4) >>> A.basic_transversals [{0: (3), 1: (3)(0 1 2), 2: (3)(0 2 1), 3: (0 3 1)}, {1: (3), 2: (1 2 3), 3: (1 3 2)}] See Also ======== strong_gens, base, basic_orbits, basic_stabilizers """ if self._transversals == []: self.schreier_sims() return self._transversals def composition_series(self): r""" Return the composition series for a group as a list of permutation groups. The composition series for a group `G` is defined as a subnormal series `G = H_0 > H_1 > H_2 \ldots` A composition series is a subnormal series such that each factor group `H(i+1) / H(i)` is simple. A subnormal series is a composition series only if it is of maximum length. The algorithm works as follows: Starting with the derived series the idea is to fill the gap between `G = der[i]` and `H = der[i+1]` for each `i` independently. Since, all subgroups of the abelian group `G/H` are normal so, first step is to take the generators `g` of `G` and add them to generators of `H` one by one. The factor groups formed are not simple in general. Each group is obtained from the previous one by adding one generator `g`, if the previous group is denoted by `H` then the next group `K` is generated by `g` and `H`. The factor group `K/H` is cyclic and it's order is `K.order()//G.order()`. The series is then extended between `K` and `H` by groups generated by powers of `g` and `H`. The series formed is then prepended to the already existing series. 
Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.named_groups import CyclicGroup >>> S = SymmetricGroup(12) >>> G = S.sylow_subgroup(2) >>> C = G.composition_series() >>> [H.order() for H in C] [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1] >>> G = S.sylow_subgroup(3) >>> C = G.composition_series() >>> [H.order() for H in C] [243, 81, 27, 9, 3, 1] >>> G = CyclicGroup(12) >>> C = G.composition_series() >>> [H.order() for H in C] [12, 6, 3, 1] """ der = self.derived_series() if not (all(g.is_identity for g in der[-1].generators)): raise NotImplementedError('Group should be solvable') series = [] for i in range(len(der)-1): H = der[i+1] up_seg = [] for g in der[i].generators: K = PermutationGroup([g] + H.generators) order = K.order() // H.order() down_seg = [] for p, e in factorint(order).items(): for j in range(e): down_seg.append(PermutationGroup([g] + H.generators)) g = g**p up_seg = down_seg + up_seg H = K up_seg[0] = der[i] series.extend(up_seg) series.append(der[-1]) return series def coset_transversal(self, H): """Return a transversal of the right cosets of self by its subgroup H using the second method described in [1], Subsection 4.6.7 """ if not H.is_subgroup(self): raise ValueError("The argument must be a subgroup") if H.order() == 1: return self._elements self._schreier_sims(base=H.base) # make G.base an extension of H.base base = self.base base_ordering = _base_ordering(base, self.degree) identity = Permutation(self.degree - 1) transversals = self.basic_transversals[:] # transversals is a list of dictionaries. Get rid of the keys # so that it is a list of lists and sort each list in # the increasing order of base[l]^x for l, t in enumerate(transversals): transversals[l] = sorted(t.values(), key = lambda x: base_ordering[base[l]^x]) orbits = H.basic_orbits h_stabs = H.basic_stabilizers g_stabs = self.basic_stabilizers indices = [x.order()//y.order() for x, y in zip(g_stabs, h_stabs)] # T^(l) should be a right transversal of H^(l) in G^(l) for # 1<=l<=len(base). While H^(l) is the trivial group, T^(l) # contains all the elements of G^(l) so we might just as well # start with l = len(h_stabs)-1 if len(g_stabs) > len(h_stabs): T = g_stabs[len(h_stabs)]._elements else: T = [identity] l = len(h_stabs)-1 t_len = len(T) while l > -1: T_next = [] for u in transversals[l]: if u == identity: continue b = base_ordering[base[l]^u] for t in T: p = t*u if all([base_ordering[h^p] >= b for h in orbits[l]]): T_next.append(p) if t_len + len(T_next) == indices[l]: break if t_len + len(T_next) == indices[l]: break T += T_next t_len += len(T_next) l -= 1 T.remove(identity) T = [identity] + T return T def _coset_representative(self, g, H): """Return the representative of Hg from the transversal that would be computed by ``self.coset_transversal(H)``. """ if H.order() == 1: return g # The base of self must be an extension of H.base. 
if not(self.base[:len(H.base)] == H.base): self._schreier_sims(base=H.base) orbits = H.basic_orbits[:] h_transversals = [list(_.values()) for _ in H.basic_transversals] transversals = [list(_.values()) for _ in self.basic_transversals] base = self.base base_ordering = _base_ordering(base, self.degree) def step(l, x): gamma = sorted(orbits[l], key = lambda y: base_ordering[y^x])[0] i = [base[l]^h for h in h_transversals[l]].index(gamma) x = h_transversals[l][i]*x if l < len(orbits)-1: for u in transversals[l]: if base[l]^u == base[l]^x: break x = step(l+1, x*u**-1)*u return x return step(0, g) def coset_table(self, H): """Return the standardised (right) coset table of self in H as a list of lists. """ # Maybe this should be made to return an instance of CosetTable # from fp_groups.py but the class would need to be changed first # to be compatible with PermutationGroups from itertools import chain, product if not H.is_subgroup(self): raise ValueError("The argument must be a subgroup") T = self.coset_transversal(H) n = len(T) A = list(chain.from_iterable((gen, gen**-1) for gen in self.generators)) table = [] for i in range(n): row = [self._coset_representative(T[i]*x, H) for x in A] row = [T.index(r) for r in row] table.append(row) # standardize (this is the same as the algorithm used in coset_table) # If CosetTable is made compatible with PermutationGroups, this # should be replaced by table.standardize() A = range(len(A)) gamma = 1 for alpha, a in product(range(n), A): beta = table[alpha][a] if beta >= gamma: if beta > gamma: for x in A: z = table[gamma][x] table[gamma][x] = table[beta][x] table[beta][x] = z for i in range(n): if table[i][x] == beta: table[i][x] = gamma elif table[i][x] == gamma: table[i][x] = beta gamma += 1 if gamma >= n-1: return table def center(self): r""" Return the center of a permutation group. The center for a group `G` is defined as `Z(G) = \{z\in G | \forall g\in G, zg = gz \}`, the set of elements of `G` that commute with all elements of `G`. It is equal to the centralizer of `G` inside `G`, and is naturally a subgroup of `G` ([9]). Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> D = DihedralGroup(4) >>> G = D.center() >>> G.order() 2 See Also ======== centralizer Notes ===== This is a naive implementation that is a straightforward application of ``.centralizer()`` """ return self.centralizer(self) def centralizer(self, other): r""" Return the centralizer of a group/set/element. The centralizer of a set of permutations ``S`` inside a group ``G`` is the set of elements of ``G`` that commute with all elements of ``S``:: `C_G(S) = \{ g \in G | gs = sg \forall s \in S\}` ([10]) Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of the full symmetric group, we allow for ``S`` to have elements outside ``G``. It is naturally a subgroup of ``G``; the centralizer of a permutation group is equal to the centralizer of any set of generators for that group, since any element commuting with the generators commutes with any product of the generators. Parameters ========== other a permutation group/list of permutations/single permutation Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... 
CyclicGroup) >>> S = SymmetricGroup(6) >>> C = CyclicGroup(6) >>> H = S.centralizer(C) >>> H.is_subgroup(C) True See Also ======== subgroup_search Notes ===== The implementation is an application of ``.subgroup_search()`` with tests using a specific base for the group ``G``. """ if hasattr(other, 'generators'): if other.is_trivial or self.is_trivial: return self degree = self.degree identity = _af_new(list(range(degree))) orbits = other.orbits() num_orbits = len(orbits) orbits.sort(key=lambda x: -len(x)) long_base = [] orbit_reps = [None]*num_orbits orbit_reps_indices = [None]*num_orbits orbit_descr = [None]*degree for i in range(num_orbits): orbit = list(orbits[i]) orbit_reps[i] = orbit[0] orbit_reps_indices[i] = len(long_base) for point in orbit: orbit_descr[point] = i long_base = long_base + orbit base, strong_gens = self.schreier_sims_incremental(base=long_base) strong_gens_distr = _distribute_gens_by_base(base, strong_gens) i = 0 for i in range(len(base)): if strong_gens_distr[i] == [identity]: break base = base[:i] base_len = i for j in range(num_orbits): if base[base_len - 1] in orbits[j]: break rel_orbits = orbits[: j + 1] num_rel_orbits = len(rel_orbits) transversals = [None]*num_rel_orbits for j in range(num_rel_orbits): rep = orbit_reps[j] transversals[j] = dict( other.orbit_transversal(rep, pairs=True)) trivial_test = lambda x: True tests = [None]*base_len for l in range(base_len): if base[l] in orbit_reps: tests[l] = trivial_test else: def test(computed_words, l=l): g = computed_words[l] rep_orb_index = orbit_descr[base[l]] rep = orbit_reps[rep_orb_index] im = g._array_form[base[l]] im_rep = g._array_form[rep] tr_el = transversals[rep_orb_index][base[l]] # using the definition of transversal, # base[l]^g = rep^(tr_el*g); # if g belongs to the centralizer, then # base[l]^g = (rep^g)^tr_el return im == tr_el._array_form[im_rep] tests[l] = test def prop(g): return [rmul(g, gen) for gen in other.generators] == \ [rmul(gen, g) for gen in other.generators] return self.subgroup_search(prop, base=base, strong_gens=strong_gens, tests=tests) elif hasattr(other, '__getitem__'): gens = list(other) return self.centralizer(PermutationGroup(gens)) elif hasattr(other, 'array_form'): return self.centralizer(PermutationGroup([other])) def commutator(self, G, H): """ Return the commutator of two subgroups. For a permutation group ``K`` and subgroups ``G``, ``H``, the commutator of ``G`` and ``H`` is defined as the group generated by all the commutators `[g, h] = hgh^{-1}g^{-1}` for ``g`` in ``G`` and ``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27). Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... AlternatingGroup) >>> S = SymmetricGroup(5) >>> A = AlternatingGroup(5) >>> G = S.commutator(S, A) >>> G.is_subgroup(A) True See Also ======== derived_subgroup Notes ===== The commutator of two subgroups `H, G` is equal to the normal closure of the commutators of all the generators, i.e. 
`hgh^{-1}g^{-1}` for `h` a generator of `H` and `g` a generator of `G` ([1], p.28) """ ggens = G.generators hgens = H.generators commutators = [] for ggen in ggens: for hgen in hgens: commutator = rmul(hgen, ggen, ~hgen, ~ggen) if commutator not in commutators: commutators.append(commutator) res = self.normal_closure(commutators) return res def coset_factor(self, g, factor_index=False): """Return ``G``'s (self's) coset factorization of ``g`` If ``g`` is an element of ``G`` then it can be written as the product of permutations drawn from the Schreier-Sims coset decomposition, The permutations returned in ``f`` are those for which the product gives ``g``: ``g = f[n]*...f[1]*f[0]`` where ``n = len(B)`` and ``B = G.base``. f[i] is one of the permutations in ``self._basic_orbits[i]``. If factor_index==True, returns a tuple ``[b[0],..,b[n]]``, where ``b[i]`` belongs to ``self._basic_orbits[i]`` Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5) >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6) >>> G = PermutationGroup([a, b]) Define g: >>> g = Permutation(7)(1, 2, 4)(3, 6, 5) Confirm that it is an element of G: >>> G.contains(g) True Thus, it can be written as a product of factors (up to 3) drawn from u. See below that a factor from u1 and u2 and the Identity permutation have been used: >>> f = G.coset_factor(g) >>> f[2]*f[1]*f[0] == g True >>> f1 = G.coset_factor(g, True); f1 [0, 4, 4] >>> tr = G.basic_transversals >>> f[0] == tr[0][f1[0]] True If g is not an element of G then [] is returned: >>> c = Permutation(5, 6, 7) >>> G.coset_factor(c) [] See Also ======== sympy.combinatorics.util._strip """ if isinstance(g, (Cycle, Permutation)): g = g.list() if len(g) != self._degree: # this could either adjust the size or return [] immediately # but we don't choose between the two and just signal a possible # error raise ValueError('g should be the same size as permutations of G') I = list(range(self._degree)) basic_orbits = self.basic_orbits transversals = self._transversals factors = [] base = self.base h = g for i in range(len(base)): beta = h[base[i]] if beta == base[i]: factors.append(beta) continue if beta not in basic_orbits[i]: return [] u = transversals[i][beta]._array_form h = _af_rmul(_af_invert(u), h) factors.append(beta) if h != I: return [] if factor_index: return factors tr = self.basic_transversals factors = [tr[i][factors[i]] for i in range(len(base))] return factors def generator_product(self, g, original=False): ''' Return a list of strong generators `[s1, ..., sn]` s.t `g = sn*...*s1`. 
If `original=True`, make the list contain only the original group generators ''' product = [] if g.is_identity: return [] if g in self.strong_gens: if not original or g in self.generators: return [g] else: slp = self._strong_gens_slp[g] for s in slp: product.extend(self.generator_product(s, original=True)) return product elif g**-1 in self.strong_gens: g = g**-1 if not original or g in self.generators: return [g**-1] else: slp = self._strong_gens_slp[g] for s in slp: product.extend(self.generator_product(s, original=True)) l = len(product) product = [product[l-i-1]**-1 for i in range(l)] return product f = self.coset_factor(g, True) for i, j in enumerate(f): slp = self._transversal_slp[i][j] for s in slp: if not original: product.append(self.strong_gens[s]) else: s = self.strong_gens[s] product.extend(self.generator_product(s, original=True)) return product def coset_rank(self, g): """rank using Schreier-Sims representation The coset rank of ``g`` is the ordering number in which it appears in the lexicographic listing according to the coset decomposition The ordering is the same as in G.generate(method='coset'). If ``g`` does not belong to the group it returns None. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5) >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6) >>> G = PermutationGroup([a, b]) >>> c = Permutation(7)(2, 4)(3, 5) >>> G.coset_rank(c) 16 >>> G.coset_unrank(16) (7)(2 4)(3 5) See Also ======== coset_factor """ factors = self.coset_factor(g, True) if not factors: return None rank = 0 b = 1 transversals = self._transversals base = self._base basic_orbits = self._basic_orbits for i in range(len(base)): k = factors[i] j = basic_orbits[i].index(k) rank += b*j b = b*len(transversals[i]) return rank def coset_unrank(self, rank, af=False): """unrank using Schreier-Sims representation coset_unrank is the inverse operation of coset_rank if 0 <= rank < order; otherwise it returns None. """ if rank < 0 or rank >= self.order(): return None base = self.base transversals = self.basic_transversals basic_orbits = self.basic_orbits m = len(base) v = [0]*m for i in range(m): rank, c = divmod(rank, len(transversals[i])) v[i] = basic_orbits[i][c] a = [transversals[i][v[i]]._array_form for i in range(m)] h = _af_rmuln(*a) if af: return h else: return _af_new(h) @property def degree(self): """Returns the size of the permutations in the group. The number of permutations comprising the group is given by ``len(group)``; the number of permutations that can be generated by the group is given by ``group.order()``. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([1, 0, 2]) >>> G = PermutationGroup([a]) >>> G.degree 3 >>> len(G) 1 >>> G.order() 2 >>> list(G.generate()) [(2), (2)(0 1)] See Also ======== order """ return self._degree @property def identity(self): ''' Return the identity element of the permutation group. 
''' return _af_new(list(range(self.degree))) @property def elements(self): """Returns all the elements of the permutation group as a set Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2)) >>> p.elements {(1 2 3), (1 3 2), (1 3), (2 3), (3), (3)(1 2)} """ return set(self._elements) @property def _elements(self): """Returns all the elements of the permutation group as a list Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2)) >>> p._elements [(3), (3)(1 2), (1 3), (2 3), (1 2 3), (1 3 2)] """ return list(islice(self.generate(), None)) def derived_series(self): r"""Return the derived series for the group. The derived series for a group `G` is defined as `G = G_0 > G_1 > G_2 > \ldots` where `G_i = [G_{i-1}, G_{i-1}]`, i.e. `G_i` is the derived subgroup of `G_{i-1}`, for `i\in\mathbb{N}`. When we have `G_k = G_{k-1}` for some `k\in\mathbb{N}`, the series terminates. Returns ======= A list of permutation groups containing the members of the derived series in the order `G = G_0, G_1, G_2, \ldots`. Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... AlternatingGroup, DihedralGroup) >>> A = AlternatingGroup(5) >>> len(A.derived_series()) 1 >>> S = SymmetricGroup(4) >>> len(S.derived_series()) 4 >>> S.derived_series()[1].is_subgroup(AlternatingGroup(4)) True >>> S.derived_series()[2].is_subgroup(DihedralGroup(2)) True See Also ======== derived_subgroup """ res = [self] current = self next = self.derived_subgroup() while not current.is_subgroup(next): res.append(next) current = next next = next.derived_subgroup() return res def derived_subgroup(self): r"""Compute the derived subgroup. The derived subgroup, or commutator subgroup is the subgroup generated by all commutators `[g, h] = hgh^{-1}g^{-1}` for `g, h\in G` ; it is equal to the normal closure of the set of commutators of the generators ([1], p.28, [11]). 
Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([1, 0, 2, 4, 3]) >>> b = Permutation([0, 1, 3, 2, 4]) >>> G = PermutationGroup([a, b]) >>> C = G.derived_subgroup() >>> list(C.generate(af=True)) [[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]] See Also ======== derived_series """ r = self._r gens = [p._array_form for p in self.generators] set_commutators = set() degree = self._degree rng = list(range(degree)) for i in range(r): for j in range(r): p1 = gens[i] p2 = gens[j] c = list(range(degree)) for k in rng: c[p2[p1[k]]] = p1[p2[k]] ct = tuple(c) if not ct in set_commutators: set_commutators.add(ct) cms = [_af_new(p) for p in set_commutators] G2 = self.normal_closure(cms) return G2 def generate(self, method="coset", af=False): """Return iterator to generate the elements of the group Iteration is done with one of these methods:: method='coset' using the Schreier-Sims coset representation method='dimino' using the Dimino method If af = True it yields the array form of the permutations Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics import PermutationGroup >>> from sympy.combinatorics.polyhedron import tetrahedron The permutation group given in the tetrahedron object is also true groups: >>> G = tetrahedron.pgroup >>> G.is_group True Also the group generated by the permutations in the tetrahedron pgroup -- even the first two -- is a proper group: >>> H = PermutationGroup(G[0], G[1]) >>> J = PermutationGroup(list(H.generate())); J PermutationGroup([ (0 1)(2 3), (1 2 3), (1 3 2), (0 3 1), (0 2 3), (0 3)(1 2), (0 1 3), (3)(0 2 1), (0 3 2), (3)(0 1 2), (0 2)(1 3)]) >>> _.is_group True """ if method == "coset": return self.generate_schreier_sims(af) elif method == "dimino": return self.generate_dimino(af) else: raise NotImplementedError('No generation defined for %s' % method) def generate_dimino(self, af=False): """Yield group elements using Dimino's algorithm If af == True it yields the array form of the permutations Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1, 3]) >>> b = Permutation([0, 2, 3, 1]) >>> g = PermutationGroup([a, b]) >>> list(g.generate_dimino(af=True)) [[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1], [0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]] References ========== .. [1] The Implementation of Various Algorithms for Permutation Groups in the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. 
Thesis """ idn = list(range(self.degree)) order = 0 element_list = [idn] set_element_list = {tuple(idn)} if af: yield idn else: yield _af_new(idn) gens = [p._array_form for p in self.generators] for i in range(len(gens)): # D elements of the subgroup G_i generated by gens[:i] D = element_list[:] N = [idn] while N: A = N N = [] for a in A: for g in gens[:i + 1]: ag = _af_rmul(a, g) if tuple(ag) not in set_element_list: # produce G_i*g for d in D: order += 1 ap = _af_rmul(d, ag) if af: yield ap else: p = _af_new(ap) yield p element_list.append(ap) set_element_list.add(tuple(ap)) N.append(ap) self._order = len(element_list) def generate_schreier_sims(self, af=False): """Yield group elements using the Schreier-Sims representation in coset_rank order If ``af = True`` it yields the array form of the permutations Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1, 3]) >>> b = Permutation([0, 2, 3, 1]) >>> g = PermutationGroup([a, b]) >>> list(g.generate_schreier_sims(af=True)) [[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1], [0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]] """ n = self._degree u = self.basic_transversals basic_orbits = self._basic_orbits if len(u) == 0: for x in self.generators: if af: yield x._array_form else: yield x return if len(u) == 1: for i in basic_orbits[0]: if af: yield u[0][i]._array_form else: yield u[0][i] return u = list(reversed(u)) basic_orbits = basic_orbits[::-1] # stg stack of group elements stg = [list(range(n))] posmax = [len(x) for x in u] n1 = len(posmax) - 1 pos = [0]*n1 h = 0 while 1: # backtrack when finished iterating over coset if pos[h] >= posmax[h]: if h == 0: return pos[h] = 0 h -= 1 stg.pop() continue p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1]) pos[h] += 1 stg.append(p) h += 1 if h == n1: if af: for i in basic_orbits[-1]: p = _af_rmul(u[-1][i]._array_form, stg[-1]) yield p else: for i in basic_orbits[-1]: p = _af_rmul(u[-1][i]._array_form, stg[-1]) p1 = _af_new(p) yield p1 stg.pop() h -= 1 @property def generators(self): """Returns the generators of the group. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.generators [(1 2), (2)(0 1)] """ return self._generators def contains(self, g, strict=True): """Test if permutation ``g`` belong to self, ``G``. If ``g`` is an element of ``G`` it can be written as a product of factors drawn from the cosets of ``G``'s stabilizers. To see if ``g`` is one of the actual generators defining the group use ``G.has(g)``. If ``strict`` is not ``True``, ``g`` will be resized, if necessary, to match the size of permutations in ``self``. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation(1, 2) >>> b = Permutation(2, 3, 1) >>> G = PermutationGroup(a, b, degree=5) >>> G.contains(G[0]) # trivial check True >>> elem = Permutation([[2, 3]], size=5) >>> G.contains(elem) True >>> G.contains(Permutation(4)(0, 1, 2, 3)) False If strict is False, a permutation will be resized, if necessary: >>> H = PermutationGroup(Permutation(5)) >>> H.contains(Permutation(3)) False >>> H.contains(Permutation(3), strict=False) True To test if a given permutation is present in the group: >>> elem in G.generators False >>> G.has(elem) False See Also ======== coset_factor, sympy.core.basic.Basic.has, __contains__ """ if not isinstance(g, Permutation): return False if g.size != self.degree: if strict: return False g = Permutation(g, size=self.degree) if g in self.generators: return True return bool(self.coset_factor(g.array_form, True)) @property def is_perfect(self): """Return ``True`` if the group is perfect. A group is perfect if it equals to its derived subgroup. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation(1,2,3)(4,5) >>> b = Permutation(1,2,3,4,5) >>> G = PermutationGroup([a, b]) >>> G.is_perfect False """ if self._is_perfect is None: self._is_perfect = self == self.derived_subgroup() return self._is_perfect @property def is_abelian(self): """Test if the group is Abelian. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.is_abelian False >>> a = Permutation([0, 2, 1]) >>> G = PermutationGroup([a]) >>> G.is_abelian True """ if self._is_abelian is not None: return self._is_abelian self._is_abelian = True gens = [p._array_form for p in self.generators] for x in gens: for y in gens: if y <= x: continue if not _af_commutes_with(x, y): self._is_abelian = False return False return True def abelian_invariants(self): """ Returns the abelian invariants for the given group. Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to the direct product of finitely many nontrivial cyclic groups of prime-power order. The prime-powers that occur as the orders of the factors are uniquely determined by G. More precisely, the primes that occur in the orders of the factors in any such decomposition of ``G`` are exactly the primes that divide ``|G|`` and for any such prime ``p``, if the orders of the factors that are p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, then the orders of the factors that are p-groups in any such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``. The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken for all primes that divide ``|G|`` are called the invariants of the nontrivial group ``G`` as suggested in ([14], p. 542). Notes ===== We adopt the convention that the invariants of a trivial group are []. 
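As an extra illustration (a small sketch; ``AbelianGroup`` from ``sympy.combinatorics.named_groups`` and the group `Z_2 x Z_4` are used purely as an example), the invariants of ``AbelianGroup(2, 4)`` should be the prime powers ``2`` and ``4``:

>>> from sympy.combinatorics.named_groups import AbelianGroup
>>> AbelianGroup(2, 4).abelian_invariants()
[2, 4]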
Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.abelian_invariants() [2] >>> from sympy.combinatorics.named_groups import CyclicGroup >>> G = CyclicGroup(7) >>> G.abelian_invariants() [7] """ if self.is_trivial: return [] gns = self.generators inv = [] G = self H = G.derived_subgroup() Hgens = H.generators for p in primefactors(G.order()): ranks = [] while True: pows = [] for g in gns: elm = g**p if not H.contains(elm): pows.append(elm) K = PermutationGroup(Hgens + pows) if pows else H r = G.order()//K.order() G = K gns = pows if r == 1: break ranks.append(multiplicity(p, r)) if ranks: pows = [1]*ranks[0] for i in ranks: for j in range(0, i): pows[j] = pows[j]*p inv.extend(pows) inv.sort() return inv def is_elementary(self, p): """Return ``True`` if the group is elementary abelian. An elementary abelian group is a finite abelian group, where every nontrivial element has order `p`, where `p` is a prime. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1]) >>> G = PermutationGroup([a]) >>> G.is_elementary(2) True >>> a = Permutation([0, 2, 1, 3]) >>> b = Permutation([3, 1, 2, 0]) >>> G = PermutationGroup([a, b]) >>> G.is_elementary(2) True >>> G.is_elementary(3) False """ return self.is_abelian and all(g.order() == p for g in self.generators) def _eval_is_alt_sym_naive(self, only_sym=False, only_alt=False): """A naive test using the group order.""" if only_sym and only_alt: raise ValueError( "only_sym and only_alt cannot both be set to True") n = self.degree sym_order = 1 for i in range(2, n+1): sym_order *= i order = self.order() if order == sym_order: self._is_sym = True self._is_alt = False if only_alt: return False return True elif 2*order == sym_order: self._is_sym = False self._is_alt = True if only_sym: return False return True return False def _eval_is_alt_sym_monte_carlo(self, eps=0.05, perms=None): """A test using a Monte Carlo algorithm. Parameters ========== eps : float, optional The criterion for the incorrect ``False`` return. perms : list[Permutation], optional If explicitly given, the test is run over the given candidate permutations. If ``None``, ``N_eps`` is computed and ``N_eps`` random elements of the group are tested. See Also ======== _check_cycles_alt_sym """ if perms is None: n = self.degree if n < 17: c_n = 0.34 else: c_n = 0.57 d_n = (c_n*log(2))/log(n) N_eps = int(-log(eps)/d_n) perms = (self.random_pr() for i in range(N_eps)) return self._eval_is_alt_sym_monte_carlo(perms=perms) for perm in perms: if _check_cycles_alt_sym(perm): return True return False def is_alt_sym(self, eps=0.05, _random_prec=None): r"""Monte Carlo test for the symmetric/alternating group for degrees >= 8. More specifically, it is one-sided Monte Carlo with the answer True (i.e., G is symmetric/alternating) guaranteed to be correct, and the answer False being incorrect with probability eps. For degree < 8, the order of the group is checked so the test is deterministic. Notes ===== The algorithm itself uses some nontrivial results from group theory and number theory: 1) If a transitive group ``G`` of degree ``n`` contains an element with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the symmetric or alternating group ([1], pp.
81-82) 2) The proportion of elements in the symmetric/alternating group having the property described in 1) is approximately `\log(2)/\log(n)` ([1], p.82; [2], pp. 226-227). The helper function ``_check_cycles_alt_sym`` is used to go over the cycles in a permutation and look for ones satisfying 1). Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> D = DihedralGroup(10) >>> D.is_alt_sym() False See Also ======== _check_cycles_alt_sym """ if _random_prec is not None: N_eps = _random_prec['N_eps'] perms= (_random_prec[i] for i in range(N_eps)) return self._eval_is_alt_sym_monte_carlo(perms=perms) if self._is_sym or self._is_alt: return True if self._is_sym is False and self._is_alt is False: return False n = self.degree if n < 8: return self._eval_is_alt_sym_naive() elif self.is_transitive(): return self._eval_is_alt_sym_monte_carlo(eps=eps) self._is_sym, self._is_alt = False, False return False @property def is_nilpotent(self): """Test if the group is nilpotent. A group `G` is nilpotent if it has a central series of finite length. Alternatively, `G` is nilpotent if its lower central series terminates with the trivial group. Every nilpotent group is also solvable ([1], p.29, [12]). Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... CyclicGroup) >>> C = CyclicGroup(6) >>> C.is_nilpotent True >>> S = SymmetricGroup(5) >>> S.is_nilpotent False See Also ======== lower_central_series, is_solvable """ if self._is_nilpotent is None: lcs = self.lower_central_series() terminator = lcs[len(lcs) - 1] gens = terminator.generators degree = self.degree identity = _af_new(list(range(degree))) if all(g == identity for g in gens): self._is_solvable = True self._is_nilpotent = True return True else: self._is_nilpotent = False return False else: return self._is_nilpotent def is_normal(self, gr, strict=True): """Test if ``G=self`` is a normal subgroup of ``gr``. G is normal in gr if for each g2 in G, g1 in gr, ``g = g1*g2*g1**-1`` belongs to G It is sufficient to check this for each g1 in gr.generators and g2 in G.generators. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([1, 2, 0]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G1 = PermutationGroup([a, Permutation([2, 0, 1])]) >>> G1.is_normal(G) True """ if not self.is_subgroup(gr, strict=strict): return False d_self = self.degree d_gr = gr.degree if self.is_trivial and (d_self == d_gr or not strict): return True if self._is_abelian: return True new_self = self.copy() if not strict and d_self != d_gr: if d_self < d_gr: new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)]) else: gr = PermGroup(gr.generators + [Permutation(d_self - 1)]) gens2 = [p._array_form for p in new_self.generators] gens1 = [p._array_form for p in gr.generators] for g1 in gens1: for g2 in gens2: p = _af_rmuln(g1, g2, _af_invert(g1)) if not new_self.coset_factor(p, True): return False return True def is_primitive(self, randomized=True): r"""Test if a group is primitive. A permutation group ``G`` acting on a set ``S`` is called primitive if ``S`` contains no nontrivial block under the action of ``G`` (a block is nontrivial if its cardinality is more than ``1``). 
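For instance (an illustrative sketch reusing ``DihedralGroup`` and ``minimal_block``, both of which already appear in this file), the pairs of opposite vertices of a hexagon form a nontrivial block system for ``DihedralGroup(6)``, so that group is not primitive:

>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> DihedralGroup(6).minimal_block([0, 3])
[0, 1, 2, 0, 1, 2]
>>> DihedralGroup(6).is_primitive()
False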
Notes ===== The algorithm is described in [1], p.83, and uses the function minimal_block to search for blocks of the form `\{0, k\}` for ``k`` ranging over representatives for the orbits of `G_0`, the stabilizer of ``0``. This algorithm has complexity `O(n^2)` where ``n`` is the degree of the group, and will perform badly if `G_0` is small. There are two implementations offered: one finds `G_0` deterministically using the function ``stabilizer``, and the other (default) produces random elements of `G_0` using ``random_stab``, hoping that they generate a subgroup of `G_0` with not too many more orbits than `G_0` (this is suggested in [1], p.83). Behavior is changed by the ``randomized`` flag. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> D = DihedralGroup(10) >>> D.is_primitive() False See Also ======== minimal_block, random_stab """ if self._is_primitive is not None: return self._is_primitive if self.is_transitive() is False: return False if randomized: random_stab_gens = [] v = self.schreier_vector(0) for i in range(len(self)): random_stab_gens.append(self.random_stab(0, v)) stab = PermutationGroup(random_stab_gens) else: stab = self.stabilizer(0) orbits = stab.orbits() for orb in orbits: x = orb.pop() if x != 0 and any(e != 0 for e in self.minimal_block([0, x])): self._is_primitive = False return False self._is_primitive = True return True def minimal_blocks(self, randomized=True): ''' For a transitive group, return the list of all minimal block systems. If a group is intransitive, return `False`. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> DihedralGroup(6).minimal_blocks() [[0, 1, 0, 1, 0, 1], [0, 1, 2, 0, 1, 2]] >>> G = PermutationGroup(Permutation(1,2,5)) >>> G.minimal_blocks() False See Also ======== minimal_block, is_transitive, is_primitive ''' def _number_blocks(blocks): # number the blocks of a block system # in order and return the number of # blocks and the tuple with the # reordering n = len(blocks) appeared = {} m = 0 b = [None]*n for i in range(n): if blocks[i] not in appeared: appeared[blocks[i]] = m b[i] = m m += 1 else: b[i] = appeared[blocks[i]] return tuple(b), m if not self.is_transitive(): return False blocks = [] num_blocks = [] rep_blocks = [] if randomized: random_stab_gens = [] v = self.schreier_vector(0) for i in range(len(self)): random_stab_gens.append(self.random_stab(0, v)) stab = PermutationGroup(random_stab_gens) else: stab = self.stabilizer(0) orbits = stab.orbits() for orb in orbits: x = orb.pop() if x != 0: block = self.minimal_block([0, x]) num_block, m = _number_blocks(block) # a representative block (containing 0) rep = set(j for j in range(self.degree) if num_block[j] == 0) # check if the system is minimal with # respect to the already discovere ones minimal = True to_remove = [] for i, r in enumerate(rep_blocks): if len(r) > len(rep) and rep.issubset(r): # i-th block system is not minimal del num_blocks[i], blocks[i] to_remove.append(rep_blocks[i]) elif len(r) < len(rep) and r.issubset(rep): # the system being checked is not minimal minimal = False break # remove non-minimal representative blocks rep_blocks = [r for r in rep_blocks if r not in to_remove] if minimal and num_block not in num_blocks: blocks.append(block) num_blocks.append(num_block) rep_blocks.append(rep) return blocks @property def 
is_solvable(self): """Test if the group is solvable. ``G`` is solvable if its derived series terminates with the trivial group ([1], p.29). Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> S = SymmetricGroup(3) >>> S.is_solvable True See Also ======== is_nilpotent, derived_series """ if self._is_solvable is None: if self.order() % 2 != 0: return True ds = self.derived_series() terminator = ds[len(ds) - 1] gens = terminator.generators degree = self.degree identity = _af_new(list(range(degree))) if all(g == identity for g in gens): self._is_solvable = True return True else: self._is_solvable = False return False else: return self._is_solvable def is_subgroup(self, G, strict=True): """Return ``True`` if all elements of ``self`` belong to ``G``. If ``strict`` is ``False`` then if ``self``'s degree is smaller than ``G``'s, the elements will be resized to have the same degree. Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... CyclicGroup) Testing is strict by default: the degree of each group must be the same: >>> p = Permutation(0, 1, 2, 3, 4, 5) >>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)]) >>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)]) >>> G3 = PermutationGroup([p, p**2]) >>> assert G1.order() == G2.order() == G3.order() == 6 >>> G1.is_subgroup(G2) True >>> G1.is_subgroup(G3) False >>> G3.is_subgroup(PermutationGroup(G3[1])) False >>> G3.is_subgroup(PermutationGroup(G3[0])) True To ignore the size, set ``strict`` to ``False``: >>> S3 = SymmetricGroup(3) >>> S5 = SymmetricGroup(5) >>> S3.is_subgroup(S5, strict=False) True >>> C7 = CyclicGroup(7) >>> G = S5*C7 >>> S5.is_subgroup(G, False) True >>> C7.is_subgroup(G, 0) False """ if not isinstance(G, PermutationGroup): return False if self == G or self.generators[0]==Permutation(): return True if G.order() % self.order() != 0: return False if self.degree == G.degree or \ (self.degree < G.degree and not strict): gens = self.generators else: return False return all(G.contains(g, strict=strict) for g in gens) @property def is_polycyclic(self): """Return ``True`` if a group is polycyclic. A group is polycyclic if it has a subnormal series with cyclic factors. For finite groups, this is the same as if the group is solvable. Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> a = Permutation([0, 2, 1, 3]) >>> b = Permutation([2, 0, 1, 3]) >>> G = PermutationGroup([a, b]) >>> G.is_polycyclic True """ return self.is_solvable def is_transitive(self, strict=True): """Test if the group is transitive. A group is transitive if it has a single orbit. If ``strict`` is ``False`` the group is transitive if it has a single orbit of length different from 1. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1, 3]) >>> b = Permutation([2, 0, 1, 3]) >>> G1 = PermutationGroup([a, b]) >>> G1.is_transitive() False >>> G1.is_transitive(strict=False) True >>> c = Permutation([2, 3, 0, 1]) >>> G2 = PermutationGroup([a, c]) >>> G2.is_transitive() True >>> d = Permutation([1, 0, 2, 3]) >>> e = Permutation([0, 1, 3, 2]) >>> G3 = PermutationGroup([d, e]) >>> G3.is_transitive() or G3.is_transitive(strict=False) False """ if self._is_transitive: # strict or not, if True then True return self._is_transitive if strict: if self._is_transitive is not None: # we only store strict=True return self._is_transitive ans = len(self.orbit(0)) == self.degree self._is_transitive = ans return ans got_orb = False for x in self.orbits(): if len(x) > 1: if got_orb: return False got_orb = True return got_orb @property def is_trivial(self): """Test if the group is the trivial group. This is true if the group contains only the identity permutation. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> G = PermutationGroup([Permutation([0, 1, 2])]) >>> G.is_trivial True """ if self._is_trivial is None: self._is_trivial = len(self) == 1 and self[0].is_Identity return self._is_trivial def lower_central_series(self): r"""Return the lower central series for the group. The lower central series for a group `G` is the series `G = G_0 > G_1 > G_2 > \ldots` where `G_k = [G, G_{k-1}]`, i.e. every term after the first is equal to the commutator of `G` and the previous term in `G1` ([1], p.29). Returns ======= A list of permutation groups in the order `G = G_0, G_1, G_2, \ldots` Examples ======== >>> from sympy.combinatorics.named_groups import (AlternatingGroup, ... DihedralGroup) >>> A = AlternatingGroup(4) >>> len(A.lower_central_series()) 2 >>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2)) True See Also ======== commutator, derived_series """ res = [self] current = self next = self.commutator(self, current) while not current.is_subgroup(next): res.append(next) current = next next = self.commutator(self, current) return res @property def max_div(self): """Maximum proper divisor of the degree of a permutation group. Notes ===== Obviously, this is the degree divided by its minimal proper divisor (larger than ``1``, if one exists). As it is guaranteed to be prime, the ``sieve`` from ``sympy.ntheory`` is used. This function is also used as an optimization tool for the functions ``minimal_block`` and ``_union_find_merge``. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> G = PermutationGroup([Permutation([0, 2, 1, 3])]) >>> G.max_div 2 See Also ======== minimal_block, _union_find_merge """ if self._max_div is not None: return self._max_div n = self.degree if n == 1: return 1 for x in sieve: if n % x == 0: d = n//x self._max_div = d return d def minimal_block(self, points): r"""For a transitive group, finds the block system generated by ``points``. If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S`` is called a block under the action of ``G`` if for all ``g`` in ``G`` we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no common points (``g`` moves ``B`` entirely). ([1], p.23; [6]). 
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G`` partition the set ``S`` and this set of translates is known as a block system. Moreover, we obviously have that all blocks in the partition have the same size, hence the block size divides ``|S|`` ([1], p.23). A ``G``-congruence is an equivalence relation ``~`` on the set ``S`` such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``. For a transitive group, the equivalence classes of a ``G``-congruence and the blocks of a block system are the same thing ([1], p.23). The algorithm below checks the group for transitivity, and then finds the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2), ..., (p_0,p_{k-1})`` which is the same as finding the maximal block system (i.e., the one with minimum block size) such that ``p_0, ..., p_{k-1}`` are in the same block ([1], p.83). It is an implementation of Atkinson's algorithm, as suggested in [1], and manipulates an equivalence relation on the set ``S`` using a union-find data structure. The running time is just above `O(|points||S|)`. ([1], pp. 83-87; [7]). Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> D = DihedralGroup(10) >>> D.minimal_block([0, 5]) [0, 1, 2, 3, 4, 0, 1, 2, 3, 4] >>> D.minimal_block([0, 1]) [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] See Also ======== _union_find_rep, _union_find_merge, is_transitive, is_primitive """ if not self.is_transitive(): return False n = self.degree gens = self.generators # initialize the list of equivalence class representatives parents = list(range(n)) ranks = [1]*n not_rep = [] k = len(points) # the block size must divide the degree of the group if k > self.max_div: return [0]*n for i in range(k - 1): parents[points[i + 1]] = points[0] not_rep.append(points[i + 1]) ranks[points[0]] = k i = 0 len_not_rep = k - 1 while i < len_not_rep: gamma = not_rep[i] i += 1 for gen in gens: # find has side effects: performs path compression on the list # of representatives delta = self._union_find_rep(gamma, parents) # union has side effects: performs union by rank on the list # of representatives temp = self._union_find_merge(gen(gamma), gen(delta), ranks, parents, not_rep) if temp == -1: return [0]*n len_not_rep += temp for i in range(n): # force path compression to get the final state of the equivalence # relation self._union_find_rep(i, parents) # rewrite result so that block representatives are minimal new_reps = {} return [new_reps.setdefault(r, i) for i, r in enumerate(parents)] def conjugacy_class(self, x): r"""Return the conjugacy class of an element in the group. The conjugacy class of an element ``g`` in a group ``G`` is the set of elements ``x`` in ``G`` that are conjugate with ``g``, i.e. for which ``x = aga^{-1}`` for some ``a`` in ``G``. Note that conjugacy is an equivalence relation, and therefore the conjugacy classes partition ``G``. For a list of all the conjugacy classes of the group, use the conjugacy_classes() method.
In a permutation group, each conjugacy class corresponds to a particular cycle structure: for example, in ``S_3``, the conjugacy classes are: * the identity class, ``{()}`` * all transpositions, ``{(1 2), (1 3), (2 3)}`` * all 3-cycles, ``{(1 2 3), (1 3 2)}`` Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> S3 = SymmetricGroup(3) >>> S3.conjugacy_class(Permutation(0, 1, 2)) {(0 1 2), (0 2 1)} Notes ===== This procedure computes the conjugacy class directly by finding the orbit of the element under conjugation in G. This algorithm is only feasible for permutation groups of relatively small order, but is like the orbit() function itself in that respect. """ # Ref: "Computing the conjugacy classes of finite groups"; Butler, G. # Groups '93 Galway/St Andrews; edited by Campbell, C. M. new_class = set([x]) last_iteration = new_class while len(last_iteration) > 0: this_iteration = set() for y in last_iteration: for s in self.generators: conjugated = s * y * (~s) if conjugated not in new_class: this_iteration.add(conjugated) new_class.update(last_iteration) last_iteration = this_iteration return new_class def conjugacy_classes(self): r"""Return the conjugacy classes of the group. As described in the documentation for the .conjugacy_class() function, conjugacy is an equivalence relation on a group G which partitions the set of elements. This method returns a list of all these conjugacy classes of G. Examples ======== >>> from sympy.combinatorics import SymmetricGroup >>> SymmetricGroup(3).conjugacy_classes() [{(2)}, {(0 1 2), (0 2 1)}, {(0 2), (1 2), (2)(0 1)}] """ identity = _af_new(list(range(self.degree))) known_elements = set([identity]) classes = [known_elements.copy()] for x in self.generate(): if x not in known_elements: new_class = self.conjugacy_class(x) classes.append(new_class) known_elements.update(new_class) return classes def normal_closure(self, other, k=10): r"""Return the normal closure of a subgroup/set of permutations. If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G`` is defined as the intersection of all normal subgroups of ``G`` that contain ``S`` ([1], p.14). Alternatively, it is the group generated by the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a generator of the subgroup ``\left\langle S\right\rangle`` generated by ``S`` (for some chosen generating set for ``\left\langle S\right\rangle``) ([1], p.73). Parameters ========== other a subgroup/list of permutations/single permutation k an implementation-specific parameter that determines the number of conjugates that are adjoined to ``other`` at once Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... CyclicGroup, AlternatingGroup) >>> S = SymmetricGroup(5) >>> C = CyclicGroup(5) >>> G = S.normal_closure(C) >>> G.order() 60 >>> G.is_subgroup(AlternatingGroup(5)) True See Also ======== commutator, derived_subgroup, random_pr Notes ===== The algorithm is described in [1], pp. 73-74; it makes use of the generation of random elements for permutation groups by the product replacement algorithm.
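As an additional sanity check (a sketch; ``SymmetricGroup`` is the named group already imported in the example above), the normal closure of a single transposition in ``SymmetricGroup(4)`` is the whole group, since all transpositions are conjugate and together generate it:

>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.normal_closure(Permutation(3)(0, 1)).order()
24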
""" if hasattr(other, 'generators'): degree = self.degree identity = _af_new(list(range(degree))) if all(g == identity for g in other.generators): return other Z = PermutationGroup(other.generators[:]) base, strong_gens = Z.schreier_sims_incremental() strong_gens_distr = _distribute_gens_by_base(base, strong_gens) basic_orbits, basic_transversals = \ _orbits_transversals_from_bsgs(base, strong_gens_distr) self._random_pr_init(r=10, n=20) _loop = True while _loop: Z._random_pr_init(r=10, n=10) for i in range(k): g = self.random_pr() h = Z.random_pr() conj = h^g res = _strip(conj, base, basic_orbits, basic_transversals) if res[0] != identity or res[1] != len(base) + 1: gens = Z.generators gens.append(conj) Z = PermutationGroup(gens) strong_gens.append(conj) temp_base, temp_strong_gens = \ Z.schreier_sims_incremental(base, strong_gens) base, strong_gens = temp_base, temp_strong_gens strong_gens_distr = \ _distribute_gens_by_base(base, strong_gens) basic_orbits, basic_transversals = \ _orbits_transversals_from_bsgs(base, strong_gens_distr) _loop = False for g in self.generators: for h in Z.generators: conj = h^g res = _strip(conj, base, basic_orbits, basic_transversals) if res[0] != identity or res[1] != len(base) + 1: _loop = True break if _loop: break return Z elif hasattr(other, '__getitem__'): return self.normal_closure(PermutationGroup(other)) elif hasattr(other, 'array_form'): return self.normal_closure(PermutationGroup([other])) def orbit(self, alpha, action='tuples'): r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set. The time complexity of the algorithm used here is `O(|Orb|*r)` where `|Orb|` is the size of the orbit and ``r`` is the number of generators of the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21. Here alpha can be a single point, or a list of points. If alpha is a single point, the ordinary orbit is computed. if alpha is a list of points, there are three available options: 'union' - computes the union of the orbits of the points in the list 'tuples' - computes the orbit of the list interpreted as an ordered tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) ) 'sets' - computes the orbit of the list interpreted as a sets Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([1, 2, 0, 4, 5, 6, 3]) >>> G = PermutationGroup([a]) >>> G.orbit(0) {0, 1, 2} >>> G.orbit([0, 4], 'union') {0, 1, 2, 3, 4, 5, 6} See Also ======== orbit_transversal """ return _orbit(self.degree, self.generators, alpha, action) def orbit_rep(self, alpha, beta, schreier_vector=None): """Return a group element which sends ``alpha`` to ``beta``. If ``beta`` is not in the orbit of ``alpha``, the function returns ``False``. This implementation makes use of the schreier vector. 
For a proof of correctness, see [1], p.80 Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> G = AlternatingGroup(5) >>> G.orbit_rep(0, 4) (0 4 1 2 3) See Also ======== schreier_vector """ if schreier_vector is None: schreier_vector = self.schreier_vector(alpha) if schreier_vector[beta] is None: return False k = schreier_vector[beta] gens = [x._array_form for x in self.generators] a = [] while k != -1: a.append(gens[k]) beta = gens[k].index(beta) # beta = (~gens[k])(beta) k = schreier_vector[beta] if a: return _af_new(_af_rmuln(*a)) else: return _af_new(list(range(self._degree))) def orbit_transversal(self, alpha, pairs=False): r"""Computes a transversal for the orbit of ``alpha`` as a set. For a permutation group `G`, a transversal for the orbit `Orb = \{g(\alpha) | g \in G\}` is a set `\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`. Note that there may be more than one possible transversal. If ``pairs`` is set to ``True``, it returns the list of pairs `(\beta, g_\beta)`. For a proof of correctness, see [1], p.79 Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> G = DihedralGroup(6) >>> G.orbit_transversal(0) [(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)] See Also ======== orbit """ return _orbit_transversal(self._degree, self.generators, alpha, pairs) def orbits(self, rep=False): """Return the orbits of ``self``, ordered according to lowest element in each orbit. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation(1, 5)(2, 3)(4, 0, 6) >>> b = Permutation(1, 5)(3, 4)(2, 6, 0) >>> G = PermutationGroup([a, b]) >>> G.orbits() [{0, 2, 3, 4, 6}, {1, 5}] """ return _orbits(self._degree, self._generators) def order(self): """Return the order of the group: the number of permutations that can be generated from elements of the group. The number of permutations comprising the group is given by ``len(group)``; the length of each permutation in the group is given by ``group.size``. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([1, 0, 2]) >>> G = PermutationGroup([a]) >>> G.degree 3 >>> len(G) 1 >>> G.order() 2 >>> list(G.generate()) [(2), (2)(0 1)] >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.order() 6 See Also ======== degree """ if self._order is not None: return self._order if self._is_sym: n = self._degree self._order = factorial(n) return self._order if self._is_alt: n = self._degree self._order = factorial(n)/2 return self._order basic_transversals = self.basic_transversals m = 1 for x in basic_transversals: m *= len(x) self._order = m return m def index(self, H): """ Returns the index of a permutation group. 
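The index of a subgroup ``H`` in ``G`` is the number of distinct cosets of ``H`` in ``G``, that is ``G.order()//H.order()``. For instance (an extra illustrative check using the named groups that appear elsewhere in this file), the alternating group has index ``2`` in the symmetric group of the same degree:

>>> from sympy.combinatorics.named_groups import SymmetricGroup, AlternatingGroup
>>> SymmetricGroup(4).index(AlternatingGroup(4))
2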
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation(1,2,3) >>> b = Permutation(3) >>> G = PermutationGroup([a]) >>> H = PermutationGroup([b]) >>> G.index(H) 3 """ if H.is_subgroup(self): return self.order()//H.order() @property def is_symmetric(self): """Return ``True`` if the group is symmetric. Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> g = SymmetricGroup(5) >>> g.is_symmetric True >>> from sympy.combinatorics import Permutation, PermutationGroup >>> g = PermutationGroup( ... Permutation(0, 1, 2, 3, 4), ... Permutation(2, 3)) >>> g.is_symmetric True Notes ===== This uses a naive test involving the computation of the full group order. If you need a quicker taxonomy for large groups, you can use :meth:`PermutationGroup.is_alt_sym`. However, :meth:`PermutationGroup.is_alt_sym` may not be accurate and is not able to distinguish between an alternating group and a symmetric group. See Also ======== is_alt_sym """ _is_sym = self._is_sym if _is_sym is not None: return _is_sym n = self.degree if n >= 8: if self.is_transitive(): _is_alt_sym = self._eval_is_alt_sym_monte_carlo() if _is_alt_sym: if any(g.is_odd for g in self.generators): self._is_sym, self._is_alt = True, False return True self._is_sym, self._is_alt = False, True return False return self._eval_is_alt_sym_naive(only_sym=True) self._is_sym, self._is_alt = False, False return False return self._eval_is_alt_sym_naive(only_sym=True) @property def is_alternating(self): """Return ``True`` if the group is alternating. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> g = AlternatingGroup(5) >>> g.is_alternating True >>> from sympy.combinatorics import Permutation, PermutationGroup >>> g = PermutationGroup( ... Permutation(0, 1, 2, 3, 4), ... Permutation(2, 3, 4)) >>> g.is_alternating True Notes ===== This uses a naive test involving the computation of the full group order. If you need a quicker taxonomy for large groups, you can use :meth:`PermutationGroup.is_alt_sym`. However, :meth:`PermutationGroup.is_alt_sym` may not be accurate and is not able to distinguish between an alternating group and a symmetric group. See Also ======== is_alt_sym """ _is_alt = self._is_alt if _is_alt is not None: return _is_alt n = self.degree if n >= 8: if self.is_transitive(): _is_alt_sym = self._eval_is_alt_sym_monte_carlo() if _is_alt_sym: if all(g.is_even for g in self.generators): self._is_sym, self._is_alt = False, True return True self._is_sym, self._is_alt = True, False return False return self._eval_is_alt_sym_naive(only_alt=True) self._is_sym, self._is_alt = False, False return False return self._eval_is_alt_sym_naive(only_alt=True) @classmethod def _distinct_primes_lemma(cls, primes): """Subroutine to test if there is only one group of the given order, which is then cyclic.""" primes = sorted(primes) l = len(primes) for i in range(l): for j in range(i+1, l): if primes[j] % primes[i] == 1: return None return True @property def is_cyclic(self): r""" Return ``True`` if the group is cyclic. Examples ======== >>> from sympy.combinatorics.named_groups import AbelianGroup >>> G = AbelianGroup(3, 4) >>> G.is_cyclic True >>> G = AbelianGroup(4, 4) >>> G.is_cyclic False Notes ===== If the order of a group $n$ can be factored into the distinct primes $p_1, p_2, ... , p_s$ and if ..
math:: \forall i, j \in \{1, 2, \ldots, s \}: p_i \not \equiv 1 \pmod {p_j} holds true, there is only one group of the order $n$ which is a cyclic group. [1]_ This is a generalization of the lemma that the groups of order $15, 35, ...$ are cyclic. And also, these additional lemmas can be used to test if a group is cyclic if the order of the group is already found. - If the group is abelian and the order of the group is square-free, the group is cyclic. - If the order of the group is less than $6$ and is not $4$, the group is cyclic. - If the order of the group is prime, the group is cyclic. References ========== .. [1] 1978: John S. Rose: A Course on Group Theory, Introduction to Finite Group Theory: 1.4 """ if self._is_cyclic is not None: return self._is_cyclic if len(self.generators) == 1: self._is_cyclic = True self._is_abelian = True return True if self._is_abelian is False: self._is_cyclic = False return False order = self.order() if order < 6: self._is_abelian = True if order != 4: self._is_cyclic = True return True factors = factorint(order) if all(v == 1 for v in factors.values()): if self._is_abelian: self._is_cyclic = True return True primes = list(factors.keys()) if PermutationGroup._distinct_primes_lemma(primes) is True: self._is_cyclic = True self._is_abelian = True return True for p in factors: pgens = [] for g in self.generators: pgens.append(g**p) if self.index(self.subgroup(pgens)) != p: self._is_cyclic = False return False self._is_cyclic = True self._is_abelian = True return True def pointwise_stabilizer(self, points, incremental=True): r"""Return the pointwise stabilizer for a set of points. For a permutation group `G` and a set of points `\{p_1, p_2,\ldots, p_k\}`, the pointwise stabilizer of `p_1, p_2, \ldots, p_k` is defined as `G_{p_1,\ldots, p_k} = \{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}` ([1], p.20). It is a subgroup of `G`. Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> S = SymmetricGroup(7) >>> Stab = S.pointwise_stabilizer([2, 3, 5]) >>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5)) True See Also ======== stabilizer, schreier_sims_incremental Notes ===== When incremental == True, rather than the obvious implementation using successive calls to ``.stabilizer()``, this uses the incremental Schreier-Sims algorithm to obtain a base with starting segment - the given points. """ if incremental: base, strong_gens = self.schreier_sims_incremental(base=points) stab_gens = [] degree = self.degree for gen in strong_gens: if [gen(point) for point in points] == points: stab_gens.append(gen) if not stab_gens: stab_gens = _af_new(list(range(degree))) return PermutationGroup(stab_gens) else: gens = self._generators degree = self.degree for x in points: gens = _stabilizer(degree, gens, x) return PermutationGroup(gens) def make_perm(self, n, seed=None): """ Multiply ``n`` randomly selected permutations from pgroup together, starting with the identity permutation. If ``n`` is a list of integers, those integers will be used to select the permutations and they will be applied in L to R order: make_perm((A, B, C)) will give CBA(I) where I is the identity permutation. ``seed`` is used to set the seed for the random selection of permutations from pgroup. If this is a list of integers, the corresponding permutations from pgroup will be selected in the order given. This is mainly used for testing purposes.
Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])] >>> G = PermutationGroup([a, b]) >>> G.make_perm(1, [0]) (0 1)(2 3) >>> G.make_perm(3, [0, 1, 0]) (0 2 3 1) >>> G.make_perm([0, 1, 0]) (0 2 3 1) See Also ======== random """ if is_sequence(n): if seed is not None: raise ValueError('If n is a sequence, seed should be None') n, seed = len(n), n else: try: n = int(n) except TypeError: raise ValueError('n must be an integer or a sequence.') randrange = _randrange(seed) # start with the identity permutation result = Permutation(list(range(self.degree))) m = len(self) for i in range(n): p = self[randrange(m)] result = rmul(result, p) return result def random(self, af=False): """Return a random group element """ rank = randrange(self.order()) return self.coset_unrank(rank, af) def random_pr(self, gen_count=11, iterations=50, _random_prec=None): """Return a random group element using product replacement. For the details of the product replacement algorithm, see ``_random_pr_init`` In ``random_pr`` the actual 'product replacement' is performed. Notice that if the attribute ``_random_gens`` is empty, it needs to be initialized by ``_random_pr_init``. See Also ======== _random_pr_init """ if self._random_gens == []: self._random_pr_init(gen_count, iterations) random_gens = self._random_gens r = len(random_gens) - 1 # handle randomized input for testing purposes if _random_prec is None: s = randrange(r) t = randrange(r - 1) if t == s: t = r - 1 x = choice([1, 2]) e = choice([-1, 1]) else: s = _random_prec['s'] t = _random_prec['t'] if t == s: t = r - 1 x = _random_prec['x'] e = _random_prec['e'] if x == 1: random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e)) random_gens[r] = _af_rmul(random_gens[r], random_gens[s]) else: random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s]) random_gens[r] = _af_rmul(random_gens[s], random_gens[r]) return _af_new(random_gens[r]) def random_stab(self, alpha, schreier_vector=None, _random_prec=None): """Random element from the stabilizer of ``alpha``. The schreier vector for ``alpha`` is an optional argument used for speeding up repeated calls. The algorithm is described in [1], p.81 See Also ======== random_pr, orbit_rep """ if schreier_vector is None: schreier_vector = self.schreier_vector(alpha) if _random_prec is None: rand = self.random_pr() else: rand = _random_prec['rand'] beta = rand(alpha) h = self.orbit_rep(alpha, beta, schreier_vector) return rmul(~h, rand) def schreier_sims(self): """Schreier-Sims algorithm. It computes the generators of the chain of stabilizers `G > G_{b_1} > .. > G_{b1,..,b_r} > 1` in which `G_{b_1,..,b_i}` stabilizes `b_1,..,b_i`, and the corresponding ``s`` cosets. An element of the group can be written as the product `h_1*..*h_s`. We use the incremental Schreier-Sims algorithm. 
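A quick consistency check (an illustrative sketch; ``SymmetricGroup(4)`` is just an assumed example group) is that, once the representation is computed, the lengths of the basic transversals multiply to the group order:

>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> total = 1
>>> for T in S.basic_transversals:
...     total *= len(T)
>>> total == S.order()
True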
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.schreier_sims() >>> G.basic_transversals [{0: (2)(0 1), 1: (2), 2: (1 2)}, {0: (2), 2: (0 2)}] """ if self._transversals: return self._schreier_sims() return def _schreier_sims(self, base=None): schreier = self.schreier_sims_incremental(base=base, slp_dict=True) base, strong_gens = schreier[:2] self._base = base self._strong_gens = strong_gens self._strong_gens_slp = schreier[2] if not base: self._transversals = [] self._basic_orbits = [] return strong_gens_distr = _distribute_gens_by_base(base, strong_gens) basic_orbits, transversals, slps = _orbits_transversals_from_bsgs(base,\ strong_gens_distr, slp=True) # rewrite the indices stored in slps in terms of strong_gens for i, slp in enumerate(slps): gens = strong_gens_distr[i] for k in slp: slp[k] = [strong_gens.index(gens[s]) for s in slp[k]] self._transversals = transversals self._basic_orbits = [sorted(x) for x in basic_orbits] self._transversal_slp = slps def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False): """Extend a sequence of points and generating set to a base and strong generating set. Parameters ========== base The sequence of points to be extended to a base. Optional parameter with default value ``[]``. gens The generating set to be extended to a strong generating set relative to the base obtained. Optional parameter with default value ``self.generators``. slp_dict If `True`, return a dictionary `{g: gens}` for each strong generator `g` where `gens` is a list of strong generators coming before `g` in `strong_gens`, such that the product of the elements of `gens` is equal to `g`. Returns ======= (base, strong_gens) ``base`` is the base obtained, and ``strong_gens`` is the strong generating set relative to it. The original parameters ``base``, ``gens`` remain unchanged. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> A = AlternatingGroup(7) >>> base = [2, 3] >>> seq = [2, 3] >>> base, strong_gens = A.schreier_sims_incremental(base=seq) >>> _verify_bsgs(A, base, strong_gens) True >>> base[:2] [2, 3] Notes ===== This version of the Schreier-Sims algorithm runs in polynomial time. There are certain assumptions in the implementation - if the trivial group is provided, ``base`` and ``gens`` are returned immediately, as any sequence of points is a base for the trivial group. If the identity is present in the generators ``gens``, it is removed as it is a redundant generator. The implementation is described in [1], pp. 90-93. See Also ======== schreier_sims, schreier_sims_random """ if base is None: base = [] if gens is None: gens = self.generators[:] degree = self.degree id_af = list(range(degree)) # handle the trivial group if len(gens) == 1 and gens[0].is_Identity: if slp_dict: return base, gens, {gens[0]: [gens[0]]} return base, gens # prevent side effects _base, _gens = base[:], gens[:] # remove the identity as a generator _gens = [x for x in _gens if not x.is_Identity] # make sure no generator fixes all base points for gen in _gens: if all(x == gen._array_form[x] for x in _base): for new in id_af: if gen._array_form[new] != new: break else: assert None # can this ever happen? 
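# `new` is the first point moved by gen (found by the loop above); appending it to _base ensures that no generator fixes every base point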
_base.append(new) # distribute generators according to basic stabilizers strong_gens_distr = _distribute_gens_by_base(_base, _gens) strong_gens_slp = [] # initialize the basic stabilizers, basic orbits and basic transversals orbs = {} transversals = {} slps = {} base_len = len(_base) for i in range(base_len): transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i], _base[i], pairs=True, af=True, slp=True) transversals[i] = dict(transversals[i]) orbs[i] = list(transversals[i].keys()) # main loop: amend the stabilizer chain until we have generators # for all stabilizers i = base_len - 1 while i >= 0: # this flag is used to continue with the main loop from inside # a nested loop continue_i = False # test the generators for being a strong generating set db = {} for beta, u_beta in list(transversals[i].items()): for j, gen in enumerate(strong_gens_distr[i]): gb = gen._array_form[beta] u1 = transversals[i][gb] g1 = _af_rmul(gen._array_form, u_beta) slp = [(i, g) for g in slps[i][beta]] slp = [(i, j)] + slp if g1 != u1: # test if the schreier generator is in the i+1-th # would-be basic stabilizer y = True try: u1_inv = db[gb] except KeyError: u1_inv = db[gb] = _af_invert(u1) schreier_gen = _af_rmul(u1_inv, g1) u1_inv_slp = slps[i][gb][:] u1_inv_slp.reverse() u1_inv_slp = [(i, (g,)) for g in u1_inv_slp] slp = u1_inv_slp + slp h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps) if j <= base_len: # new strong generator h at level j y = False elif h: # h fixes all base points y = False moved = 0 while h[moved] == moved: moved += 1 _base.append(moved) base_len += 1 strong_gens_distr.append([]) if y is False: # if a new strong generator is found, update the # data structures and start over h = _af_new(h) strong_gens_slp.append((h, slp)) for l in range(i + 1, j): strong_gens_distr[l].append(h) transversals[l], slps[l] =\ _orbit_transversal(degree, strong_gens_distr[l], _base[l], pairs=True, af=True, slp=True) transversals[l] = dict(transversals[l]) orbs[l] = list(transversals[l].keys()) i = j - 1 # continue main loop using the flag continue_i = True if continue_i is True: break if continue_i is True: break if continue_i is True: continue i -= 1 strong_gens = _gens[:] if slp_dict: # create the list of the strong generators strong_gens and # rewrite the indices of strong_gens_slp in terms of the # elements of strong_gens for k, slp in strong_gens_slp: strong_gens.append(k) for i in range(len(slp)): s = slp[i] if isinstance(s[1], tuple): slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1 else: slp[i] = strong_gens_distr[s[0]][s[1]] strong_gens_slp = dict(strong_gens_slp) # add the original generators for g in _gens: strong_gens_slp[g] = [g] return (_base, strong_gens, strong_gens_slp) strong_gens.extend([k for k, _ in strong_gens_slp]) return _base, strong_gens def schreier_sims_random(self, base=None, gens=None, consec_succ=10, _random_prec=None): r"""Randomized Schreier-Sims algorithm. The randomized Schreier-Sims algorithm takes the sequence ``base`` and the generating set ``gens``, and extends ``base`` to a base, and ``gens`` to a strong generating set relative to that base with probability of a wrong answer at most `2^{-consec\_succ}`, provided the random generators are sufficiently random. Parameters ========== base The sequence to be extended to a base. gens The generating set to be extended to a strong generating set. consec_succ The parameter defining the probability of a wrong answer. _random_prec An internal parameter used for testing purposes. 
Returns ======= (base, strong_gens) ``base`` is the base and ``strong_gens`` is the strong generating set relative to it. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> S = SymmetricGroup(5) >>> base, strong_gens = S.schreier_sims_random(consec_succ=5) >>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP True Notes ===== The algorithm is described in detail in [1], pp. 97-98. It extends the orbits ``orbs`` and the permutation groups ``stabs`` to basic orbits and basic stabilizers for the base and strong generating set produced in the end. The idea of the extension process is to "sift" random group elements through the stabilizer chain and amend the stabilizers/orbits along the way when a sift is not successful. The helper function ``_strip`` is used to attempt to decompose a random group element according to the current state of the stabilizer chain and report whether the element was fully decomposed (successful sift) or not (unsuccessful sift). In the latter case, the level at which the sift failed is reported and used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly. The halting condition is for ``consec_succ`` consecutive successful sifts to pass. This makes sure that the current ``base`` and ``gens`` form a BSGS with probability at least `1 - 1/\text{consec\_succ}`. See Also ======== schreier_sims """ if base is None: base = [] if gens is None: gens = self.generators base_len = len(base) n = self.degree # make sure no generator fixes all base points for gen in gens: if all(gen(x) == x for x in base): new = 0 while gen._array_form[new] == new: new += 1 base.append(new) base_len += 1 # distribute generators according to basic stabilizers strong_gens_distr = _distribute_gens_by_base(base, gens) # initialize the basic stabilizers, basic transversals and basic orbits transversals = {} orbs = {} for i in range(base_len): transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i], base[i], pairs=True)) orbs[i] = list(transversals[i].keys()) # initialize the number of consecutive elements sifted c = 0 # start sifting random elements while the number of consecutive sifts # is less than consec_succ while c < consec_succ: if _random_prec is None: g = self.random_pr() else: g = _random_prec['g'].pop() h, j = _strip(g, base, orbs, transversals) y = True # determine whether a new base point is needed if j <= base_len: y = False elif not h.is_Identity: y = False moved = 0 while h(moved) == moved: moved += 1 base.append(moved) base_len += 1 strong_gens_distr.append([]) # if the element doesn't sift, amend the strong generators and # associated stabilizers and orbits if y is False: for l in range(1, j): strong_gens_distr[l].append(h) transversals[l] = dict(_orbit_transversal(n, strong_gens_distr[l], base[l], pairs=True)) orbs[l] = list(transversals[l].keys()) c = 0 else: c += 1 # build the strong generating set strong_gens = strong_gens_distr[0][:] for gen in strong_gens_distr[1]: if gen not in strong_gens: strong_gens.append(gen) return base, strong_gens def schreier_vector(self, alpha): """Computes the schreier vector for ``alpha``. The Schreier vector efficiently stores information about the orbit of ``alpha``. It can later be used to quickly obtain elements of the group that send ``alpha`` to a particular element in the orbit. 
Notice that the Schreier vector depends on the order in which the group generators are listed. For a definition, see [3]. Since list indices start from zero, we adopt the convention to use "None" instead of 0 to signify that an element doesn't belong to the orbit. For the algorithm and its correctness, see [2], pp.78-80. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> a = Permutation([2, 4, 6, 3, 1, 5, 0]) >>> b = Permutation([0, 1, 3, 5, 4, 6, 2]) >>> G = PermutationGroup([a, b]) >>> G.schreier_vector(0) [-1, None, 0, 1, None, 1, 0] See Also ======== orbit """ n = self.degree v = [None]*n v[alpha] = -1 orb = [alpha] used = [False]*n used[alpha] = True gens = self.generators r = len(gens) for b in orb: for i in range(r): temp = gens[i]._array_form[b] if used[temp] is False: orb.append(temp) used[temp] = True v[temp] = i return v def stabilizer(self, alpha): r"""Return the stabilizer subgroup of ``alpha``. The stabilizer of `\alpha` is the group `G_\alpha = \{g \in G | g(\alpha) = \alpha\}`. For a proof of correctness, see [1], p.79. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> G = DihedralGroup(6) >>> G.stabilizer(5) PermutationGroup([ (5)(0 4)(1 3)]) See Also ======== orbit """ return PermGroup(_stabilizer(self._degree, self._generators, alpha)) @property def strong_gens(self): r"""Return a strong generating set from the Schreier-Sims algorithm. A generating set `S = \{g_1, g_2, ..., g_t\}` for a permutation group `G` is a strong generating set relative to the sequence of points (referred to as a "base") `(b_1, b_2, ..., b_k)` if, for `1 \leq i \leq k` we have that the intersection of the pointwise stabilizer `G^{(i+1)} := G_{b_1, b_2, ..., b_i}` with `S` generates the pointwise stabilizer `G^{(i+1)}`. The concepts of a base and strong generating set and their applications are discussed in depth in [1], pp. 87-89 and [2], pp. 55-57. Examples ======== >>> from sympy.combinatorics.named_groups import DihedralGroup >>> D = DihedralGroup(4) >>> D.strong_gens [(0 1 2 3), (0 3)(1 2), (1 3)] >>> D.base [0, 1] See Also ======== base, basic_transversals, basic_orbits, basic_stabilizers """ if self._strong_gens == []: self.schreier_sims() return self._strong_gens def subgroup(self, gens): """ Return the subgroup generated by `gens` which is a list of elements of the group """ if not all([g in self for g in gens]): raise ValueError("The group doesn't contain the supplied generators") G = PermutationGroup(gens) return G def subgroup_search(self, prop, base=None, strong_gens=None, tests=None, init_subgroup=None): """Find the subgroup of all elements satisfying the property ``prop``. This is done by a depth-first search with respect to base images that uses several tests to prune the search tree. Parameters ========== prop The property to be used. Has to be callable on group elements and always return ``True`` or ``False``. It is assumed that all group elements satisfying ``prop`` indeed form a subgroup. base A base for the supergroup. strong_gens A strong generating set for the supergroup. tests A list of callables of length equal to the length of ``base``. 
These are used to rule out group elements by partial base images, so that ``tests[l](g)`` returns False if the element ``g`` is known not to satisfy ``prop`` based on where ``g`` sends the first ``l + 1`` base points. init_subgroup If a subgroup of the sought group is known in advance, it can be passed to the function as this parameter. Returns ======= res The subgroup of all elements satisfying ``prop``. The generating set for this group is guaranteed to be a strong generating set relative to the base ``base``. Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... AlternatingGroup) >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> S = SymmetricGroup(7) >>> prop_even = lambda x: x.is_even >>> base, strong_gens = S.schreier_sims_incremental() >>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens) >>> G.is_subgroup(AlternatingGroup(7)) True >>> _verify_bsgs(G, base, G.generators) True Notes ===== This function is extremely lengthy and complicated and will require some careful attention. The implementation is described in [1], pp. 114-117, and the comments for the code here follow the lines of the pseudocode in the book for clarity. The complexity is exponential in general, since the search process by itself visits all members of the supergroup. However, there are a lot of tests which are used to prune the search tree, and users can define their own tests via the ``tests`` parameter, so in practice, and for some computations, it's not terrible. A crucial part in the procedure is the frequent base change performed (this is line 11 in the pseudocode) in order to obtain a new basic stabilizer. The book mentions that this can be done by using ``.baseswap(...)``; however, the current implementation uses a more straightforward way to find the next basic stabilizer - calling the function ``.stabilizer(...)`` on the previous basic stabilizer. """ # initialize BSGS and basic group properties def get_reps(orbits): # get the minimal element in the base ordering return [min(orbit, key = lambda x: base_ordering[x]) \ for orbit in orbits] def update_nu(l): temp_index = len(basic_orbits[l]) + 1 -\ len(res_basic_orbits_init_base[l]) # this corresponds to the element larger than all points if temp_index >= len(sorted_orbits[l]): nu[l] = base_ordering[degree] else: nu[l] = sorted_orbits[l][temp_index] if base is None: base, strong_gens = self.schreier_sims_incremental() base_len = len(base) degree = self.degree identity = _af_new(list(range(degree))) base_ordering = _base_ordering(base, degree) # add an element larger than all points base_ordering.append(degree) # add an element smaller than all points base_ordering.append(-1) # compute BSGS-related structures strong_gens_distr = _distribute_gens_by_base(base, strong_gens) basic_orbits, transversals = _orbits_transversals_from_bsgs(base, strong_gens_distr) # handle subgroup initialization and tests if init_subgroup is None: init_subgroup = PermutationGroup([identity]) if tests is None: trivial_test = lambda x: True tests = [] for i in range(base_len): tests.append(trivial_test) # line 1: more initializations.
res = init_subgroup f = base_len - 1 l = base_len - 1 # line 2: set the base for K to the base for G res_base = base[:] # line 3: compute BSGS and related structures for K res_base, res_strong_gens = res.schreier_sims_incremental( base=res_base) res_strong_gens_distr = _distribute_gens_by_base(res_base, res_strong_gens) res_generators = res.generators res_basic_orbits_init_base = \ [_orbit(degree, res_strong_gens_distr[i], res_base[i])\ for i in range(base_len)] # initialize orbit representatives orbit_reps = [None]*base_len # line 4: orbit representatives for f-th basic stabilizer of K orbits = _orbits(degree, res_strong_gens_distr[f]) orbit_reps[f] = get_reps(orbits) # line 5: remove the base point from the representatives to avoid # getting the identity element as a generator for K orbit_reps[f].remove(base[f]) # line 6: more initializations c = [0]*base_len u = [identity]*base_len sorted_orbits = [None]*base_len for i in range(base_len): sorted_orbits[i] = basic_orbits[i][:] sorted_orbits[i].sort(key=lambda point: base_ordering[point]) # line 7: initializations mu = [None]*base_len nu = [None]*base_len # this corresponds to the element smaller than all points mu[l] = degree + 1 update_nu(l) # initialize computed words computed_words = [identity]*base_len # line 8: main loop while True: # apply all the tests while l < base_len - 1 and \ computed_words[l](base[l]) in orbit_reps[l] and \ base_ordering[mu[l]] < \ base_ordering[computed_words[l](base[l])] < \ base_ordering[nu[l]] and \ tests[l](computed_words): # line 11: change the (partial) base of K new_point = computed_words[l](base[l]) res_base[l] = new_point new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l], new_point) res_strong_gens_distr[l + 1] = new_stab_gens # line 12: calculate minimal orbit representatives for the # l+1-th basic stabilizer orbits = _orbits(degree, new_stab_gens) orbit_reps[l + 1] = get_reps(orbits) # line 13: amend sorted orbits l += 1 temp_orbit = [computed_words[l - 1](point) for point in basic_orbits[l]] temp_orbit.sort(key=lambda point: base_ordering[point]) sorted_orbits[l] = temp_orbit # lines 14 and 15: update variables used minimality tests new_mu = degree + 1 for i in range(l): if base[l] in res_basic_orbits_init_base[i]: candidate = computed_words[i](base[i]) if base_ordering[candidate] > base_ordering[new_mu]: new_mu = candidate mu[l] = new_mu update_nu(l) # line 16: determine the new transversal element c[l] = 0 temp_point = sorted_orbits[l][c[l]] gamma = computed_words[l - 1]._array_form.index(temp_point) u[l] = transversals[l][gamma] # update computed words computed_words[l] = rmul(computed_words[l - 1], u[l]) # lines 17 & 18: apply the tests to the group element found g = computed_words[l] temp_point = g(base[l]) if l == base_len - 1 and \ base_ordering[mu[l]] < \ base_ordering[temp_point] < base_ordering[nu[l]] and \ temp_point in orbit_reps[l] and \ tests[l](computed_words) and \ prop(g): # line 19: reset the base of K res_generators.append(g) res_base = base[:] # line 20: recalculate basic orbits (and transversals) res_strong_gens.append(g) res_strong_gens_distr = _distribute_gens_by_base(res_base, res_strong_gens) res_basic_orbits_init_base = \ [_orbit(degree, res_strong_gens_distr[i], res_base[i]) \ for i in range(base_len)] # line 21: recalculate orbit representatives # line 22: reset the search depth orbit_reps[f] = get_reps(orbits) l = f # line 23: go up the tree until in the first branch not fully # searched while l >= 0 and c[l] == len(basic_orbits[l]) - 1: l = l - 1 # line 
24: if the entire tree is traversed, return K if l == -1: return PermutationGroup(res_generators) # lines 25-27: update orbit representatives if l < f: # line 26 f = l c[l] = 0 # line 27 temp_orbits = _orbits(degree, res_strong_gens_distr[f]) orbit_reps[f] = get_reps(temp_orbits) # line 28: update variables used for minimality testing mu[l] = degree + 1 temp_index = len(basic_orbits[l]) + 1 - \ len(res_basic_orbits_init_base[l]) if temp_index >= len(sorted_orbits[l]): nu[l] = base_ordering[degree] else: nu[l] = sorted_orbits[l][temp_index] # line 29: set the next element from the current branch and update # accordingly c[l] += 1 if l == 0: gamma = sorted_orbits[l][c[l]] else: gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]]) u[l] = transversals[l][gamma] if l == 0: computed_words[l] = u[l] else: computed_words[l] = rmul(computed_words[l - 1], u[l]) @property def transitivity_degree(self): r"""Compute the degree of transitivity of the group. A permutation group `G` acting on `\Omega = \{0, 1, ..., n-1\}` is ``k``-fold transitive, if, for any k points `(a_1, a_2, ..., a_k)\in\Omega` and any k points `(b_1, b_2, ..., b_k)\in\Omega` there exists `g\in G` such that `g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k` The degree of transitivity of `G` is the maximum ``k`` such that `G` is ``k``-fold transitive. ([8]) Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> a = Permutation([1, 2, 0]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.transitivity_degree 3 See Also ======== is_transitive, orbit """ if self._transitivity_degree is None: n = self.degree G = self # if G is k-transitive, a tuple (a_0,..,a_k) # can be brought to (b_0,...,b_(k-1), b_k) # where b_0,...,b_(k-1) are fixed points; # consider the group G_k which stabilizes b_0,...,b_(k-1) # if G_k is transitive on the subset excluding b_0,...,b_(k-1) # then G is (k+1)-transitive for i in range(n): orb = G.orbit((i)) if len(orb) != n - i: self._transitivity_degree = i return i G = G.stabilizer(i) self._transitivity_degree = n return n else: return self._transitivity_degree def _p_elements_group(G, p): ''' For an abelian p-group G return the subgroup consisting of all elements of order p (and the identity) ''' gens = G.generators[:] gens = sorted(gens, key=lambda x: x.order(), reverse=True) gens_p = [g**(g.order()/p) for g in gens] gens_r = [] for i in range(len(gens)): x = gens[i] x_order = x.order() # x_p has order p x_p = x**(x_order/p) if i > 0: P = PermutationGroup(gens_p[:i]) else: P = PermutationGroup(G.identity) if x**(x_order/p) not in P: gens_r.append(x**(x_order/p)) else: # replace x by an element of order (x.order()/p) # so that gens still generates G g = P.generator_product(x_p, original=True) for s in g: x = x*s**-1 x_order = x_order/p # insert x to gens so that the sorting is preserved del gens[i] del gens_p[i] j = i - 1 while j < len(gens) and gens[j].order() >= x_order: j += 1 gens = gens[:j] + [x] + gens[j:] gens_p = gens_p[:j] + [x] + gens_p[j:] return PermutationGroup(gens_r) def _sylow_alt_sym(self, p): ''' Return a p-Sylow subgroup of a symmetric or an alternating group. The algorithm for this is hinted at in [1], Chapter 4, Exercise 4. For Sym(n) with n = p^i, the idea is as follows. Partition the interval [0..n-1] into p equal parts, each of length p^(i-1): [0..p^(i-1)-1], [p^(i-1)..2*p^(i-1)-1]...[(p-1)*p^(i-1)..p^i-1]. 
Find a p-Sylow subgroup of Sym(p^(i-1)) (treated as a subgroup of ``self``) acting on each of the parts. Call the subgroups P_1, P_2...P_p. The generators for the subgroups P_2...P_p can be obtained from those of P_1 by applying a "shifting" permutation to them, that is, a permutation mapping [0..p^(i-1)-1] to the second part (the other parts are obtained by using the shift multiple times). The union of this permutation and the generators of P_1 is a p-Sylow subgroup of ``self``. For n not equal to a power of p, partition [0..n-1] in accordance with how n would be written in base p. E.g. for p=2 and n=11, 11 = 2^3 + 2^2 + 1 so the partition is [[0..7], [8..9], {10}]. To generate a p-Sylow subgroup, take the union of the generators for each of the parts. For the above example, {(0 1), (0 2)(1 3), (0 4), (1 5)(2 7)} from the first part, {(8 9)} from the second part and nothing from the third. This gives 4 generators in total, and the subgroup they generate is p-Sylow. Alternating groups are treated the same except when p=2. In this case, (0 1)(s s+1) should be added for an appropriate s (the start of a part) for each part in the partitions. See Also ======== sylow_subgroup, is_alt_sym ''' n = self.degree gens = [] identity = Permutation(n-1) # the case of 2-sylow subgroups of alternating groups # needs special treatment alt = p == 2 and all(g.is_even for g in self.generators) # find the presentation of n in base p coeffs = [] m = n while m > 0: coeffs.append(m % p) m = m // p power = len(coeffs)-1 # for a symmetric group, gens[:i] is the generating # set for a p-Sylow subgroup on [0..p**(i-1)-1]. For # alternating groups, the same is given by gens[:2*(i-1)] for i in range(1, power+1): if i == 1 and alt: # (0 1) shouldn't be added for alternating groups continue gen = Permutation([(j + p**(i-1)) % p**i for j in range(p**i)]) gens.append(identity*gen) if alt: gen = Permutation(0, 1)*gen*Permutation(0, 1)*gen gens.append(gen) # the first point in the current part (see the algorithm # description in the docstring) start = 0 while power > 0: a = coeffs[power] # make the permutation shifting the start of the first # part ([0..p^i-1] for some i) to the current one for s in range(a): shift = Permutation() if start > 0: for i in range(p**power): shift = shift(i, start + i) if alt: gen = Permutation(0, 1)*shift*Permutation(0, 1)*shift gens.append(gen) j = 2*(power - 1) else: j = power for i, gen in enumerate(gens[:j]): if alt and i % 2 == 1: continue # shift the generator to the start of the # partition part gen = shift*gen*shift gens.append(gen) start += p**power power = power-1 return gens def sylow_subgroup(self, p): ''' Return a p-Sylow subgroup of the group. 
The algorithm is described in [1], Chapter 4, Section 7 Examples ======== >>> from sympy.combinatorics.named_groups import DihedralGroup >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> D = DihedralGroup(6) >>> S = D.sylow_subgroup(2) >>> S.order() 4 >>> G = SymmetricGroup(6) >>> S = G.sylow_subgroup(5) >>> S.order() 5 >>> G1 = AlternatingGroup(3) >>> G2 = AlternatingGroup(5) >>> G3 = AlternatingGroup(9) >>> S1 = G1.sylow_subgroup(3) >>> S2 = G2.sylow_subgroup(3) >>> S3 = G3.sylow_subgroup(3) >>> len1 = len(S1.lower_central_series()) >>> len2 = len(S2.lower_central_series()) >>> len3 = len(S3.lower_central_series()) >>> len1 == len2 True >>> len1 < len3 True ''' from sympy.combinatorics.homomorphisms import ( orbit_homomorphism, block_homomorphism) from sympy.ntheory.primetest import isprime if not isprime(p): raise ValueError("p must be a prime") def is_p_group(G): # check if the order of G is a power of p # and return the power m = G.order() n = 0 while m % p == 0: m = m/p n += 1 if m == 1: return True, n return False, n def _sylow_reduce(mu, nu): # reduction based on two homomorphisms # mu and nu with trivially intersecting # kernels Q = mu.image().sylow_subgroup(p) Q = mu.invert_subgroup(Q) nu = nu.restrict_to(Q) R = nu.image().sylow_subgroup(p) return nu.invert_subgroup(R) order = self.order() if order % p != 0: return PermutationGroup([self.identity]) p_group, n = is_p_group(self) if p_group: return self if self.is_alt_sym(): return PermutationGroup(self._sylow_alt_sym(p)) # if there is a non-trivial orbit with size not divisible # by p, the sylow subgroup is contained in its stabilizer # (by orbit-stabilizer theorem) orbits = self.orbits() non_p_orbits = [o for o in orbits if len(o) % p != 0 and len(o) != 1] if non_p_orbits: G = self.stabilizer(list(non_p_orbits[0]).pop()) return G.sylow_subgroup(p) if not self.is_transitive(): # apply _sylow_reduce to orbit actions orbits = sorted(orbits, key = lambda x: len(x)) omega1 = orbits.pop() omega2 = orbits[0].union(*orbits) mu = orbit_homomorphism(self, omega1) nu = orbit_homomorphism(self, omega2) return _sylow_reduce(mu, nu) blocks = self.minimal_blocks() if len(blocks) > 1: # apply _sylow_reduce to block system actions mu = block_homomorphism(self, blocks[0]) nu = block_homomorphism(self, blocks[1]) return _sylow_reduce(mu, nu) elif len(blocks) == 1: block = list(blocks)[0] if any(e != 0 for e in block): # self is imprimitive mu = block_homomorphism(self, block) if not is_p_group(mu.image())[0]: S = mu.image().sylow_subgroup(p) return mu.invert_subgroup(S).sylow_subgroup(p) # find an element of order p g = self.random() g_order = g.order() while g_order % p != 0 or g_order == 0: g = self.random() g_order = g.order() g = g**(g_order // p) if order % p**2 != 0: return PermutationGroup(g) C = self.centralizer(g) while C.order() % p**n != 0: S = C.sylow_subgroup(p) s_order = S.order() Z = S.center() P = Z._p_elements_group(p) h = P.random() C_h = self.centralizer(h) while C_h.order() % p*s_order != 0: h = P.random() C_h = self.centralizer(h) C = C_h return C.sylow_subgroup(p) def _block_verify(H, L, alpha): delta = sorted(list(H.orbit(alpha))) H_gens = H.generators # p[i] will be the number of the block # delta[i] belongs to p = [-1]*len(delta) blocks = [-1]*len(delta) B = [[]] # future list of blocks u = [0]*len(delta) # u[i] in L s.t. 
alpha^u[i] = B[0][i] t = L.orbit_transversal(alpha, pairs=True) for a, beta in t: B[0].append(a) i_a = delta.index(a) p[i_a] = 0 blocks[i_a] = alpha u[i_a] = beta rho = 0 m = 0 # number of blocks - 1 while rho <= m: beta = B[rho][0] for g in H_gens: d = beta^g i_d = delta.index(d) sigma = p[i_d] if sigma < 0: # define a new block m += 1 sigma = m u[i_d] = u[delta.index(beta)]*g p[i_d] = sigma rep = d blocks[i_d] = rep newb = [rep] for gamma in B[rho][1:]: i_gamma = delta.index(gamma) d = gamma^g i_d = delta.index(d) if p[i_d] < 0: u[i_d] = u[i_gamma]*g p[i_d] = sigma blocks[i_d] = rep newb.append(d) else: # B[rho] is not a block s = u[i_gamma]*g*u[i_d]**(-1) return False, s B.append(newb) else: for h in B[rho][1:]: if not h^g in B[sigma]: # B[rho] is not a block s = u[delta.index(beta)]*g*u[i_d]**(-1) return False, s rho += 1 return True, blocks def _verify(H, K, phi, z, alpha): ''' Return a list of relators ``rels`` in generators ``gens`_h` that are mapped to ``H.generators`` by ``phi`` so that given a finite presentation <gens_k | rels_k> of ``K`` on a subset of ``gens_h`` <gens_h | rels_k + rels> is a finite presentation of ``H``. ``H`` should be generated by the union of ``K.generators`` and ``z`` (a single generator), and ``H.stabilizer(alpha) == K``; ``phi`` is a canonical injection from a free group into a permutation group containing ``H``. The algorithm is described in [1], Chapter 6. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.homomorphisms import homomorphism >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup >>> H = PermutationGroup(Permutation(0, 2), Permutation (1, 5)) >>> K = PermutationGroup(Permutation(5)(0, 2)) >>> F = free_group("x_0 x_1")[0] >>> gens = F.generators >>> phi = homomorphism(F, H, F.generators, H.generators) >>> rels_k = [gens[0]**2] # relators for presentation of K >>> z= Permutation(1, 5) >>> check, rels_h = H._verify(K, phi, z, 1) >>> check True >>> rels = rels_k + rels_h >>> G = FpGroup(F, rels) # presentation of H >>> G.order() == H.order() True See also ======== strong_presentation, presentation, stabilizer ''' orbit = H.orbit(alpha) beta = alpha^(z**-1) K_beta = K.stabilizer(beta) # orbit representatives of K_beta gammas = [alpha, beta] orbits = list(set(tuple(K_beta.orbit(o)) for o in orbit)) orbit_reps = [orb[0] for orb in orbits] for rep in orbit_reps: if rep not in gammas: gammas.append(rep) # orbit transversal of K betas = [alpha, beta] transversal = {alpha: phi.invert(H.identity), beta: phi.invert(z**-1)} for s, g in K.orbit_transversal(beta, pairs=True): if not s in transversal: transversal[s] = transversal[beta]*phi.invert(g) union = K.orbit(alpha).union(K.orbit(beta)) while (len(union) < len(orbit)): for gamma in gammas: if gamma in union: r = gamma^z if r not in union: betas.append(r) transversal[r] = transversal[gamma]*phi.invert(z) for s, g in K.orbit_transversal(r, pairs=True): if not s in transversal: transversal[s] = transversal[r]*phi.invert(g) union = union.union(K.orbit(r)) break # compute relators rels = [] for b in betas: k_gens = K.stabilizer(b).generators for y in k_gens: new_rel = transversal[b] gens = K.generator_product(y, original=True) for g in gens[::-1]: new_rel = new_rel*phi.invert(g) new_rel = new_rel*transversal[b]**-1 perm = phi(new_rel) try: gens = K.generator_product(perm, original=True) except ValueError: return False, perm for g in gens: 
new_rel = new_rel*phi.invert(g)**-1 if new_rel not in rels: rels.append(new_rel) for gamma in gammas: new_rel = transversal[gamma]*phi.invert(z)*transversal[gamma^z]**-1 perm = phi(new_rel) try: gens = K.generator_product(perm, original=True) except ValueError: return False, perm for g in gens: new_rel = new_rel*phi.invert(g)**-1 if new_rel not in rels: rels.append(new_rel) return True, rels def strong_presentation(G): ''' Return a strong finite presentation of `G`. The generators of the returned group are in the same order as the strong generators of `G`. The algorithm is based on Sims' Verify algorithm described in [1], Chapter 6. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.named_groups import DihedralGroup >>> P = DihedralGroup(4) >>> G = P.strong_presentation() >>> P.order() == G.order() True See Also ======== presentation, _verify ''' from sympy.combinatorics.fp_groups import (FpGroup, simplify_presentation) from sympy.combinatorics.free_groups import free_group from sympy.combinatorics.homomorphisms import (block_homomorphism, homomorphism, GroupHomomorphism) strong_gens = G.strong_gens[:] stabs = G.basic_stabilizers[:] base = G.base[:] # injection from a free group on len(strong_gens) # generators into G gen_syms = [('x_%d'%i) for i in range(len(strong_gens))] F = free_group(', '.join(gen_syms))[0] phi = homomorphism(F, G, F.generators, strong_gens) H = PermutationGroup(G.identity) while stabs: alpha = base.pop() K = H H = stabs.pop() new_gens = [g for g in H.generators if g not in K] if K.order() == 1: z = new_gens.pop() rels = [F.generators[-1]**z.order()] intermediate_gens = [z] K = PermutationGroup(intermediate_gens) # add generators one at a time building up from K to H while new_gens: z = new_gens.pop() intermediate_gens = [z] + intermediate_gens K_s = PermutationGroup(intermediate_gens) orbit = K_s.orbit(alpha) orbit_k = K.orbit(alpha) # split into cases based on the orbit of K_s if orbit_k == orbit: if z in K: rel = phi.invert(z) perm = z else: t = K.orbit_rep(alpha, alpha^z) rel = phi.invert(z)*phi.invert(t)**-1 perm = z*t**-1 for g in K.generator_product(perm, original=True): rel = rel*phi.invert(g)**-1 new_rels = [rel] elif len(orbit_k) == 1: # `success` is always true because `strong_gens` # and `base` are already a verified BSGS. Later # this could be changed to start with a randomly # generated (potential) BSGS, and then new elements # would have to be appended to it when `success` # is false. success, new_rels = K_s._verify(K, phi, z, alpha) else: # K.orbit(alpha) should be a block # under the action of K_s on K_s.orbit(alpha) check, block = K_s._block_verify(K, alpha) if check: # apply _verify to the action of K_s # on the block system; for convenience, # add the blocks as additional points # that K_s should act on t = block_homomorphism(K_s, block) m = t.codomain.degree # number of blocks d = K_s.degree # conjugating with p will shift # permutations in t.image() to # higher numbers, e.g. 
# p*(0 1)*p = (m m+1) p = Permutation() for i in range(m): p *= Permutation(i, i+d) t_img = t.images # combine generators of K_s with their # action on the block system images = {g: g*p*t_img[g]*p for g in t_img} for g in G.strong_gens[:-len(K_s.generators)]: images[g] = g K_s_act = PermutationGroup(list(images.values())) f = GroupHomomorphism(G, K_s_act, images) K_act = PermutationGroup([f(g) for g in K.generators]) success, new_rels = K_s_act._verify(K_act, f.compose(phi), f(z), d) for n in new_rels: if not n in rels: rels.append(n) K = K_s group = FpGroup(F, rels) return simplify_presentation(group) def presentation(G, eliminate_gens=True): ''' Return an `FpGroup` presentation of the group. The algorithm is described in [1], Chapter 6.1. ''' from sympy.combinatorics.fp_groups import (FpGroup, simplify_presentation) from sympy.combinatorics.coset_table import CosetTable from sympy.combinatorics.free_groups import free_group from sympy.combinatorics.homomorphisms import homomorphism from itertools import product if G._fp_presentation: return G._fp_presentation if G._fp_presentation: return G._fp_presentation def _factor_group_by_rels(G, rels): if isinstance(G, FpGroup): rels.extend(G.relators) return FpGroup(G.free_group, list(set(rels))) return FpGroup(G, rels) gens = G.generators len_g = len(gens) if len_g == 1: order = gens[0].order() # handle the trivial group if order == 1: return free_group([])[0] F, x = free_group('x') return FpGroup(F, [x**order]) if G.order() > 20: half_gens = G.generators[0:(len_g+1)//2] else: half_gens = [] H = PermutationGroup(half_gens) H_p = H.presentation() len_h = len(H_p.generators) C = G.coset_table(H) n = len(C) # subgroup index gen_syms = [('x_%d'%i) for i in range(len(gens))] F = free_group(', '.join(gen_syms))[0] # mapping generators of H_p to those of F images = [F.generators[i] for i in range(len_h)] R = homomorphism(H_p, F, H_p.generators, images, check=False) # rewrite relators rels = R(H_p.relators) G_p = FpGroup(F, rels) # injective homomorphism from G_p into G T = homomorphism(G_p, G, G_p.generators, gens) C_p = CosetTable(G_p, []) C_p.table = [[None]*(2*len_g) for i in range(n)] # initiate the coset transversal transversal = [None]*n transversal[0] = G_p.identity # fill in the coset table as much as possible for i in range(2*len_h): C_p.table[0][i] = 0 gamma = 1 for alpha, x in product(range(0, n), range(2*len_g)): beta = C[alpha][x] if beta == gamma: gen = G_p.generators[x//2]**((-1)**(x % 2)) transversal[beta] = transversal[alpha]*gen C_p.table[alpha][x] = beta C_p.table[beta][x + (-1)**(x % 2)] = alpha gamma += 1 if gamma == n: break C_p.p = list(range(n)) beta = x = 0 while not C_p.is_complete(): # find the first undefined entry while C_p.table[beta][x] == C[beta][x]: x = (x + 1) % (2*len_g) if x == 0: beta = (beta + 1) % n # define a new relator gen = G_p.generators[x//2]**((-1)**(x % 2)) new_rel = transversal[beta]*gen*transversal[C[beta][x]]**-1 perm = T(new_rel) next = G_p.identity for s in H.generator_product(perm, original=True): next = next*T.invert(s)**-1 new_rel = new_rel*next # continue coset enumeration G_p = _factor_group_by_rels(G_p, [new_rel]) C_p.scan_and_fill(0, new_rel) C_p = G_p.coset_enumeration([], strategy="coset_table", draft=C_p, max_cosets=n, incomplete=True) G._fp_presentation = simplify_presentation(G_p) return G._fp_presentation def polycyclic_group(self): """ Return the PolycyclicGroup instance with below parameters: * ``pc_sequence`` : Polycyclic sequence is formed by collecting all the missing generators 
between the adjacent groups in the derived series of given permutation group. * ``pc_series`` : Polycyclic series is formed by adding all the missing generators of ``der[i+1]`` in ``der[i]``, where ``der`` represents the derived series. * ``relative_order`` : A list, computed by the ratio of adjacent groups in pc_series. """ from sympy.combinatorics.pc_groups import PolycyclicGroup if not self.is_polycyclic: raise ValueError("The group must be solvable") der = self.derived_series() pc_series = [] pc_sequence = [] relative_order = [] pc_series.append(der[-1]) der.reverse() for i in range(len(der)-1): H = der[i] for g in der[i+1].generators: if g not in H: H = PermutationGroup([g] + H.generators) pc_series.insert(0, H) pc_sequence.insert(0, g) G1 = pc_series[0].order() G2 = pc_series[1].order() relative_order.insert(0, G1 // G2) return PolycyclicGroup(pc_sequence, pc_series, relative_order, collector=None) def _orbit(degree, generators, alpha, action='tuples'): r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set. The time complexity of the algorithm used here is `O(|Orb|*r)` where `|Orb|` is the size of the orbit and ``r`` is the number of generators of the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21. Here alpha can be a single point, or a list of points. If alpha is a single point, the ordinary orbit is computed. if alpha is a list of points, there are three available options: 'union' - computes the union of the orbits of the points in the list 'tuples' - computes the orbit of the list interpreted as an ordered tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) ) 'sets' - computes the orbit of the list interpreted as a sets Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbit >>> a = Permutation([1, 2, 0, 4, 5, 6, 3]) >>> G = PermutationGroup([a]) >>> _orbit(G.degree, G.generators, 0) {0, 1, 2} >>> _orbit(G.degree, G.generators, [0, 4], 'union') {0, 1, 2, 3, 4, 5, 6} See Also ======== orbit, orbit_transversal """ if not hasattr(alpha, '__getitem__'): alpha = [alpha] gens = [x._array_form for x in generators] if len(alpha) == 1 or action == 'union': orb = alpha used = [False]*degree for el in alpha: used[el] = True for b in orb: for gen in gens: temp = gen[b] if used[temp] == False: orb.append(temp) used[temp] = True return set(orb) elif action == 'tuples': alpha = tuple(alpha) orb = [alpha] used = {alpha} for b in orb: for gen in gens: temp = tuple([gen[x] for x in b]) if temp not in used: orb.append(temp) used.add(temp) return set(orb) elif action == 'sets': alpha = frozenset(alpha) orb = [alpha] used = {alpha} for b in orb: for gen in gens: temp = frozenset([gen[x] for x in b]) if temp not in used: orb.append(temp) used.add(temp) return {tuple(x) for x in orb} def _orbits(degree, generators): """Compute the orbits of G. 
It returns a list of sets, one set for each orbit. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbits >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> _orbits(a.size, [a, b]) [{0, 1, 2}] """ orbs = [] sorted_I = list(range(degree)) I = set(sorted_I) while I: i = sorted_I[0] orb = _orbit(degree, generators, i) orbs.append(orb) # remove all indices that are in this orbit I -= orb sorted_I = [i for i in sorted_I if i not in orb] return orbs def _orbit_transversal(degree, generators, alpha, pairs, af=False, slp=False): r"""Computes a transversal for the orbit of ``alpha`` as a set. generators generators of the group ``G`` For a permutation group ``G``, a transversal for the orbit `Orb = \{g(\alpha) | g \in G\}` is a set `\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`. Note that there may be more than one possible transversal. If ``pairs`` is set to ``True``, it returns the list of pairs `(\beta, g_\beta)`. For a proof of correctness, see [1], p.79. If ``af`` is ``True``, the transversal elements are given in array form. If `slp` is `True`, a dictionary `{beta: slp_beta}` is returned for `\beta \in Orb` where `slp_beta` is a list of indices of the generators in `generators` s.t. if `slp_beta = [i_1 ... i_n]` `g_\beta = generators[i_n]*...*generators[i_1]`. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import DihedralGroup >>> from sympy.combinatorics.perm_groups import _orbit_transversal >>> G = DihedralGroup(6) >>> _orbit_transversal(G.degree, G.generators, 0, False) [(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)] """ tr = [(alpha, list(range(degree)))] slp_dict = {alpha: []} used = [False]*degree used[alpha] = True gens = [x._array_form for x in generators] for x, px in tr: px_slp = slp_dict[x] for gen in gens: temp = gen[x] if used[temp] == False: slp_dict[temp] = [gens.index(gen)] + px_slp tr.append((temp, _af_rmul(gen, px))) used[temp] = True if pairs: if not af: tr = [(x, _af_new(y)) for x, y in tr] if not slp: return tr return tr, slp_dict if af: tr = [y for _, y in tr] if not slp: return tr return tr, slp_dict tr = [_af_new(y) for _, y in tr] if not slp: return tr return tr, slp_dict def _stabilizer(degree, generators, alpha): r"""Return the stabilizer subgroup of ``alpha``. The stabilizer of `\alpha` is the group `G_\alpha = \{g \in G | g(\alpha) = \alpha\}`. For a proof of correctness, see [1], p.79.
degree : degree of G generators : generators of G Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.perm_groups import _stabilizer >>> from sympy.combinatorics.named_groups import DihedralGroup >>> G = DihedralGroup(6) >>> _stabilizer(G.degree, G.generators, 5) [(5)(0 4)(1 3), (5)] See Also ======== orbit """ orb = [alpha] table = {alpha: list(range(degree))} table_inv = {alpha: list(range(degree))} used = [False]*degree used[alpha] = True gens = [x._array_form for x in generators] stab_gens = [] for b in orb: for gen in gens: temp = gen[b] if used[temp] is False: gen_temp = _af_rmul(gen, table[b]) orb.append(temp) table[temp] = gen_temp table_inv[temp] = _af_invert(gen_temp) used[temp] = True else: schreier_gen = _af_rmuln(table_inv[temp], gen, table[b]) if schreier_gen not in stab_gens: stab_gens.append(schreier_gen) return [_af_new(x) for x in stab_gens] PermGroup = PermutationGroup
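# A minimal usage sketch, not part of perm_groups.py itself: it assumes a
# standard SymPy installation and exercises the orbit/stabilizer, Sylow and
# subgroup_search machinery defined above.  The group and prime choices are
# illustrative only.
from sympy.combinatorics.named_groups import (SymmetricGroup, AlternatingGroup,
    DihedralGroup)

D = DihedralGroup(6)
# Orbit-stabilizer theorem: |orbit(alpha)| * |stabilizer(alpha)| == |G|.
alpha = 0
assert len(D.orbit(alpha)) * D.stabilizer(alpha).order() == D.order()

# A 2-Sylow subgroup of D_6 (order 12) has order 4, the largest power of 2
# dividing 12.
assert D.sylow_subgroup(2).order() == 4

# subgroup_search: the even permutations of S_4 form A_4.
S = SymmetricGroup(4)
base, strong_gens = S.schreier_sims_incremental()
evens = S.subgroup_search(lambda g: g.is_even, base=base,
    strong_gens=strong_gens)
assert evens.is_subgroup(AlternatingGroup(4))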
1c16eecff497f46aafb640565add072944f7713826594ffc5c84404f2353652d
from __future__ import print_function, division from sympy.combinatorics.permutations import Permutation from sympy.core.symbol import symbols from sympy.matrices import Matrix from sympy.utilities.iterables import variations, rotate_left def symmetric(n): """ Generates the symmetric group of order n, Sn. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.generators import symmetric >>> list(symmetric(3)) [(2), (1 2), (2)(0 1), (0 1 2), (0 2 1), (0 2)] """ for perm in variations(list(range(n)), n): yield Permutation(perm) def cyclic(n): """ Generates the cyclic group of order n, Cn. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.generators import cyclic >>> list(cyclic(5)) [(4), (0 1 2 3 4), (0 2 4 1 3), (0 3 1 4 2), (0 4 3 2 1)] See Also ======== dihedral """ gen = list(range(n)) for i in range(n): yield Permutation(gen) gen = rotate_left(gen, 1) def alternating(n): """ Generates the alternating group of order n, An. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.generators import alternating >>> list(alternating(3)) [(2), (0 1 2), (0 2 1)] """ for perm in variations(list(range(n)), n): p = Permutation(perm) if p.is_even: yield p def dihedral(n): """ Generates the dihedral group of order 2n, Dn. The result is given as a subgroup of Sn, except for the special cases n=1 (the group S2) and n=2 (the Klein 4-group) where that's not possible and embeddings in S2 and S4 respectively are given. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.generators import dihedral >>> list(dihedral(3)) [(2), (0 2), (0 1 2), (1 2), (0 2 1), (2)(0 1)] See Also ======== cyclic """ if n == 1: yield Permutation([0, 1]) yield Permutation([1, 0]) elif n == 2: yield Permutation([0, 1, 2, 3]) yield Permutation([1, 0, 3, 2]) yield Permutation([2, 3, 0, 1]) yield Permutation([3, 2, 1, 0]) else: gen = list(range(n)) for i in range(n): yield Permutation(gen) yield Permutation(gen[::-1]) gen = rotate_left(gen, 1) def rubik_cube_generators(): """Return the permutations of the 3x3 Rubik's cube, see http://www.gap-system.org/Doc/Examples/rubik.html """ a = [ [(1, 3, 8, 6), (2, 5, 7, 4), (9, 33, 25, 17), (10, 34, 26, 18), (11, 35, 27, 19)], [(9, 11, 16, 14), (10, 13, 15, 12), (1, 17, 41, 40), (4, 20, 44, 37), (6, 22, 46, 35)], [(17, 19, 24, 22), (18, 21, 23, 20), (6, 25, 43, 16), (7, 28, 42, 13), (8, 30, 41, 11)], [(25, 27, 32, 30), (26, 29, 31, 28), (3, 38, 43, 19), (5, 36, 45, 21), (8, 33, 48, 24)], [(33, 35, 40, 38), (34, 37, 39, 36), (3, 9, 46, 32), (2, 12, 47, 29), (1, 14, 48, 27)], [(41, 43, 48, 46), (42, 45, 47, 44), (14, 22, 30, 38), (15, 23, 31, 39), (16, 24, 32, 40)] ] return [Permutation([[i - 1 for i in xi] for xi in x], size=48) for x in a] def rubik(n): """Return permutations for an nxn Rubik's cube. Permutations returned are for rotation of each of the slice from the face up to the last face for each of the 3 sides (in this order): front, right and bottom. Hence, the first n - 1 permutations are for the slices from the front. 
""" if n < 2: raise ValueError('dimension of cube must be > 1') # 1-based reference to rows and columns in Matrix def getr(f, i): return faces[f].col(n - i) def getl(f, i): return faces[f].col(i - 1) def getu(f, i): return faces[f].row(i - 1) def getd(f, i): return faces[f].row(n - i) def setr(f, i, s): faces[f][:, n - i] = Matrix(n, 1, s) def setl(f, i, s): faces[f][:, i - 1] = Matrix(n, 1, s) def setu(f, i, s): faces[f][i - 1, :] = Matrix(1, n, s) def setd(f, i, s): faces[f][n - i, :] = Matrix(1, n, s) # motion of a single face def cw(F, r=1): for _ in range(r): face = faces[F] rv = [] for c in range(n): for r in range(n - 1, -1, -1): rv.append(face[r, c]) faces[F] = Matrix(n, n, rv) def ccw(F): cw(F, 3) # motion of plane i from the F side; # fcw(0) moves the F face, fcw(1) moves the plane # just behind the front face, etc... def fcw(i, r=1): for _ in range(r): if i == 0: cw(F) i += 1 temp = getr(L, i) setr(L, i, list((getu(D, i)))) setu(D, i, list(reversed(getl(R, i)))) setl(R, i, list((getd(U, i)))) setd(U, i, list(reversed(temp))) i -= 1 def fccw(i): fcw(i, 3) # motion of the entire cube from the F side def FCW(r=1): for _ in range(r): cw(F) ccw(B) cw(U) t = faces[U] cw(L) faces[U] = faces[L] cw(D) faces[L] = faces[D] cw(R) faces[D] = faces[R] faces[R] = t def FCCW(): FCW(3) # motion of the entire cube from the U side def UCW(r=1): for _ in range(r): cw(U) ccw(D) t = faces[F] faces[F] = faces[R] faces[R] = faces[B] faces[B] = faces[L] faces[L] = t def UCCW(): UCW(3) # defining the permutations for the cube U, F, R, B, L, D = names = symbols('U, F, R, B, L, D') # the faces are represented by nxn matrices faces = {} count = 0 for fi in range(6): f = [] for a in range(n**2): f.append(count) count += 1 faces[names[fi]] = Matrix(n, n, f) # this will either return the value of the current permutation # (show != 1) or else append the permutation to the group, g def perm(show=0): # add perm to the list of perms p = [] for f in names: p.extend(faces[f]) if show: return p g.append(Permutation(p)) g = [] # container for the group's permutations I = list(range(6*n**2)) # the identity permutation used for checking # define permutations corresponding to cw rotations of the planes # up TO the last plane from that direction; by not including the # last plane, the orientation of the cube is maintained. # F slices for i in range(n - 1): fcw(i) perm() fccw(i) # restore assert perm(1) == I # R slices # bring R to front UCW() for i in range(n - 1): fcw(i) # put it back in place UCCW() # record perm() # restore # bring face to front UCW() fccw(i) # restore UCCW() assert perm(1) == I # D slices # bring up bottom FCW() UCCW() FCCW() for i in range(n - 1): # turn strip fcw(i) # put bottom back on the bottom FCW() UCW() FCCW() # record perm() # restore # bring up bottom FCW() UCCW() FCCW() # turn strip fccw(i) # put bottom back on the bottom FCW() UCW() FCCW() assert perm(1) == I return g
7181a8895fc0017ad5889e2c020473ee00339e5cd4ce7acfdc1255dc49721d07
from __future__ import print_function, division from sympy.core import Basic import random class GrayCode(Basic): """ A Gray code is essentially a Hamiltonian walk on a n-dimensional cube with edge length of one. The vertices of the cube are represented by vectors whose values are binary. The Hamilton walk visits each vertex exactly once. The Gray code for a 3d cube is ['000','100','110','010','011','111','101', '001']. A Gray code solves the problem of sequentially generating all possible subsets of n objects in such a way that each subset is obtained from the previous one by either deleting or adding a single object. In the above example, 1 indicates that the object is present, and 0 indicates that its absent. Gray codes have applications in statistics as well when we want to compute various statistics related to subsets in an efficient manner. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> a = GrayCode(4) >>> list(a.generate_gray()) ['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \ '1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000'] References ========== .. [1] Nijenhuis,A. and Wilf,H.S.(1978). Combinatorial Algorithms. Academic Press. .. [2] Knuth, D. (2011). The Art of Computer Programming, Vol 4 Addison Wesley """ _skip = False _current = 0 _rank = None def __new__(cls, n, *args, **kw_args): """ Default constructor. It takes a single argument ``n`` which gives the dimension of the Gray code. The starting Gray code string (``start``) or the starting ``rank`` may also be given; the default is to start at rank = 0 ('0...0'). Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3) >>> a GrayCode(3) >>> a.n 3 >>> a = GrayCode(3, start='100') >>> a.current '100' >>> a = GrayCode(4, rank=4) >>> a.current '0110' >>> a.rank 4 """ if n < 1 or int(n) != n: raise ValueError( 'Gray code dimension must be a positive integer, not %i' % n) n = int(n) args = (n,) + args obj = Basic.__new__(cls, *args) if 'start' in kw_args: obj._current = kw_args["start"] if len(obj._current) > n: raise ValueError('Gray code start has length %i but ' 'should not be greater than %i' % (len(obj._current), n)) elif 'rank' in kw_args: if int(kw_args["rank"]) != kw_args["rank"]: raise ValueError('Gray code rank must be a positive integer, ' 'not %i' % kw_args["rank"]) obj._rank = int(kw_args["rank"]) % obj.selections obj._current = obj.unrank(n, obj._rank) return obj def next(self, delta=1): """ Returns the Gray code a distance ``delta`` (default = 1) from the current value in canonical order. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3, start='110') >>> a.next().current '111' >>> a.next(-1).current '010' """ return GrayCode(self.n, rank=(self.rank + delta) % self.selections) @property def selections(self): """ Returns the number of bit vectors in the Gray code. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3) >>> a.selections 8 """ return 2**self.n @property def n(self): """ Returns the dimension of the Gray code. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(5) >>> a.n 5 """ return self.args[0] def generate_gray(self, **hints): """ Generates the sequence of bit vectors of a Gray Code. 
Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> list(a.generate_gray(start='011')) ['011', '010', '110', '111', '101', '100'] >>> list(a.generate_gray(rank=4)) ['110', '111', '101', '100'] See Also ======== skip References ========== .. [1] Knuth, D. (2011). The Art of Computer Programming, Vol 4, Addison Wesley """ bits = self.n start = None if "start" in hints: start = hints["start"] elif "rank" in hints: start = GrayCode.unrank(self.n, hints["rank"]) if start is not None: self._current = start current = self.current graycode_bin = gray_to_bin(current) if len(graycode_bin) > self.n: raise ValueError('Gray code start has length %i but should ' 'not be greater than %i' % (len(graycode_bin), bits)) self._current = int(current, 2) graycode_int = int(''.join(graycode_bin), 2) for i in range(graycode_int, 1 << bits): if self._skip: self._skip = False else: yield self.current bbtc = (i ^ (i + 1)) gbtc = (bbtc ^ (bbtc >> 1)) self._current = (self._current ^ gbtc) self._current = 0 def skip(self): """ Skips the bit generation. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3) >>> for i in a.generate_gray(): ... if i == '010': ... a.skip() ... print(i) ... 000 001 011 010 111 101 100 See Also ======== generate_gray """ self._skip = True @property def rank(self): """ Ranks the Gray code. A ranking algorithm determines the position (or rank) of a combinatorial object among all the objects w.r.t. a given order. For example, the 4 bit binary reflected Gray code (BRGC) '0101' has a rank of 6 as it appears in the 6th position in the canonical ordering of the family of 4 bit Gray codes. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> GrayCode(3, start='100').rank 7 >>> GrayCode(3, rank=7).current '100' See Also ======== unrank References ========== .. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html """ if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank @property def current(self): """ Returns the currently referenced Gray code as a bit string. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> GrayCode(3, start='100').current '100' """ rv = self._current or '0' if type(rv) is not str: rv = bin(rv)[2:] return rv.rjust(self.n, '0') @classmethod def unrank(self, n, rank): """ Unranks an n-bit sized Gray code of rank k. This method exists so that a derivative GrayCode class can define its own code of a given rank. The string here is generated in reverse order to allow for tail-call optimization. Examples ======== >>> from sympy.combinatorics.graycode import GrayCode >>> GrayCode(5, rank=3).current '00010' >>> GrayCode.unrank(5, 3) '00010' See Also ======== rank """ def _unrank(k, n): if n == 1: return str(k % 2) m = 2**(n - 1) if k < m: return '0' + _unrank(k, n - 1) return '1' + _unrank(m - (k % m) - 1, n - 1) return _unrank(rank, n) def random_bitstring(n): """ Generates a random bitlist of length n. Examples ======== >>> from sympy.combinatorics.graycode import random_bitstring >>> random_bitstring(3) # doctest: +SKIP 100 """ return ''.join([random.choice('01') for i in range(n)]) def gray_to_bin(bin_list): """ Convert from Gray coding to binary coding. We assume big endian encoding. 
Examples ======== >>> from sympy.combinatorics.graycode import gray_to_bin >>> gray_to_bin('100') '111' See Also ======== bin_to_gray """ b = [bin_list[0]] for i in range(1, len(bin_list)): b += str(int(b[i - 1] != bin_list[i])) return ''.join(b) def bin_to_gray(bin_list): """ Convert from binary coding to gray coding. We assume big endian encoding. Examples ======== >>> from sympy.combinatorics.graycode import bin_to_gray >>> bin_to_gray('111') '100' See Also ======== gray_to_bin """ b = [bin_list[0]] for i in range(1, len(bin_list)): b += str(int(bin_list[i]) ^ int(bin_list[i - 1])) return ''.join(b) def get_subset_from_bitstring(super_set, bitstring): """ Gets the subset defined by the bitstring. Examples ======== >>> from sympy.combinatorics.graycode import get_subset_from_bitstring >>> get_subset_from_bitstring(['a', 'b', 'c', 'd'], '0011') ['c', 'd'] >>> get_subset_from_bitstring(['c', 'a', 'c', 'c'], '1100') ['c', 'a'] See Also ======== graycode_subsets """ if len(super_set) != len(bitstring): raise ValueError("The sizes of the lists are not equal") return [super_set[i] for i, j in enumerate(bitstring) if bitstring[i] == '1'] def graycode_subsets(gray_code_set): """ Generates the subsets as enumerated by a Gray code. Examples ======== >>> from sympy.combinatorics.graycode import graycode_subsets >>> list(graycode_subsets(['a', 'b', 'c'])) [[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], \ ['a', 'c'], ['a']] >>> list(graycode_subsets(['a', 'b', 'c', 'c'])) [[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'], \ ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'], \ ['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']] See Also ======== get_subset_from_bitstring """ for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()): yield get_subset_from_bitstring(gray_code_set, bitstring)
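# A brief illustrative check, not part of graycode.py: it verifies the
# round-trip between gray_to_bin/bin_to_gray and the single-bit-change
# property of consecutive codewords produced by GrayCode.generate_gray().
from sympy.combinatorics.graycode import GrayCode, gray_to_bin, bin_to_gray

codes = list(GrayCode(4).generate_gray())
assert len(codes) == 2**4

# Consecutive codewords differ in exactly one bit position.
assert all(sum(x != y for x, y in zip(c1, c2)) == 1
           for c1, c2 in zip(codes, codes[1:]))

# gray_to_bin and bin_to_gray are mutually inverse conversions.
assert all(bin_to_gray(gray_to_bin(c)) == c for c in codes)

# rank/unrank agree with the canonical generation order.
assert GrayCode(4, rank=6).current == codes[6]
assert GrayCode.unrank(4, 6) == codes[6]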
3726d3f27b55f271c42fd89ff5634be6e4fe022f332c2b190df3f3ad47700290
from __future__ import print_function, division import random from collections import defaultdict from sympy.core.parameters import global_parameters from sympy.core.basic import Atom from sympy.core.expr import Expr from sympy.core.compatibility import \ is_sequence, reduce, as_int, Iterable from sympy.core.numbers import Integer from sympy.core.sympify import _sympify from sympy.logic.boolalg import as_Boolean from sympy.matrices import zeros from sympy.polys.polytools import lcm from sympy.utilities.iterables import (flatten, has_variety, minlex, has_dups, runs) from mpmath.libmp.libintmath import ifac def _af_rmul(a, b): """ Return the product b*a; input and output are array forms. The ith value is a[b[i]]. Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> _af_rmul(a, b) [1, 2, 0] >>> [a[b[i]] for i in range(3)] [1, 2, 0] This handles the operands in reverse order compared to the ``*`` operator: >>> a = Permutation(a) >>> b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] See Also ======== rmul, _af_rmuln """ return [a[i] for i in b] def _af_rmuln(*abc): """ Given [a, b, c, ...] return the product of ...*c*b*a using array forms. The ith value is a[b[c[i]]]. Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> _af_rmul(a, b) [1, 2, 0] >>> [a[b[i]] for i in range(3)] [1, 2, 0] This handles the operands in reverse order compared to the ``*`` operator: >>> a = Permutation(a); b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] See Also ======== rmul, _af_rmul """ a = abc m = len(a) if m == 3: p0, p1, p2 = a return [p0[p1[i]] for i in p2] if m == 4: p0, p1, p2, p3 = a return [p0[p1[p2[i]]] for i in p3] if m == 5: p0, p1, p2, p3, p4 = a return [p0[p1[p2[p3[i]]]] for i in p4] if m == 6: p0, p1, p2, p3, p4, p5 = a return [p0[p1[p2[p3[p4[i]]]]] for i in p5] if m == 7: p0, p1, p2, p3, p4, p5, p6 = a return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6] if m == 8: p0, p1, p2, p3, p4, p5, p6, p7 = a return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7] if m == 1: return a[0][:] if m == 2: a, b = a return [a[i] for i in b] if m == 0: raise ValueError("String must not be empty") p0 = _af_rmuln(*a[:m//2]) p1 = _af_rmuln(*a[m//2:]) return [p0[i] for i in p1] def _af_parity(pi): """ Computes the parity of a permutation in array form. The parity of a permutation reflects the parity of the number of inversions in the permutation, i.e., the number of pairs of x and y such that x > y but p[x] < p[y]. Examples ======== >>> from sympy.combinatorics.permutations import _af_parity >>> _af_parity([0, 1, 2, 3]) 0 >>> _af_parity([3, 2, 0, 1]) 1 See Also ======== Permutation """ n = len(pi) a = [0] * n c = 0 for j in range(n): if a[j] == 0: c += 1 a[j] = 1 i = j while pi[i] != j: i = pi[i] a[i] = 1 return (n - c) % 2 def _af_invert(a): """ Finds the inverse, ~A, of a permutation, A, given in array form. Examples ======== >>> from sympy.combinatorics.permutations import _af_invert, _af_rmul >>> A = [1, 2, 0, 3] >>> _af_invert(A) [2, 0, 1, 3] >>> _af_rmul(_, A) [0, 1, 2, 3] See Also ======== Permutation, __invert__ """ inv_form = [0] * len(a) for i, ai in enumerate(a): inv_form[ai] = i return inv_form def _af_pow(a, n): """ Routine for finding powers of a permutation. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation, _af_pow >>> p = Permutation([2, 0, 3, 1]) >>> p.order() 4 >>> _af_pow(p._array_form, 4) [0, 1, 2, 3] """ if n == 0: return list(range(len(a))) if n < 0: return _af_pow(_af_invert(a), -n) if n == 1: return a[:] elif n == 2: b = [a[i] for i in a] elif n == 3: b = [a[a[i]] for i in a] elif n == 4: b = [a[a[a[i]]] for i in a] else: # use binary multiplication b = list(range(len(a))) while 1: if n & 1: b = [b[i] for i in a] n -= 1 if not n: break if n % 4 == 0: a = [a[a[a[i]]] for i in a] n = n // 4 elif n % 2 == 0: a = [a[i] for i in a] n = n // 2 return b def _af_commutes_with(a, b): """ Checks if the two permutations with array forms given by ``a`` and ``b`` commute. Examples ======== >>> from sympy.combinatorics.permutations import _af_commutes_with >>> _af_commutes_with([1, 2, 0], [0, 2, 1]) False See Also ======== Permutation, commutes_with """ return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1)) class Cycle(dict): """ Wrapper around dict which provides the functionality of a disjoint cycle. A cycle shows the rule to use to move subsets of elements to obtain a permutation. The Cycle class is more flexible than Permutation in that 1) all elements need not be present in order to investigate how multiple cycles act in sequence and 2) it can contain singletons: >>> from sympy.combinatorics.permutations import Perm, Cycle A Cycle will automatically parse a cycle given as a tuple on the rhs: >>> Cycle(1, 2)(2, 3) (1 3 2) The identity cycle, Cycle(), can be used to start a product: >>> Cycle()(1, 2)(2, 3) (1 3 2) The array form of a Cycle can be obtained by calling the list method (or passing it to the list function) and all elements from 0 will be shown: >>> a = Cycle(1, 2) >>> a.list() [0, 2, 1] >>> list(a) [0, 2, 1] If a larger (or smaller) range is desired use the list method and provide the desired size -- but the Cycle cannot be truncated to a size smaller than the largest element that is out of place: >>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3) >>> b.list() [0, 2, 1, 3, 4] >>> b.list(b.size + 1) [0, 2, 1, 3, 4, 5] >>> b.list(-1) [0, 2, 1] Singletons are not shown when printing with one exception: the largest element is always shown -- as a singleton if necessary: >>> Cycle(1, 4, 10)(4, 5) (1 5 4 10) >>> Cycle(1, 2)(4)(5)(10) (1 2)(10) The array form can be used to instantiate a Permutation so other properties of the permutation can be investigated: >>> Perm(Cycle(1, 2)(3, 4).list()).transpositions() [(1, 2), (3, 4)] Notes ===== The underlying structure of the Cycle is a dictionary and although the __iter__ method has been redefined to give the array form of the cycle, the underlying dictionary items are still available with the such methods as items(): >>> list(Cycle(1, 2).items()) [(1, 2), (2, 1)] See Also ======== Permutation """ def __missing__(self, arg): """Enter arg into dictionary and return arg.""" return as_int(arg) def __iter__(self): for i in self.list(): yield i def __call__(self, *other): """Return product of cycles processed from R to L. Examples ======== >>> from sympy.combinatorics.permutations import Cycle as C >>> from sympy.combinatorics.permutations import Permutation as Perm >>> C(1, 2)(2, 3) (1 3 2) An instance of a Cycle will automatically parse list-like objects and Permutations that are on the right. 
It is more flexible than the Permutation in that all elements need not be present: >>> a = C(1, 2) >>> a(2, 3) (1 3 2) >>> a(2, 3)(4, 5) (1 3 2)(4 5) """ rv = Cycle(*other) for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]): rv[k] = v return rv def list(self, size=None): """Return the cycles as an explicit list starting from 0 up to the greater of the largest value in the cycles and size. Truncation of trailing unmoved items will occur when size is less than the maximum element in the cycle; if this is desired, setting ``size=-1`` will guarantee such trimming. Examples ======== >>> from sympy.combinatorics.permutations import Cycle >>> from sympy.combinatorics.permutations import Permutation >>> p = Cycle(2, 3)(4, 5) >>> p.list() [0, 1, 3, 2, 5, 4] >>> p.list(10) [0, 1, 3, 2, 5, 4, 6, 7, 8, 9] Passing a length too small will trim trailing, unchanged elements in the permutation: >>> Cycle(2, 4)(1, 2, 4).list(-1) [0, 2, 1] """ if not self and size is None: raise ValueError('must give size for empty Cycle') if size is not None: big = max([i for i in self.keys() if self[i] != i] + [0]) size = max(size, big + 1) else: size = self.size return [self[i] for i in range(size)] def __repr__(self): """We want it to print as a Cycle, not as a dict. Examples ======== >>> from sympy.combinatorics import Cycle >>> Cycle(1, 2) (1 2) >>> print(_) (1 2) >>> list(Cycle(1, 2).items()) [(1, 2), (2, 1)] """ if not self: return 'Cycle()' cycles = Permutation(self).cyclic_form s = ''.join(str(tuple(c)) for c in cycles) big = self.size - 1 if not any(i == big for c in cycles for i in c): s += '(%s)' % big return 'Cycle%s' % s def __str__(self): """We want it to be printed in a Cycle notation with no comma in-between. Examples ======== >>> from sympy.combinatorics import Cycle >>> Cycle(1, 2) (1 2) >>> Cycle(1, 2, 4)(5, 6) (1 2 4)(5 6) """ if not self: return '()' cycles = Permutation(self).cyclic_form s = ''.join(str(tuple(c)) for c in cycles) big = self.size - 1 if not any(i == big for c in cycles for i in c): s += '(%s)' % big s = s.replace(',', '') return s def __init__(self, *args): """Load up a Cycle instance with the values for the cycle. Examples ======== >>> from sympy.combinatorics.permutations import Cycle >>> Cycle(1, 2, 6) (1 2 6) """ if not args: return if len(args) == 1: if isinstance(args[0], Permutation): for c in args[0].cyclic_form: self.update(self(*c)) return elif isinstance(args[0], Cycle): for k, v in args[0].items(): self[k] = v return args = [as_int(a) for a in args] if any(i < 0 for i in args): raise ValueError('negative integers are not allowed in a cycle.') if has_dups(args): raise ValueError('All elements must be unique in a cycle.') for i in range(-len(args), 0): self[args[i]] = args[i + 1] @property def size(self): if not self: return 0 return max(self.keys()) + 1 def copy(self): return Cycle(self) class Permutation(Atom): """ A permutation, alternatively known as an 'arrangement number' or 'ordering' is an arrangement of the elements of an ordered list into a one-to-one mapping with itself. The permutation of a given arrangement is given by indicating the positions of the elements after re-arrangement [2]_. For example, if one started with elements [x, y, a, b] (in that order) and they were reordered as [x, y, b, a] then the permutation would be [0, 1, 3, 2]. Notice that (in SymPy) the first element is always referred to as 0 and the permutation uses the indices of the elements in the original ordering, not the elements (a, b, etc...) themselves. 
>>> from sympy.combinatorics import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) Permutations Notation ===================== Permutations are commonly represented in disjoint cycle or array forms. Array Notation and 2-line Form ------------------------------------ In the 2-line form, the elements and their final positions are shown as a matrix with 2 rows: [0 1 2 ... n-1] [p(0) p(1) p(2) ... p(n-1)] Since the first line is always range(n), where n is the size of p, it is sufficient to represent the permutation by the second line, referred to as the "array form" of the permutation. This is entered in brackets as the argument to the Permutation class: >>> p = Permutation([0, 2, 1]); p Permutation([0, 2, 1]) Given i in range(p.size), the permutation maps i to i^p >>> [i^p for i in range(p.size)] [0, 2, 1] The composite of two permutations p*q means first apply p, then q, so i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules: >>> q = Permutation([2, 1, 0]) >>> [i^p^q for i in range(3)] [2, 0, 1] >>> [i^(p*q) for i in range(3)] [2, 0, 1] One can use also the notation p(i) = i^p, but then the composition rule is (p*q)(i) = q(p(i)), not p(q(i)): >>> [(p*q)(i) for i in range(p.size)] [2, 0, 1] >>> [q(p(i)) for i in range(p.size)] [2, 0, 1] >>> [p(q(i)) for i in range(p.size)] [1, 2, 0] Disjoint Cycle Notation ----------------------- In disjoint cycle notation, only the elements that have shifted are indicated. In the above case, the 2 and 1 switched places. This can be entered in two ways: >>> Permutation(1, 2) == Permutation([[1, 2]]) == p True Only the relative ordering of elements in a cycle matter: >>> Permutation(1,2,3) == Permutation(2,3,1) == Permutation(3,1,2) True The disjoint cycle notation is convenient when representing permutations that have several cycles in them: >>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]]) True It also provides some economy in entry when computing products of permutations that are written in disjoint cycle notation: >>> Permutation(1, 2)(1, 3)(2, 3) Permutation([0, 3, 2, 1]) >>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]]) True Caution: when the cycles have common elements between them then the order in which the permutations are applied matters. The convention is that the permutations are applied from *right to left*. In the following, the transposition of elements 2 and 3 is followed by the transposition of elements 1 and 2: >>> Permutation(1, 2)(2, 3) == Permutation([(1, 2), (2, 3)]) True >>> Permutation(1, 2)(2, 3).list() [0, 3, 1, 2] If the first and second elements had been swapped first, followed by the swapping of the second and third, the result would have been [0, 2, 3, 1]. If, for some reason, you want to apply the cycles in the order they are entered, you can simply reverse the order of cycles: >>> Permutation([(1, 2), (2, 3)][::-1]).list() [0, 2, 3, 1] Entering a singleton in a permutation is a way to indicate the size of the permutation. The ``size`` keyword can also be used. Array-form entry: >>> Permutation([[1, 2], [9]]) Permutation([0, 2, 1], size=10) >>> Permutation([[1, 2]], size=10) Permutation([0, 2, 1], size=10) Cyclic-form entry: >>> Permutation(1, 2, size=10) Permutation([0, 2, 1], size=10) >>> Permutation(9)(1, 2) Permutation([0, 2, 1], size=10) Caution: no singleton containing an element larger than the largest in any previous cycle can be entered. 
This is an important difference in how Permutation and Cycle handle the __call__ syntax. A singleton argument at the start of a Permutation performs instantiation of the Permutation and is permitted: >>> Permutation(5) Permutation([], size=6) A singleton entered after instantiation is a call to the permutation -- a function call -- and if the argument is out of range it will trigger an error. For this reason, it is better to start the cycle with the singleton: The following fails because there is is no element 3: >>> Permutation(1, 2)(3) Traceback (most recent call last): ... IndexError: list index out of range This is ok: only the call to an out of range singleton is prohibited; otherwise the permutation autosizes: >>> Permutation(3)(1, 2) Permutation([0, 2, 1, 3]) >>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2) True Equality testing ---------------- The array forms must be the same in order for permutations to be equal: >>> Permutation([1, 0, 2, 3]) == Permutation([1, 0]) False Identity Permutation -------------------- The identity permutation is a permutation in which no element is out of place. It can be entered in a variety of ways. All the following create an identity permutation of size 4: >>> I = Permutation([0, 1, 2, 3]) >>> all(p == I for p in [ ... Permutation(3), ... Permutation(range(4)), ... Permutation([], size=4), ... Permutation(size=4)]) True Watch out for entering the range *inside* a set of brackets (which is cycle notation): >>> I == Permutation([range(4)]) False Permutation Printing ==================== There are a few things to note about how Permutations are printed. 1) If you prefer one form (array or cycle) over another, you can set ``init_printing`` with the ``perm_cyclic`` flag. >>> from sympy import init_printing >>> p = Permutation(1, 2)(4, 5)(3, 4) >>> p Permutation([0, 2, 1, 4, 5, 3]) >>> init_printing(perm_cyclic=True, pretty_print=False) >>> p (1 2)(3 4 5) 2) Regardless of the setting, a list of elements in the array for cyclic form can be obtained and either of those can be copied and supplied as the argument to Permutation: >>> p.array_form [0, 2, 1, 4, 5, 3] >>> p.cyclic_form [[1, 2], [3, 4, 5]] >>> Permutation(_) == p True 3) Printing is economical in that as little as possible is printed while retaining all information about the size of the permutation: >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation([1, 0, 2, 3]) Permutation([1, 0, 2, 3]) >>> Permutation([1, 0, 2, 3], size=20) Permutation([1, 0], size=20) >>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20) Permutation([1, 0, 2, 4, 3], size=20) >>> p = Permutation([1, 0, 2, 3]) >>> init_printing(perm_cyclic=True, pretty_print=False) >>> p (3)(0 1) >>> init_printing(perm_cyclic=False, pretty_print=False) The 2 was not printed but it is still there as can be seen with the array_form and size methods: >>> p.array_form [1, 0, 2, 3] >>> p.size 4 Short introduction to other methods =================================== The permutation can act as a bijective function, telling what element is located at a given position >>> q = Permutation([5, 2, 3, 4, 1, 0]) >>> q.array_form[1] # the hard way 2 >>> q(1) # the easy way 2 >>> {i: q(i) for i in range(q.size)} # showing the bijection {0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0} The full cyclic form (including singletons) can be obtained: >>> p.full_cyclic_form [[0, 1], [2], [3]] Any permutation can be factored into transpositions of pairs of elements: >>> Permutation([[1, 2], [3, 4, 5]]).transpositions() [(1, 2), (3, 5), (3, 4)] >>> 
Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form [[1, 2], [3, 4, 5]] The number of permutations on a set of n elements is given by n! and is called the cardinality. >>> p.size 4 >>> p.cardinality 24 A given permutation has a rank among all the possible permutations of the same elements, but what that rank is depends on how the permutations are enumerated. (There are a number of different methods of doing so.) The lexicographic rank is given by the rank method and this rank is used to increment a permutation with addition/subtraction: >>> p.rank() 6 >>> p + 1 Permutation([1, 0, 3, 2]) >>> p.next_lex() Permutation([1, 0, 3, 2]) >>> _.rank() 7 >>> p.unrank_lex(p.size, rank=7) Permutation([1, 0, 3, 2]) The product of two permutations p and q is defined as their composition as functions, (p*q)(i) = q(p(i)) [6]_. >>> p = Permutation([1, 0, 2, 3]) >>> q = Permutation([2, 3, 1, 0]) >>> list(q*p) [2, 3, 0, 1] >>> list(p*q) [3, 2, 1, 0] >>> [q(p(i)) for i in range(p.size)] [3, 2, 1, 0] The permutation can be 'applied' to any list-like object, not only Permutations: >>> p(['zero', 'one', 'four', 'two']) ['one', 'zero', 'four', 'two'] >>> p('zo42') ['o', 'z', '4', '2'] If you have a list of arbitrary elements, the corresponding permutation can be found with the from_sequence method: >>> Permutation.from_sequence('SymPy') Permutation([1, 3, 2, 0, 4]) See Also ======== Cycle References ========== .. [1] Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics Combinatorics and Graph Theory with Mathematica. Reading, MA: Addison-Wesley, pp. 3-16, 1990. .. [2] Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011. .. [3] Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001), 281-284. DOI=10.1016/S0020-0190(01)00141-7 .. [4] D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms' CRC Press, 1999 .. [5] Graham, R. L.; Knuth, D. E.; and Patashnik, O. Concrete Mathematics: A Foundation for Computer Science, 2nd ed. Reading, MA: Addison-Wesley, 1994. .. [6] https://en.wikipedia.org/wiki/Permutation#Product_and_inverse .. [7] https://en.wikipedia.org/wiki/Lehmer_code """ is_Permutation = True _array_form = None _cyclic_form = None _cycle_structure = None _size = None _rank = None def __new__(cls, *args, **kwargs): """ Constructor for the Permutation object from a list or a list of lists in which all elements of the permutation may appear only once. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) Permutations entered in array-form are left unaltered: >>> Permutation([0, 2, 1]) Permutation([0, 2, 1]) Permutations entered in cyclic form are converted to array form; singletons need not be entered, but can be entered to indicate the largest element: >>> Permutation([[4, 5, 6], [0, 1]]) Permutation([1, 0, 2, 3, 5, 6, 4]) >>> Permutation([[4, 5, 6], [0, 1], [19]]) Permutation([1, 0, 2, 3, 5, 6, 4], size=20) All manipulation of permutations assumes that the smallest element is 0 (in keeping with 0-based indexing in Python) so if the 0 is missing when entering a permutation in array form, an error will be raised: >>> Permutation([2, 1]) Traceback (most recent call last): ... ValueError: Integers 0 through 2 must be present. 
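        Including the 0, or entering the same cycle in cyclic form (where
        missing values are filled in automatically), avoids the error; a
        brief illustrative contrast:

        >>> Permutation([2, 1, 0])
        Permutation([2, 1, 0])
        >>> Permutation([[2, 1]])
        Permutation([0, 2, 1])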
If a permutation is entered in cyclic form, it can be entered without singletons and the ``size`` specified so those values can be filled in, otherwise the array form will only extend to the maximum value in the cycles: >>> Permutation([[1, 4], [3, 5, 2]], size=10) Permutation([0, 4, 3, 5, 1, 2], size=10) >>> _.array_form [0, 4, 3, 5, 1, 2, 6, 7, 8, 9] """ size = kwargs.pop('size', None) if size is not None: size = int(size) #a) () #b) (1) = identity #c) (1, 2) = cycle #d) ([1, 2, 3]) = array form #e) ([[1, 2]]) = cyclic form #f) (Cycle) = conversion to permutation #g) (Permutation) = adjust size or return copy ok = True if not args: # a return cls._af_new(list(range(size or 0))) elif len(args) > 1: # c return cls._af_new(Cycle(*args).list(size)) if len(args) == 1: a = args[0] if isinstance(a, cls): # g if size is None or size == a.size: return a return cls(a.array_form, size=size) if isinstance(a, Cycle): # f return cls._af_new(a.list(size)) if not is_sequence(a): # b if size is not None and a + 1 > size: raise ValueError('size is too small when max is %s' % a) return cls._af_new(list(range(a + 1))) if has_variety(is_sequence(ai) for ai in a): ok = False else: ok = False if not ok: raise ValueError("Permutation argument must be a list of ints, " "a list of lists, Permutation or Cycle.") # safe to assume args are valid; this also makes a copy # of the args args = list(args[0]) is_cycle = args and is_sequence(args[0]) if is_cycle: # e args = [[int(i) for i in c] for c in args] else: # d args = [int(i) for i in args] # if there are n elements present, 0, 1, ..., n-1 should be present # unless a cycle notation has been provided. A 0 will be added # for convenience in case one wants to enter permutations where # counting starts from 1. temp = flatten(args) if has_dups(temp) and not is_cycle: raise ValueError('there were repeated elements.') temp = set(temp) if not is_cycle: if any(i not in temp for i in range(len(temp))): raise ValueError('Integers 0 through %s must be present.' 
% max(temp)) if size is not None and temp and max(temp) + 1 > size: raise ValueError('max element should not exceed %s' % (size - 1)) if is_cycle: # it's not necessarily canonical so we won't store # it -- use the array form instead c = Cycle() for ci in args: c = c(*ci) aform = c.list() else: aform = list(args) if size and size > len(aform): # don't allow for truncation of permutation which # might split a cycle and lead to an invalid aform # but do allow the permutation size to be increased aform.extend(list(range(len(aform), size))) return cls._af_new(aform) def _eval_Eq(self, other): other = _sympify(other) if not isinstance(other, Permutation): return None if self._size != other._size: return None return as_Boolean(self._array_form == other._array_form) @classmethod def _af_new(cls, perm): """A method to produce a Permutation object from a list; the list is bound to the _array_form attribute, so it must not be modified; this method is meant for internal use only; the list ``a`` is supposed to be generated as a temporary value in a method, so p = Perm._af_new(a) is the only object to hold a reference to ``a``:: Examples ======== >>> from sympy.combinatorics.permutations import Perm >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> a = [2, 1, 3, 0] >>> p = Perm._af_new(a) >>> p Permutation([2, 1, 3, 0]) """ p = super(Permutation, cls).__new__(cls) p._array_form = perm p._size = len(perm) return p def _hashable_content(self): # the array_form (a list) is the Permutation arg, so we need to # return a tuple, instead return tuple(self.array_form) @property def array_form(self): """ Return a copy of the attribute _array_form Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[2, 0], [3, 1]]) >>> p.array_form [2, 3, 0, 1] >>> Permutation([[2, 0, 3, 1]]).array_form [3, 2, 0, 1] >>> Permutation([2, 0, 3, 1]).array_form [2, 0, 3, 1] >>> Permutation([[1, 2], [4, 5]]).array_form [0, 2, 1, 3, 5, 4] """ return self._array_form[:] def list(self, size=None): """Return the permutation as an explicit list, possibly trimming unmoved elements if size is less than the maximum element in the permutation; if this is desired, setting ``size=-1`` will guarantee such trimming. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation(2, 3)(4, 5) >>> p.list() [0, 1, 3, 2, 5, 4] >>> p.list(10) [0, 1, 3, 2, 5, 4, 6, 7, 8, 9] Passing a length too small will trim trailing, unchanged elements in the permutation: >>> Permutation(2, 4)(1, 2, 4).list(-1) [0, 2, 1] >>> Permutation(3).list(-1) [] """ if not self and size is None: raise ValueError('must give size for empty Cycle') rv = self.array_form if size is not None: if size > self.size: rv.extend(list(range(self.size, size))) else: # find first value from rhs where rv[i] != i i = self.size - 1 while rv: if rv[-1] != i: break rv.pop() i -= 1 return rv @property def cyclic_form(self): """ This is used to convert to the cyclic notation from the canonical notation. Singletons are omitted. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 3, 1, 2]) >>> p.cyclic_form [[1, 3, 2]] >>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form [[0, 1], [3, 4]] See Also ======== array_form, full_cyclic_form """ if self._cyclic_form is not None: return list(self._cyclic_form) array_form = self.array_form unchecked = [True] * len(array_form) cyclic_form = [] for i in range(len(array_form)): if unchecked[i]: cycle = [] cycle.append(i) unchecked[i] = False j = i while unchecked[array_form[j]]: j = array_form[j] cycle.append(j) unchecked[j] = False if len(cycle) > 1: cyclic_form.append(cycle) assert cycle == list(minlex(cycle, is_set=True)) cyclic_form.sort() self._cyclic_form = cyclic_form[:] return cyclic_form @property def full_cyclic_form(self): """Return permutation in cyclic form including singletons. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> Permutation([0, 2, 1]).full_cyclic_form [[0], [1, 2]] """ need = set(range(self.size)) - set(flatten(self.cyclic_form)) rv = self.cyclic_form rv.extend([[i] for i in need]) rv.sort() return rv @property def size(self): """ Returns the number of elements in the permutation. Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([[3, 2], [0, 1]]).size 4 See Also ======== cardinality, length, order, rank """ return self._size def support(self): """Return the elements in permutation, P, for which P[i] != i. Examples ======== >>> from sympy.combinatorics import Permutation >>> p = Permutation([[3, 2], [0, 1], [4]]) >>> p.array_form [1, 0, 3, 2, 4] >>> p.support() [0, 1, 2, 3] """ a = self.array_form return [i for i, e in enumerate(a) if a[i] != i] def __add__(self, other): """Return permutation that is other higher in rank than self. The rank is the lexicographical rank, with the identity permutation having rank of 0. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> I = Permutation([0, 1, 2, 3]) >>> a = Permutation([2, 1, 3, 0]) >>> I + a.rank() == a True See Also ======== __sub__, inversion_vector """ rank = (self.rank() + other) % self.cardinality rv = self.unrank_lex(self.size, rank) rv._rank = rank return rv def __sub__(self, other): """Return the permutation that is other lower in rank than self. See Also ======== __add__ """ return self.__add__(-other) @staticmethod def rmul(*args): """ Return product of Permutations [a, b, c, ...] as the Permutation whose ith value is a(b(c(i))). a, b, c, ... can be Permutation objects or tuples. Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> a = Permutation(a); b = Permutation(b) >>> list(Permutation.rmul(a, b)) [1, 2, 0] >>> [a(b(i)) for i in range(3)] [1, 2, 0] This handles the operands in reverse order compared to the ``*`` operator: >>> a = Permutation(a); b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] Notes ===== All items in the sequence will be parsed by Permutation as necessary as long as the first item is a Permutation: >>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b) True The reverse order of arguments will raise a TypeError. 
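        As an informal consistency check, ``rmul`` agrees with the ``*``
        operator once the operand order is reversed (reusing ``a`` and
        ``b`` from the examples above):

        >>> Permutation.rmul(a, b) == b*a
        True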
""" rv = args[0] for i in range(1, len(args)): rv = args[i]*rv return rv @classmethod def rmul_with_af(cls, *args): """ same as rmul, but the elements of args are Permutation objects which have _array_form """ a = [x._array_form for x in args] rv = cls._af_new(_af_rmuln(*a)) return rv def mul_inv(self, other): """ other*~self, self and other have _array_form """ a = _af_invert(self._array_form) b = other._array_form return self._af_new(_af_rmul(a, b)) def __rmul__(self, other): """This is needed to coerce other to Permutation in rmul.""" cls = type(self) return cls(other)*self def __mul__(self, other): """ Return the product a*b as a Permutation; the ith value is b(a(i)). Examples ======== >>> from sympy.combinatorics.permutations import _af_rmul, Permutation >>> a, b = [1, 0, 2], [0, 2, 1] >>> a = Permutation(a); b = Permutation(b) >>> list(a*b) [2, 0, 1] >>> [b(a(i)) for i in range(3)] [2, 0, 1] This handles operands in reverse order compared to _af_rmul and rmul: >>> al = list(a); bl = list(b) >>> _af_rmul(al, bl) [1, 2, 0] >>> [al[bl[i]] for i in range(3)] [1, 2, 0] It is acceptable for the arrays to have different lengths; the shorter one will be padded to match the longer one: >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> b*Permutation([1, 0]) Permutation([1, 2, 0]) >>> Permutation([1, 0])*b Permutation([2, 0, 1]) It is also acceptable to allow coercion to handle conversion of a single list to the left of a Permutation: >>> [0, 1]*a # no change: 2-element identity Permutation([1, 0, 2]) >>> [[0, 1]]*a # exchange first two elements Permutation([0, 1, 2]) You cannot use more than 1 cycle notation in a product of cycles since coercion can only handle one argument to the left. To handle multiple cycles it is convenient to use Cycle instead of Permutation: >>> [[1, 2]]*[[2, 3]]*Permutation([]) # doctest: +SKIP >>> from sympy.combinatorics.permutations import Cycle >>> Cycle(1, 2)(2, 3) (1 3 2) """ a = self.array_form # __rmul__ makes sure the other is a Permutation b = other.array_form if not b: perm = a else: b.extend(list(range(len(b), len(a)))) perm = [b[i] for i in a] + b[len(a):] return self._af_new(perm) def commutes_with(self, other): """ Checks if the elements are commuting. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> a = Permutation([1, 4, 3, 0, 2, 5]) >>> b = Permutation([0, 1, 2, 3, 4, 5]) >>> a.commutes_with(b) True >>> b = Permutation([2, 3, 5, 4, 1, 0]) >>> a.commutes_with(b) False """ a = self.array_form b = other.array_form return _af_commutes_with(a, b) def __pow__(self, n): """ Routine for finding powers of a permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([2, 0, 3, 1]) >>> p.order() 4 >>> p**4 Permutation([0, 1, 2, 3]) """ if isinstance(n, Permutation): raise NotImplementedError( 'p**p is not defined; do you mean p^p (conjugate)?') n = int(n) return self._af_new(_af_pow(self.array_form, n)) def __rxor__(self, i): """Return self(i) when ``i`` is an int. Examples ======== >>> from sympy.combinatorics import Permutation >>> p = Permutation(1, 2, 9) >>> 2^p == p(2) == 9 True """ if int(i) == i: return self(i) else: raise NotImplementedError( "i^p = p(i) when i is an integer, not %s." % i) def __xor__(self, h): """Return the conjugate permutation ``~h*self*h` `. 
If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and ``b = ~h*a*h`` and both have the same cycle structure. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation(1, 2, 9) >>> q = Permutation(6, 9, 8) >>> p*q != q*p True Calculate and check properties of the conjugate: >>> c = p^q >>> c == ~q*p*q and p == q*c*~q True The expression q^p^r is equivalent to q^(p*r): >>> r = Permutation(9)(4, 6, 8) >>> q^p^r == q^(p*r) True If the term to the left of the conjugate operator, i, is an integer then this is interpreted as selecting the ith element from the permutation to the right: >>> all(i^p == p(i) for i in range(p.size)) True Note that the * operator as higher precedence than the ^ operator: >>> q^r*p^r == q^(r*p)^r == Permutation(9)(1, 6, 4) True Notes ===== In Python the precedence rule is p^q^r = (p^q)^r which differs in general from p^(q^r) >>> q^p^r (9)(1 4 8) >>> q^(p^r) (9)(1 8 6) For a given r and p, both of the following are conjugates of p: ~r*p*r and r*p*~r. But these are not necessarily the same: >>> ~r*p*r == r*p*~r True >>> p = Permutation(1, 2, 9)(5, 6) >>> ~r*p*r == r*p*~r False The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to this method: >>> p^~r == r*p*~r True """ if self.size != h.size: raise ValueError("The permutations must be of equal size.") a = [None]*self.size h = h._array_form p = self._array_form for i in range(self.size): a[h[i]] = h[p[i]] return self._af_new(a) def transpositions(self): """ Return the permutation decomposed into a list of transpositions. It is always possible to express a permutation as the product of transpositions, see [1] Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]]) >>> t = p.transpositions() >>> t [(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)] >>> print(''.join(str(c) for c in t)) (0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2) >>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p True References ========== .. [1] https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties """ a = self.cyclic_form res = [] for x in a: nx = len(x) if nx == 2: res.append(tuple(x)) elif nx > 2: first = x[0] for y in x[nx - 1:0:-1]: res.append((first, y)) return res @classmethod def from_sequence(self, i, key=None): """Return the permutation needed to obtain ``i`` from the sorted elements of ``i``. If custom sorting is desired, a key can be given. Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation.from_sequence('SymPy') (4)(0 1 3) >>> _(sorted("SymPy")) ['S', 'y', 'm', 'P', 'y'] >>> Permutation.from_sequence('SymPy', key=lambda x: x.lower()) (4)(0 2)(1 3) """ ic = list(zip(i, list(range(len(i))))) if key: ic.sort(key=lambda x: key(x[0])) else: ic.sort() return ~Permutation([i[1] for i in ic]) def __invert__(self): """ Return the inverse of the permutation. A permutation multiplied by its inverse is the identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([[2, 0], [3, 1]]) >>> ~p Permutation([2, 3, 0, 1]) >>> _ == p**-1 True >>> p*~p == ~p*p == Permutation([0, 1, 2, 3]) True """ return self._af_new(_af_invert(self._array_form)) def __iter__(self): """Yield elements from array form. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> list(Permutation(range(3))) [0, 1, 2] """ for i in self.array_form: yield i def __repr__(self): from sympy.printing.repr import srepr return srepr(self) def __call__(self, *i): """ Allows applying a permutation instance as a bijective function. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([[2, 0], [3, 1]]) >>> p.array_form [2, 3, 0, 1] >>> [p(i) for i in range(4)] [2, 3, 0, 1] If an array is given then the permutation selects the items from the array (i.e. the permutation is applied to the array): >>> from sympy.abc import x >>> p([x, 1, 0, x**2]) [0, x**2, x, 1] """ # list indices can be Integer or int; leave this # as it is (don't test or convert it) because this # gets called a lot and should be fast if len(i) == 1: i = i[0] if not isinstance(i, Iterable): i = as_int(i) if i < 0 or i > self.size: raise TypeError( "{} should be an integer between 0 and {}" .format(i, self.size-1)) return self._array_form[i] # P([a, b, c]) if len(i) != self.size: raise TypeError( "{} should have the length {}.".format(i, self.size)) return [i[j] for j in self._array_form] # P(1, 2, 3) return self*Permutation(Cycle(*i), size=self.size) def atoms(self): """ Returns all the elements of a permutation Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0, 1, 2, 3, 4, 5]).atoms() {0, 1, 2, 3, 4, 5} >>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms() {0, 1, 2, 3, 4, 5} """ return set(self.array_form) def apply(self, i): r"""Apply the permutation to an expression. Parameters ========== i : Expr It should be an integer between $0$ and $n-1$ where $n$ is the size of the permutation. If it is a symbol or a symbolic expression that can have integer values, an ``AppliedPermutation`` object will be returned which can represent an unevaluated function. Notes ===== Any permutation can be defined as a bijective function $\sigma : \{ 0, 1, ..., n-1 \} \rightarrow \{ 0, 1, ..., n-1 \}$ where $n$ denotes the size of the permutation. The definition may even be extended for any set with distinctive elements, such that the permutation can even be applied for real numbers or such, however, it is not implemented for now for computational reasons and the integrity with the group theory module. This function is similar to the ``__call__`` magic, however, ``__call__`` magic already has some other applications like permuting an array or attatching new cycles, which would not always be mathematically consistent. This also guarantees that the return type is a SymPy integer, which guarantees the safety to use assumptions. """ i = _sympify(i) if i.is_integer is False: raise NotImplementedError("{} should be an integer.".format(i)) n = self.size if (i < 0) == True or (i >= n) == True: raise NotImplementedError( "{} should be an integer between 0 and {}".format(i, n-1)) if i.is_Integer: return Integer(self._array_form[i]) return AppliedPermutation(self, i) def next_lex(self): """ Returns the next permutation in lexicographical order. If self is the last permutation in lexicographical order it returns None. See [4] section 2.4. 
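        For instance, ``Permutation([3, 2, 1, 0])`` already has the highest
        lexicographic rank among permutations of size 4, so there is no
        next permutation (a small illustrative check):

        >>> from sympy.combinatorics.permutations import Permutation
        >>> Permutation([3, 2, 1, 0]).next_lex() is None
        True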
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([2, 3, 1, 0]) >>> p = Permutation([2, 3, 1, 0]); p.rank() 17 >>> p = p.next_lex(); p.rank() 18 See Also ======== rank, unrank_lex """ perm = self.array_form[:] n = len(perm) i = n - 2 while perm[i + 1] < perm[i]: i -= 1 if i == -1: return None else: j = n - 1 while perm[j] < perm[i]: j -= 1 perm[j], perm[i] = perm[i], perm[j] i += 1 j = n - 1 while i < j: perm[j], perm[i] = perm[i], perm[j] i += 1 j -= 1 return self._af_new(perm) @classmethod def unrank_nonlex(self, n, r): """ This is a linear time unranking algorithm that does not respect lexicographic order [3]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation.unrank_nonlex(4, 5) Permutation([2, 0, 3, 1]) >>> Permutation.unrank_nonlex(4, -1) Permutation([0, 1, 2, 3]) See Also ======== next_nonlex, rank_nonlex """ def _unrank1(n, r, a): if n > 0: a[n - 1], a[r % n] = a[r % n], a[n - 1] _unrank1(n - 1, r//n, a) id_perm = list(range(n)) n = int(n) r = r % ifac(n) _unrank1(n, r, id_perm) return self._af_new(id_perm) def rank_nonlex(self, inv_perm=None): """ This is a linear time ranking algorithm that does not enforce lexicographic order [3]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.rank_nonlex() 23 See Also ======== next_nonlex, unrank_nonlex """ def _rank1(n, perm, inv_perm): if n == 1: return 0 s = perm[n - 1] t = inv_perm[n - 1] perm[n - 1], perm[t] = perm[t], s inv_perm[n - 1], inv_perm[s] = inv_perm[s], t return s + n*_rank1(n - 1, perm, inv_perm) if inv_perm is None: inv_perm = (~self).array_form if not inv_perm: return 0 perm = self.array_form[:] r = _rank1(len(perm), perm, inv_perm) return r def next_nonlex(self): """ Returns the next permutation in nonlex order [3]. If self is the last permutation in this order it returns None. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([2, 0, 3, 1]); p.rank_nonlex() 5 >>> p = p.next_nonlex(); p Permutation([3, 0, 1, 2]) >>> p.rank_nonlex() 6 See Also ======== rank_nonlex, unrank_nonlex """ r = self.rank_nonlex() if r == ifac(self.size) - 1: return None return self.unrank_nonlex(self.size, r + 1) def rank(self): """ Returns the lexicographic rank of the permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.rank() 0 >>> p = Permutation([3, 2, 1, 0]) >>> p.rank() 23 See Also ======== next_lex, unrank_lex, cardinality, length, order, size """ if not self._rank is None: return self._rank rank = 0 rho = self.array_form[:] n = self.size - 1 size = n + 1 psize = int(ifac(n)) for j in range(size - 1): rank += rho[j]*psize for i in range(j + 1, size): if rho[i] > rho[j]: rho[i] -= 1 psize //= n n -= 1 self._rank = rank return rank @property def cardinality(self): """ Returns the number of all possible permutations. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.cardinality 24 See Also ======== length, order, rank, size """ return int(ifac(self.size)) def parity(self): """ Computes the parity of a permutation. 
The parity of a permutation reflects the parity of the number of inversions in the permutation, i.e., the number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.parity() 0 >>> p = Permutation([3, 2, 0, 1]) >>> p.parity() 1 See Also ======== _af_parity """ if self._cyclic_form is not None: return (self.size - self.cycles) % 2 return _af_parity(self.array_form) @property def is_even(self): """ Checks if a permutation is even. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.is_even True >>> p = Permutation([3, 2, 1, 0]) >>> p.is_even True See Also ======== is_odd """ return not self.is_odd @property def is_odd(self): """ Checks if a permutation is odd. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.is_odd False >>> p = Permutation([3, 2, 0, 1]) >>> p.is_odd True See Also ======== is_even """ return bool(self.parity() % 2) @property def is_Singleton(self): """ Checks to see if the permutation contains only one number and is thus the only possible permutation of this set of numbers Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0]).is_Singleton True >>> Permutation([0, 1]).is_Singleton False See Also ======== is_Empty """ return self.size == 1 @property def is_Empty(self): """ Checks to see if the permutation is a set with zero elements Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([]).is_Empty True >>> Permutation([0]).is_Empty False See Also ======== is_Singleton """ return self.size == 0 @property def is_identity(self): return self.is_Identity @property def is_Identity(self): """ Returns True if the Permutation is an identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([]) >>> p.is_Identity True >>> p = Permutation([[0], [1], [2]]) >>> p.is_Identity True >>> p = Permutation([0, 1, 2]) >>> p.is_Identity True >>> p = Permutation([0, 2, 1]) >>> p.is_Identity False See Also ======== order """ af = self.array_form return not af or all(i == af[i] for i in range(self.size)) def ascents(self): """ Returns the positions of ascents in a permutation, ie, the location where p[i] < p[i+1] Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([4, 0, 1, 3, 2]) >>> p.ascents() [1, 2] See Also ======== descents, inversions, min, max """ a = self.array_form pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]] return pos def descents(self): """ Returns the positions of descents in a permutation, ie, the location where p[i] > p[i+1] Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([4, 0, 1, 3, 2]) >>> p.descents() [0, 3] See Also ======== ascents, inversions, min, max """ a = self.array_form pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]] return pos def max(self): """ The maximum element moved by the permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([1, 0, 2, 3, 4]) >>> p.max() 1 See Also ======== min, descents, ascents, inversions """ max = 0 a = self.array_form for i in range(len(a)): if a[i] != i and a[i] > max: max = a[i] return max def min(self): """ The minimum element moved by the permutation. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 4, 3, 2]) >>> p.min() 2 See Also ======== max, descents, ascents, inversions """ a = self.array_form min = len(a) for i in range(len(a)): if a[i] != i and a[i] < min: min = a[i] return min def inversions(self): """ Computes the number of inversions of a permutation. An inversion is where i > j but p[i] < p[j]. For small length of p, it iterates over all i and j values and calculates the number of inversions. For large length of p, it uses a variation of merge sort to calculate the number of inversions. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3, 4, 5]) >>> p.inversions() 0 >>> Permutation([3, 2, 1, 0]).inversions() 6 See Also ======== descents, ascents, min, max References ========== .. [1] http://www.cp.eng.chula.ac.th/~piak/teaching/algo/algo2008/count-inv.htm """ inversions = 0 a = self.array_form n = len(a) if n < 130: for i in range(n - 1): b = a[i] for c in a[i + 1:]: if b > c: inversions += 1 else: k = 1 right = 0 arr = a[:] temp = a[:] while k < n: i = 0 while i + k < n: right = i + k * 2 - 1 if right >= n: right = n - 1 inversions += _merge(arr, temp, i, i + k, right) i = i + k * 2 k = k * 2 return inversions def commutator(self, x): """Return the commutator of self and x: ``~x*~self*x*self`` If f and g are part of a group, G, then the commutator of f and g is the group identity iff f and g commute, i.e. fg == gf. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([0, 2, 3, 1]) >>> x = Permutation([2, 0, 3, 1]) >>> c = p.commutator(x); c Permutation([2, 1, 3, 0]) >>> c == ~x*~p*x*p True >>> I = Permutation(3) >>> p = [I + i for i in range(6)] >>> for i in range(len(p)): ... for j in range(len(p)): ... c = p[i].commutator(p[j]) ... if p[i]*p[j] == p[j]*p[i]: ... assert c == I ... else: ... assert c != I ... References ========== https://en.wikipedia.org/wiki/Commutator """ a = self.array_form b = x.array_form n = len(a) if len(b) != n: raise ValueError("The permutations must be of equal size.") inva = [None]*n for i in range(n): inva[a[i]] = i invb = [None]*n for i in range(n): invb[b[i]] = i return self._af_new([a[b[inva[i]]] for i in invb]) def signature(self): """ Gives the signature of the permutation needed to place the elements of the permutation in canonical order. The signature is calculated as (-1)^<number of inversions> Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2]) >>> p.inversions() 0 >>> p.signature() 1 >>> q = Permutation([0,2,1]) >>> q.inversions() 1 >>> q.signature() -1 See Also ======== inversions """ if self.is_even: return 1 return -1 def order(self): """ Computes the order of a permutation. When the permutation is raised to the power of its order it equals the identity permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([3, 1, 5, 2, 4, 0]) >>> p.order() 4 >>> (p**(p.order())) Permutation([], size=6) See Also ======== identity, cardinality, length, rank, size """ return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1) def length(self): """ Returns the number of integers moved by a permutation. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0, 3, 2, 1]).length() 2 >>> Permutation([[0, 1], [2, 3]]).length() 4 See Also ======== min, max, support, cardinality, order, rank, size """ return len(self.support()) @property def cycle_structure(self): """Return the cycle structure of the permutation as a dictionary indicating the multiplicity of each cycle length. Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation(3).cycle_structure {1: 4} >>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure {2: 2, 3: 1} """ if self._cycle_structure: rv = self._cycle_structure else: rv = defaultdict(int) singletons = self.size for c in self.cyclic_form: rv[len(c)] += 1 singletons -= len(c) if singletons: rv[1] = singletons self._cycle_structure = rv return dict(rv) # make a copy @property def cycles(self): """ Returns the number of cycles contained in the permutation (including singletons). Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation([0, 1, 2]).cycles 3 >>> Permutation([0, 1, 2]).full_cyclic_form [[0], [1], [2]] >>> Permutation(0, 1)(2, 3).cycles 2 See Also ======== sympy.functions.combinatorial.numbers.stirling """ return len(self.full_cyclic_form) def index(self): """ Returns the index of a permutation. The index of a permutation is the sum of all subscripts j such that p[j] is greater than p[j+1]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([3, 0, 2, 1, 4]) >>> p.index() 2 """ a = self.array_form return sum([j for j in range(len(a) - 1) if a[j] > a[j + 1]]) def runs(self): """ Returns the runs of a permutation. An ascending sequence in a permutation is called a run [5]. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8]) >>> p.runs() [[2, 5, 7], [3, 6], [0, 1, 4, 8]] >>> q = Permutation([1,3,2,0]) >>> q.runs() [[1, 3], [2], [0]] """ return runs(self.array_form) def inversion_vector(self): """Return the inversion vector of the permutation. The inversion vector consists of elements whose value indicates the number of elements in the permutation that are lesser than it and lie on its right hand side. The inversion vector is the same as the Lehmer encoding of a permutation. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2]) >>> p.inversion_vector() [4, 7, 0, 5, 0, 2, 1, 1] >>> p = Permutation([3, 2, 1, 0]) >>> p.inversion_vector() [3, 2, 1] The inversion vector increases lexicographically with the rank of the permutation, the -ith element cycling through 0..i. >>> p = Permutation(2) >>> while p: ... print('%s %s %s' % (p, p.inversion_vector(), p.rank())) ... p = p.next_lex() (2) [0, 0] 0 (1 2) [0, 1] 1 (2)(0 1) [1, 0] 2 (0 1 2) [1, 1] 3 (0 2 1) [2, 0] 4 (0 2) [2, 1] 5 See Also ======== from_inversion_vector """ self_array_form = self.array_form n = len(self_array_form) inversion_vector = [0] * (n - 1) for i in range(n - 1): val = 0 for j in range(i + 1, n): if self_array_form[j] < self_array_form[i]: val += 1 inversion_vector[i] = val return inversion_vector def rank_trotterjohnson(self): """ Returns the Trotter Johnson rank, which we get from the minimal change algorithm. See [4] section 2.4. 
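        This rank is consistent with ``unrank_trotterjohnson``; a small
        round-trip check:

        >>> from sympy.combinatorics.permutations import Permutation
        >>> Permutation.unrank_trotterjohnson(5, 10).rank_trotterjohnson()
        10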
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 1, 2, 3]) >>> p.rank_trotterjohnson() 0 >>> p = Permutation([0, 2, 1, 3]) >>> p.rank_trotterjohnson() 7 See Also ======== unrank_trotterjohnson, next_trotterjohnson """ if self.array_form == [] or self.is_Identity: return 0 if self.array_form == [1, 0]: return 1 perm = self.array_form n = self.size rank = 0 for j in range(1, n): k = 1 i = 0 while perm[i] != j: if perm[i] < j: k += 1 i += 1 j1 = j + 1 if rank % 2 == 0: rank = j1*rank + j1 - k else: rank = j1*rank + k - 1 return rank @classmethod def unrank_trotterjohnson(cls, size, rank): """ Trotter Johnson permutation unranking. See [4] section 2.4. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation.unrank_trotterjohnson(5, 10) Permutation([0, 3, 1, 2, 4]) See Also ======== rank_trotterjohnson, next_trotterjohnson """ perm = [0]*size r2 = 0 n = ifac(size) pj = 1 for j in range(2, size + 1): pj *= j r1 = (rank * pj) // n k = r1 - j*r2 if r2 % 2 == 0: for i in range(j - 1, j - k - 1, -1): perm[i] = perm[i - 1] perm[j - k - 1] = j - 1 else: for i in range(j - 1, k, -1): perm[i] = perm[i - 1] perm[k] = j - 1 r2 = r1 return cls._af_new(perm) def next_trotterjohnson(self): """ Returns the next permutation in Trotter-Johnson order. If self is the last permutation it returns None. See [4] section 2.4. If it is desired to generate all such permutations, they can be generated in order more quickly with the ``generate_bell`` function. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation([3, 0, 2, 1]) >>> p.rank_trotterjohnson() 4 >>> p = p.next_trotterjohnson(); p Permutation([0, 3, 2, 1]) >>> p.rank_trotterjohnson() 5 See Also ======== rank_trotterjohnson, unrank_trotterjohnson, sympy.utilities.iterables.generate_bell """ pi = self.array_form[:] n = len(pi) st = 0 rho = pi[:] done = False m = n-1 while m > 0 and not done: d = rho.index(m) for i in range(d, m): rho[i] = rho[i + 1] par = _af_parity(rho[:m]) if par == 1: if d == m: m -= 1 else: pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d] done = True else: if d == 0: m -= 1 st += 1 else: pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d] done = True if m == 0: return None return self._af_new(pi) def get_precedence_matrix(self): """ Gets the precedence matrix. This is used for computing the distance between two permutations. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> p = Permutation.josephus(3, 6, 1) >>> p Permutation([2, 5, 3, 1, 4, 0]) >>> p.get_precedence_matrix() Matrix([ [0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 0, 0, 1, 0], [1, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0]]) See Also ======== get_precedence_distance, get_adjacency_matrix, get_adjacency_distance """ m = zeros(self.size) perm = self.array_form for i in range(m.rows): for j in range(i + 1, m.cols): m[perm[i], perm[j]] = 1 return m def get_precedence_distance(self, other): """ Computes the precedence distance between two permutations. Suppose p and p' represent n jobs. The precedence metric counts the number of times a job j is preceded by job i in both p and p'. 
This metric is commutative. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([2, 0, 4, 3, 1]) >>> q = Permutation([3, 1, 2, 4, 0]) >>> p.get_precedence_distance(q) 7 >>> q.get_precedence_distance(p) 7 See Also ======== get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance """ if self.size != other.size: raise ValueError("The permutations must be of equal size.") self_prec_mat = self.get_precedence_matrix() other_prec_mat = other.get_precedence_matrix() n_prec = 0 for i in range(self.size): for j in range(self.size): if i == j: continue if self_prec_mat[i, j] * other_prec_mat[i, j] == 1: n_prec += 1 d = self.size * (self.size - 1)//2 - n_prec return d def get_adjacency_matrix(self): """ Computes the adjacency matrix of a permutation. If job i is adjacent to job j in a permutation p then we set m[i, j] = 1 where m is the adjacency matrix of p. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation.josephus(3, 6, 1) >>> p.get_adjacency_matrix() Matrix([ [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]) >>> q = Permutation([0, 1, 2, 3]) >>> q.get_adjacency_matrix() Matrix([ [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]) See Also ======== get_precedence_matrix, get_precedence_distance, get_adjacency_distance """ m = zeros(self.size) perm = self.array_form for i in range(self.size - 1): m[perm[i], perm[i + 1]] = 1 return m def get_adjacency_distance(self, other): """ Computes the adjacency distance between two permutations. This metric counts the number of times a pair i,j of jobs is adjacent in both p and p'. If n_adj is this quantity then the adjacency distance is n - n_adj - 1 [1] [1] Reeves, Colin R. Landscapes, Operators and Heuristic search, Annals of Operational Research, 86, pp 473-490. (1999) Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 3, 1, 2, 4]) >>> q = Permutation.josephus(4, 5, 2) >>> p.get_adjacency_distance(q) 3 >>> r = Permutation([0, 2, 1, 4, 3]) >>> p.get_adjacency_distance(r) 4 See Also ======== get_precedence_matrix, get_precedence_distance, get_adjacency_matrix """ if self.size != other.size: raise ValueError("The permutations must be of the same size.") self_adj_mat = self.get_adjacency_matrix() other_adj_mat = other.get_adjacency_matrix() n_adj = 0 for i in range(self.size): for j in range(self.size): if i == j: continue if self_adj_mat[i, j] * other_adj_mat[i, j] == 1: n_adj += 1 d = self.size - n_adj - 1 return d def get_positional_distance(self, other): """ Computes the positional distance between two permutations. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> p = Permutation([0, 3, 1, 2, 4]) >>> q = Permutation.josephus(4, 5, 2) >>> r = Permutation([3, 1, 4, 0, 2]) >>> p.get_positional_distance(q) 12 >>> p.get_positional_distance(r) 12 See Also ======== get_precedence_distance, get_adjacency_distance """ a = self.array_form b = other.array_form if len(a) != len(b): raise ValueError("The permutations must be of the same size.") return sum([abs(a[i] - b[i]) for i in range(len(a))]) @classmethod def josephus(cls, m, n, s=1): """Return as a permutation the shuffling of range(n) using the Josephus scheme in which every m-th item is selected until all have been chosen. The returned permutation has elements listed by the order in which they were selected. 
The parameter ``s`` stops the selection process when there are ``s`` items remaining and these are selected by continuing the selection, counting by 1 rather than by ``m``. Consider selecting every 3rd item from 6 until only 2 remain:: choices chosen ======== ====== 012345 01 345 2 01 34 25 01 4 253 0 4 2531 0 25314 253140 Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation.josephus(3, 6, 2).array_form [2, 5, 3, 1, 4, 0] References ========== .. [1] https://en.wikipedia.org/wiki/Flavius_Josephus .. [2] https://en.wikipedia.org/wiki/Josephus_problem .. [3] http://www.wou.edu/~burtonl/josephus.html """ from collections import deque m -= 1 Q = deque(list(range(n))) perm = [] while len(Q) > max(s, 1): for dp in range(m): Q.append(Q.popleft()) perm.append(Q.popleft()) perm.extend(list(Q)) return cls(perm) @classmethod def from_inversion_vector(cls, inversion): """ Calculates the permutation from the inversion vector. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> Permutation.from_inversion_vector([3, 2, 1, 0, 0]) Permutation([3, 2, 1, 0, 4, 5]) """ size = len(inversion) N = list(range(size + 1)) perm = [] try: for k in range(size): val = N[inversion[k]] perm.append(val) N.remove(val) except IndexError: raise ValueError("The inversion vector is not valid.") perm.extend(N) return cls._af_new(perm) @classmethod def random(cls, n): """ Generates a random permutation of length ``n``. Uses the underlying Python pseudo-random number generator. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1])) True """ perm_array = list(range(n)) random.shuffle(perm_array) return cls._af_new(perm_array) @classmethod def unrank_lex(cls, size, rank): """ Lexicographic permutation unranking. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> init_printing(perm_cyclic=False, pretty_print=False) >>> a = Permutation.unrank_lex(5, 10) >>> a.rank() 10 >>> a Permutation([0, 2, 4, 1, 3]) See Also ======== rank, next_lex """ perm_array = [0] * size psize = 1 for i in range(size): new_psize = psize*(i + 1) d = (rank % new_psize) // psize rank -= d*psize perm_array[size - i - 1] = d for j in range(size - i, size): if perm_array[j] > d - 1: perm_array[j] += 1 psize = new_psize return cls._af_new(perm_array) def resize(self, n): """Resize the permutation to the new size ``n``. Parameters ========== n : int The new size of the permutation. Raises ====== ValueError If the permutation cannot be resized to the given size. This may only happen when resized to a smaller size than the original. Examples ======== >>> from sympy.combinatorics.permutations import Permutation Increasing the size of a permutation: >>> p = Permutation(0, 1, 2) >>> p = p.resize(5) >>> p (4)(0 1 2) Decreasing the size of the permutation: >>> p = p.resize(4) >>> p (3)(0 1 2) If resizing to the specific size breaks the cycles: >>> p.resize(2) Traceback (most recent call last): ... ValueError: The permutation can not be resized to 2 because the cycle (0, 1, 2) may break. 
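        Resizing down is fine as long as no cycle would be split; an extra
        illustrative check with the same permutation:

        >>> p.resize(3)
        (0 1 2)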
""" aform = self.array_form l = len(aform) if n > l: aform += list(range(l, n)) return Permutation._af_new(aform) elif n < l: cyclic_form = self.full_cyclic_form new_cyclic_form = [] for cycle in cyclic_form: cycle_min = min(cycle) cycle_max = max(cycle) if cycle_min <= n-1: if cycle_max > n-1: raise ValueError( "The permutation can not be resized to {} " "because the cycle {} may break." .format(n, tuple(cycle))) new_cyclic_form.append(cycle) return Permutation(new_cyclic_form) return self # XXX Deprecated flag print_cyclic = None def _merge(arr, temp, left, mid, right): """ Merges two sorted arrays and calculates the inversion count. Helper function for calculating inversions. This method is for internal use only. """ i = k = left j = mid inv_count = 0 while i < mid and j <= right: if arr[i] < arr[j]: temp[k] = arr[i] k += 1 i += 1 else: temp[k] = arr[j] k += 1 j += 1 inv_count += (mid -i) while i < mid: temp[k] = arr[i] k += 1 i += 1 if j <= right: k += right - j + 1 j += right - j + 1 arr[left:k + 1] = temp[left:k + 1] else: arr[left:right + 1] = temp[left:right + 1] return inv_count Perm = Permutation _af_new = Perm._af_new class AppliedPermutation(Expr): """A permutation applied to a symbolic variable. Parameters ========== perm : Permutation x : Expr Examples ======== >>> from sympy import Symbol >>> from sympy.combinatorics import Permutation Creating a symbolic permutation function application: >>> x = Symbol('x') >>> p = Permutation(0, 1, 2) >>> p.apply(x) AppliedPermutation((0 1 2), x) >>> _.subs(x, 1) 2 """ def __new__(cls, perm, x, evaluate=None): if evaluate is None: evaluate = global_parameters.evaluate perm = _sympify(perm) x = _sympify(x) if not isinstance(perm, Permutation): raise ValueError("{} must be a Permutation instance." .format(perm)) if evaluate: if x.is_Integer: return perm.apply(x) obj = super(AppliedPermutation, cls).__new__(cls, perm, x) return obj
2cab8b824011af3582ae006654ccc1a2a3845c3dd51947f04df0fbb10b41c6cf
from __future__ import print_function, division from itertools import combinations from sympy.combinatorics.graycode import GrayCode from sympy.core import Basic class Subset(Basic): """ Represents a basic subset object. We generate subsets using essentially two techniques, binary enumeration and lexicographic enumeration. The Subset class takes two arguments, the first one describes the initial subset to consider and the second describes the superset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.next_binary().subset ['b'] >>> a.prev_binary().subset ['c'] """ _rank_binary = None _rank_lex = None _rank_graycode = None _subset = None _superset = None def __new__(cls, subset, superset): """ Default constructor. It takes the subset and its superset as its parameters. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.subset ['c', 'd'] >>> a.superset ['a', 'b', 'c', 'd'] >>> a.size 2 """ if len(subset) > len(superset): raise ValueError('Invalid arguments have been provided. The ' 'superset must be larger than the subset.') for elem in subset: if elem not in superset: raise ValueError('The superset provided is invalid as it does ' 'not contain the element {}'.format(elem)) obj = Basic.__new__(cls) obj._subset = subset obj._superset = superset return obj def iterate_binary(self, k): """ This is a helper function. It iterates over the binary subsets by k steps. This variable can be both positive or negative. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.iterate_binary(-2).subset ['d'] >>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd']) >>> a.iterate_binary(2).subset [] See Also ======== next_binary, prev_binary """ bin_list = Subset.bitlist_from_subset(self.subset, self.superset) n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size bits = bin(n)[2:].rjust(self.superset_size, '0') return Subset.subset_from_bitlist(self.superset, bits) def next_binary(self): """ Generates the next binary ordered subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.next_binary().subset ['b'] >>> a = Subset(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']) >>> a.next_binary().subset [] See Also ======== prev_binary, iterate_binary """ return self.iterate_binary(1) def prev_binary(self): """ Generates the previous binary ordered subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset([], ['a', 'b', 'c', 'd']) >>> a.prev_binary().subset ['a', 'b', 'c', 'd'] >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.prev_binary().subset ['c'] See Also ======== next_binary, iterate_binary """ return self.iterate_binary(-1) def next_lexicographic(self): """ Generates the next lexicographically ordered subset. 
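        Subsets are ordered here by the positions their elements occupy in
        the superset, with a subset appearing before its extensions, so for
        a four-element superset the sequence begins [], ['a'], ['a', 'b'],
        ['a', 'b', 'c'], and so on (a small illustrative check):

        >>> from sympy.combinatorics.subsets import Subset
        >>> Subset([], ['a', 'b', 'c', 'd']).next_lexicographic().subset
        ['a']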
Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.next_lexicographic().subset ['d'] >>> a = Subset(['d'], ['a', 'b', 'c', 'd']) >>> a.next_lexicographic().subset [] See Also ======== prev_lexicographic """ i = self.superset_size - 1 indices = Subset.subset_indices(self.subset, self.superset) if i in indices: if i - 1 in indices: indices.remove(i - 1) else: indices.remove(i) i = i - 1 while not i in indices and i >= 0: i = i - 1 if i >= 0: indices.remove(i) indices.append(i+1) else: while i not in indices and i >= 0: i = i - 1 indices.append(i + 1) ret_set = [] super_set = self.superset for i in indices: ret_set.append(super_set[i]) return Subset(ret_set, super_set) def prev_lexicographic(self): """ Generates the previous lexicographically ordered subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset([], ['a', 'b', 'c', 'd']) >>> a.prev_lexicographic().subset ['d'] >>> a = Subset(['c','d'], ['a', 'b', 'c', 'd']) >>> a.prev_lexicographic().subset ['c'] See Also ======== next_lexicographic """ i = self.superset_size - 1 indices = Subset.subset_indices(self.subset, self.superset) while i not in indices and i >= 0: i = i - 1 if i - 1 in indices or i == 0: indices.remove(i) else: if i >= 0: indices.remove(i) indices.append(i - 1) indices.append(self.superset_size - 1) ret_set = [] super_set = self.superset for i in indices: ret_set.append(super_set[i]) return Subset(ret_set, super_set) def iterate_graycode(self, k): """ Helper function used for prev_gray and next_gray. It performs k step overs to get the respective Gray codes. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) >>> a.iterate_graycode(3).subset [1, 4] >>> a.iterate_graycode(-2).subset [1, 2, 4] See Also ======== next_gray, prev_gray """ unranked_code = GrayCode.unrank(self.superset_size, (self.rank_gray + k) % self.cardinality) return Subset.subset_from_bitlist(self.superset, unranked_code) def next_gray(self): """ Generates the next Gray code ordered subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) >>> a.next_gray().subset [1, 3] See Also ======== iterate_graycode, prev_gray """ return self.iterate_graycode(1) def prev_gray(self): """ Generates the previous Gray code ordered subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset([2, 3, 4], [1, 2, 3, 4, 5]) >>> a.prev_gray().subset [2, 3, 4, 5] See Also ======== iterate_graycode, next_gray """ return self.iterate_graycode(-1) @property def rank_binary(self): """ Computes the binary ordered rank. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset([], ['a','b','c','d']) >>> a.rank_binary 0 >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.rank_binary 3 See Also ======== iterate_binary, unrank_binary """ if self._rank_binary is None: self._rank_binary = int("".join( Subset.bitlist_from_subset(self.subset, self.superset)), 2) return self._rank_binary @property def rank_lexicographic(self): """ Computes the lexicographic ranking of the subset. 
Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.rank_lexicographic 14 >>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6]) >>> a.rank_lexicographic 43 """ if self._rank_lex is None: def _ranklex(self, subset_index, i, n): if subset_index == [] or i > n: return 0 if i in subset_index: subset_index.remove(i) return 1 + _ranklex(self, subset_index, i + 1, n) return 2**(n - i - 1) + _ranklex(self, subset_index, i + 1, n) indices = Subset.subset_indices(self.subset, self.superset) self._rank_lex = _ranklex(self, indices, 0, self.superset_size) return self._rank_lex @property def rank_gray(self): """ Computes the Gray code ranking of the subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c','d'], ['a','b','c','d']) >>> a.rank_gray 2 >>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6]) >>> a.rank_gray 27 See Also ======== iterate_graycode, unrank_gray """ if self._rank_graycode is None: bits = Subset.bitlist_from_subset(self.subset, self.superset) self._rank_graycode = GrayCode(len(bits), start=bits).rank return self._rank_graycode @property def subset(self): """ Gets the subset represented by the current instance. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.subset ['c', 'd'] See Also ======== superset, size, superset_size, cardinality """ return self._subset @property def size(self): """ Gets the size of the subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.size 2 See Also ======== subset, superset, superset_size, cardinality """ return len(self.subset) @property def superset(self): """ Gets the superset of the subset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.superset ['a', 'b', 'c', 'd'] See Also ======== subset, size, superset_size, cardinality """ return self._superset @property def superset_size(self): """ Returns the size of the superset. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.superset_size 4 See Also ======== subset, superset, size, cardinality """ return len(self.superset) @property def cardinality(self): """ Returns the number of all possible subsets. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd']) >>> a.cardinality 16 See Also ======== subset, superset, size, superset_size """ return 2**(self.superset_size) @classmethod def subset_from_bitlist(self, super_set, bitlist): """ Gets the subset defined by the bitlist. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> Subset.subset_from_bitlist(['a', 'b', 'c', 'd'], '0011').subset ['c', 'd'] See Also ======== bitlist_from_subset """ if len(super_set) != len(bitlist): raise ValueError("The sizes of the lists are not equal") ret_set = [] for i in range(len(bitlist)): if bitlist[i] == '1': ret_set.append(super_set[i]) return Subset(ret_set, super_set) @classmethod def bitlist_from_subset(self, subset, superset): """ Gets the bitlist corresponding to a subset. 
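The bitlist is returned as a string with one character per superset element, in superset order, where ``'1'`` marks membership in the subset and ``'0'`` marks absence.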
Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> Subset.bitlist_from_subset(['c', 'd'], ['a', 'b', 'c', 'd']) '0011' See Also ======== subset_from_bitlist """ bitlist = ['0'] * len(superset) if type(subset) is Subset: subset = subset.subset for i in Subset.subset_indices(subset, superset): bitlist[i] = '1' return ''.join(bitlist) @classmethod def unrank_binary(self, rank, superset): """ Gets the binary ordered subset of the specified rank. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset ['b'] See Also ======== iterate_binary, rank_binary """ bits = bin(rank)[2:].rjust(len(superset), '0') return Subset.subset_from_bitlist(superset, bits) @classmethod def unrank_gray(self, rank, superset): """ Gets the Gray code ordered subset of the specified rank. Examples ======== >>> from sympy.combinatorics.subsets import Subset >>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset ['a', 'b'] >>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset [] See Also ======== iterate_graycode, rank_gray """ graycode_bitlist = GrayCode.unrank(len(superset), rank) return Subset.subset_from_bitlist(superset, graycode_bitlist) @classmethod def subset_indices(self, subset, superset): """Return indices of subset in superset in a list; the list is empty if all elements of subset are not in superset. Examples ======== >>> from sympy.combinatorics import Subset >>> superset = [1, 3, 2, 5, 4] >>> Subset.subset_indices([3, 2, 1], superset) [1, 2, 0] >>> Subset.subset_indices([1, 6], superset) [] >>> Subset.subset_indices([], superset) [] """ a, b = superset, subset sb = set(b) d = {} for i, ai in enumerate(a): if ai in sb: d[ai] = i sb.remove(ai) if not sb: break else: return list() return [d[bi] for bi in b] def ksubsets(superset, k): """ Finds the subsets of size k in lexicographic order. This uses the itertools generator. Examples ======== >>> from sympy.combinatorics.subsets import ksubsets >>> list(ksubsets([1, 2, 3], 2)) [(1, 2), (1, 3), (2, 3)] >>> list(ksubsets([1, 2, 3, 4, 5], 2)) [(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), \ (2, 5), (3, 4), (3, 5), (4, 5)] See Also ======== Subset """ return combinations(superset, k)
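# A minimal usage sketch (the helper below is illustrative only and not part
# of the public API): ``rank_binary`` and ``unrank_binary`` are inverse
# operations, so ranking a subset and unranking the result recovers the same
# subset.
def _example_binary_roundtrip():
    # '0101' is the bitlist of ['b', 'd'] in ['a', 'b', 'c', 'd'], i.e. rank 5
    s = Subset(['b', 'd'], ['a', 'b', 'c', 'd'])
    assert s.rank_binary == 5
    t = Subset.unrank_binary(s.rank_binary, s.superset)
    assert t.subset == ['b', 'd']   # round trip recovers the subset
    return t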
7207e6ee823af65143d6a1e0503cc9ab3d3216ee1b970c7cbaeb6356ee346530
from __future__ import print_function, division from sympy.combinatorics.perm_groups import PermutationGroup from sympy.combinatorics.permutations import Permutation from sympy.utilities.iterables import uniq _af_new = Permutation._af_new def DirectProduct(*groups): """ Returns the direct product of several groups as a permutation group. This is implemented much like the __mul__ procedure for taking the direct product of two permutation groups, but the idea of shifting the generators is realized in the case of an arbitrary number of groups. A call to DirectProduct(G1, G2, ..., Gn) is generally expected to be faster than a call to G1*G2*...*Gn (and thus the need for this algorithm). Examples ======== >>> from sympy.combinatorics.group_constructs import DirectProduct >>> from sympy.combinatorics.named_groups import CyclicGroup >>> C = CyclicGroup(4) >>> G = DirectProduct(C, C, C) >>> G.order() 64 See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.__mul__ """ degrees = [] gens_count = [] total_degree = 0 total_gens = 0 for group in groups: current_deg = group.degree current_num_gens = len(group.generators) degrees.append(current_deg) total_degree += current_deg gens_count.append(current_num_gens) total_gens += current_num_gens array_gens = [] for i in range(total_gens): array_gens.append(list(range(total_degree))) current_gen = 0 current_deg = 0 for i in range(len(gens_count)): for j in range(current_gen, current_gen + gens_count[i]): gen = ((groups[i].generators)[j - current_gen]).array_form array_gens[j][current_deg:current_deg + degrees[i]] = \ [x + current_deg for x in gen] current_gen += gens_count[i] current_deg += degrees[i] perm_gens = list(uniq([_af_new(list(a)) for a in array_gens])) return PermutationGroup(perm_gens, dups=False)
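# A minimal usage sketch (the helper below is illustrative only and not part
# of the public API): the direct product acts on the disjoint union of the
# factors' points, so its degree is the sum of the factor degrees and its
# order is the product of the factor orders.
def _example_direct_product():
    from sympy.combinatorics.named_groups import CyclicGroup
    C = CyclicGroup(4)
    G = DirectProduct(C, C)
    assert G.degree == 8      # 4 + 4 points
    assert G.order() == 16    # 4 * 4 elements
    return G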
c02919cbd3bdd1a353b5db3fc41b32f88d1ce2c8b395a653e3fc273c1b0f91bb
from __future__ import print_function, division from typing import Dict, List from sympy.core import S from sympy.core.compatibility import is_sequence, as_int from sympy.core.expr import Expr from sympy.core.symbol import Symbol, symbols as _symbols from sympy.core.sympify import CantSympify from sympy.printing.defaults import DefaultPrinting from sympy.utilities import public from sympy.utilities.iterables import flatten from sympy.utilities.magic import pollute @public def free_group(symbols): """Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1))``. Parameters ========== symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty) Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y, z = free_group("x, y, z") >>> F <free group on the generators (x, y, z)> >>> x**2*y**-1 x**2*y**-1 >>> type(_) <class 'sympy.combinatorics.free_groups.FreeGroupElement'> """ _free_group = FreeGroup(symbols) return (_free_group,) + tuple(_free_group.generators) @public def xfree_group(symbols): """Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1)))``. Parameters ========== symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty) Examples ======== >>> from sympy.combinatorics.free_groups import xfree_group >>> F, (x, y, z) = xfree_group("x, y, z") >>> F <free group on the generators (x, y, z)> >>> y**2*x**-2*z**-1 y**2*x**-2*z**-1 >>> type(_) <class 'sympy.combinatorics.free_groups.FreeGroupElement'> """ _free_group = FreeGroup(symbols) return (_free_group, _free_group.generators) @public def vfree_group(symbols): """Construct a free group and inject ``f_0, f_1, ..., f_(n-1)`` as symbols into the global namespace. Parameters ========== symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty) Examples ======== >>> from sympy.combinatorics.free_groups import vfree_group >>> vfree_group("x, y, z") <free group on the generators (x, y, z)> >>> x**2*y**-2*z x**2*y**-2*z >>> type(_) <class 'sympy.combinatorics.free_groups.FreeGroupElement'> """ _free_group = FreeGroup(symbols) pollute([sym.name for sym in _free_group.symbols], _free_group.generators) return _free_group def _parse_symbols(symbols): if not symbols: return tuple() if isinstance(symbols, str): return _symbols(symbols, seq=True) elif isinstance(symbols, Expr or FreeGroupElement): return (symbols,) elif is_sequence(symbols): if all(isinstance(s, str) for s in symbols): return _symbols(symbols) elif all(isinstance(s, Expr) for s in symbols): return symbols raise ValueError("The type of `symbols` must be one of the following: " "a str, Symbol/Expr or a sequence of " "one of these types") ############################################################################## # FREE GROUP # ############################################################################## _free_group_cache = {} # type: Dict[int, FreeGroup] class FreeGroup(DefaultPrinting): """ Free group with finite or infinite number of generators. Its input API is that of a str, Symbol/Expr or a sequence of one of these types (which may be empty) See Also ======== sympy.polys.rings.PolyRing References ========== .. [1] http://www.gap-system.org/Manuals/doc/ref/chap37.html .. 
[2] https://en.wikipedia.org/wiki/Free_group """ is_associative = True is_group = True is_FreeGroup = True is_PermutationGroup = False relators = [] # type: List[Expr] def __new__(cls, symbols): symbols = tuple(_parse_symbols(symbols)) rank = len(symbols) _hash = hash((cls.__name__, symbols, rank)) obj = _free_group_cache.get(_hash) if obj is None: obj = object.__new__(cls) obj._hash = _hash obj._rank = rank # dtype method is used to create new instances of FreeGroupElement obj.dtype = type("FreeGroupElement", (FreeGroupElement,), {"group": obj}) obj.symbols = symbols obj.generators = obj._generators() obj._gens_set = set(obj.generators) for symbol, generator in zip(obj.symbols, obj.generators): if isinstance(symbol, Symbol): name = symbol.name if hasattr(obj, name): setattr(obj, name, generator) _free_group_cache[_hash] = obj return obj def _generators(group): """Returns the generators of the FreeGroup. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y, z = free_group("x, y, z") >>> F.generators (x, y, z) """ gens = [] for sym in group.symbols: elm = ((sym, 1),) gens.append(group.dtype(elm)) return tuple(gens) def clone(self, symbols=None): return self.__class__(symbols or self.symbols) def __contains__(self, i): """Return True if ``i`` is contained in FreeGroup.""" if not isinstance(i, FreeGroupElement): return False group = i.group return self == group def __hash__(self): return self._hash def __len__(self): return self.rank def __str__(self): if self.rank > 30: str_form = "<free group with %s generators>" % self.rank else: str_form = "<free group on the generators " gens = self.generators str_form += str(gens) + ">" return str_form __repr__ = __str__ def __getitem__(self, index): symbols = self.symbols[index] return self.clone(symbols=symbols) def __eq__(self, other): """No ``FreeGroup`` is equal to any "other" ``FreeGroup``. """ return self is other def index(self, gen): """Return the index of the generator `gen` from ``(f_0, ..., f_(n-1))``. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> F.index(y) 1 >>> F.index(x) 0 """ if isinstance(gen, self.dtype): return self.generators.index(gen) else: raise ValueError("expected a generator of Free Group %s, got %s" % (self, gen)) def order(self): """Return the order of the free group. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> F.order() oo >>> free_group("")[0].order() 1 """ if self.rank == 0: return 1 else: return S.Infinity @property def elements(self): """ Return the elements of the free group. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> (z,) = free_group("") >>> z.elements {<identity>} """ if self.rank == 0: # A set containing Identity element of `FreeGroup` self is returned return {self.identity} else: raise ValueError("Group contains infinitely many elements" ", hence can't be represented") @property def rank(self): r""" In group theory, the `rank` of a group `G`, denoted `G.rank`, can refer to the smallest cardinality of a generating set for G, that is \operatorname{rank}(G)=\min\{ |X|: X\subseteq G, \left\langle X\right\rangle =G\}. """ return self._rank @property def is_abelian(self): """Returns if the group is Abelian. 
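A free group is abelian exactly when its rank is 0 (the trivial group) or 1 (an infinite cyclic group); for rank 2 or more the generators do not commute.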
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y, z = free_group("x y z") >>> f.is_abelian False """ if self.rank == 0 or self.rank == 1: return True else: return False @property def identity(self): """Returns the identity element of free group.""" return self.dtype() def contains(self, g): """Tests if Free Group element ``g`` belong to self, ``G``. In mathematical terms any linear combination of generators of a Free Group is contained in it. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y, z = free_group("x y z") >>> f.contains(x**3*y**2) True """ if not isinstance(g, FreeGroupElement): return False elif self != g.group: return False else: return True def center(self): """Returns the center of the free group `self`.""" return {self.identity} ############################################################################ # FreeGroupElement # ############################################################################ class FreeGroupElement(CantSympify, DefaultPrinting, tuple): """Used to create elements of FreeGroup. It can not be used directly to create a free group element. It is called by the `dtype` method of the `FreeGroup` class. """ is_assoc_word = True def new(self, init): return self.__class__(init) _hash = None def __hash__(self): _hash = self._hash if _hash is None: self._hash = _hash = hash((self.group, frozenset(tuple(self)))) return _hash def copy(self): return self.new(self) @property def is_identity(self): if self.array_form == tuple(): return True else: return False @property def array_form(self): """ SymPy provides two different internal kinds of representation of associative words. The first one is called the `array_form` which is a tuple containing `tuples` as its elements, where the size of each tuple is two. At the first position the tuple contains the `symbol-generator`, while at the second position of tuple contains the exponent of that generator at the position. Since elements (i.e. words) don't commute, the indexing of tuple makes that property to stay. The structure in ``array_form`` of ``FreeGroupElement`` is of form: ``( ( symbol_of_gen , exponent ), ( , ), ... ( , ) )`` Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y, z = free_group("x y z") >>> (x*z).array_form ((x, 1), (z, 1)) >>> (x**2*z*y*x**2).array_form ((x, 2), (z, 1), (y, 1), (x, 2)) See Also ======== letter_repr """ return tuple(self) @property def letter_form(self): """ The letter representation of a ``FreeGroupElement`` is a tuple of generator symbols, with each entry corresponding to a group generator. Inverses of the generators are represented by negative generator symbols. 
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b, c, d = free_group("a b c d") >>> (a**3).letter_form (a, a, a) >>> (a**2*d**-2*a*b**-4).letter_form (a, a, -d, -d, a, -b, -b, -b, -b) >>> (a**-2*b**3*d).letter_form (-a, -a, b, b, b, d) See Also ======== array_form """ return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j) for i, j in self.array_form])) def __getitem__(self, i): group = self.group r = self.letter_form[i] if r.is_Symbol: return group.dtype(((r, 1),)) else: return group.dtype(((-r, -1),)) def index(self, gen): if len(gen) != 1: raise ValueError() return (self.letter_form).index(gen.letter_form[0]) @property def letter_form_elm(self): """ """ group = self.group r = self.letter_form return [group.dtype(((elm,1),)) if elm.is_Symbol \ else group.dtype(((-elm,-1),)) for elm in r] @property def ext_rep(self): """This is called the External Representation of ``FreeGroupElement`` """ return tuple(flatten(self.array_form)) def __contains__(self, gen): return gen.array_form[0][0] in tuple([r[0] for r in self.array_form]) def __str__(self): if self.is_identity: return "<identity>" str_form = "" array_form = self.array_form for i in range(len(array_form)): if i == len(array_form) - 1: if array_form[i][1] == 1: str_form += str(array_form[i][0]) else: str_form += str(array_form[i][0]) + \ "**" + str(array_form[i][1]) else: if array_form[i][1] == 1: str_form += str(array_form[i][0]) + "*" else: str_form += str(array_form[i][0]) + \ "**" + str(array_form[i][1]) + "*" return str_form __repr__ = __str__ def __pow__(self, n): n = as_int(n) group = self.group if n == 0: return group.identity if n < 0: n = -n return (self.inverse())**n result = self for i in range(n - 1): result = result*self # this method can be improved instead of just returning the # multiplication of elements return result def __mul__(self, other): """Returns the product of elements belonging to the same ``FreeGroup``. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y, z = free_group("x y z") >>> x*y**2*y**-4 x*y**-2 >>> z*y**-2 z*y**-2 >>> x**2*y*y**-1*x**-2 <identity> """ group = self.group if not isinstance(other, group.dtype): raise TypeError("only FreeGroup elements of same FreeGroup can " "be multiplied") if self.is_identity: return other if other.is_identity: return self r = list(self.array_form + other.array_form) zero_mul_simp(r, len(self.array_form) - 1) return group.dtype(tuple(r)) def __div__(self, other): group = self.group if not isinstance(other, group.dtype): raise TypeError("only FreeGroup elements of same FreeGroup can " "be multiplied") return self*(other.inverse()) def __rdiv__(self, other): group = self.group if not isinstance(other, group.dtype): raise TypeError("only FreeGroup elements of same FreeGroup can " "be multiplied") return other*(self.inverse()) __truediv__ = __div__ __rtruediv__ = __rdiv__ def __add__(self, other): return NotImplemented def inverse(self): """ Returns the inverse of a ``FreeGroupElement`` element Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y, z = free_group("x y z") >>> x.inverse() x**-1 >>> (x*y).inverse() y**-1*x**-1 """ group = self.group r = tuple([(i, -j) for i, j in self.array_form[::-1]]) return group.dtype(r) def order(self): """Find the order of a ``FreeGroupElement``. 
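In a free group only the identity has finite order, so this returns ``1`` for the identity element and ``oo`` for every other word.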
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y = free_group("x y") >>> (x**2*y*y**-1*x**-2).order() 1 """ if self.is_identity: return 1 else: return S.Infinity def commutator(self, other): """ Return the commutator of `self` and `x`: ``~x*~self*x*self`` """ group = self.group if not isinstance(other, group.dtype): raise ValueError("commutator of only FreeGroupElement of the same " "FreeGroup exists") else: return self.inverse()*other.inverse()*self*other def eliminate_words(self, words, _all=False, inverse=True): ''' Replace each subword from the dictionary `words` by words[subword]. If words is a list, replace the words by the identity. ''' again = True new = self if isinstance(words, dict): while again: again = False for sub in words: prev = new new = new.eliminate_word(sub, words[sub], _all=_all, inverse=inverse) if new != prev: again = True else: while again: again = False for sub in words: prev = new new = new.eliminate_word(sub, _all=_all, inverse=inverse) if new != prev: again = True return new def eliminate_word(self, gen, by=None, _all=False, inverse=True): """ For an associative word `self`, a subword `gen`, and an associative word `by` (identity by default), return the associative word obtained by replacing each occurrence of `gen` in `self` by `by`. If `_all = True`, the occurrences of `gen` that may appear after the first substitution will also be replaced and so on until no occurrences are found. This might not always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`). Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y = free_group("x y") >>> w = x**5*y*x**2*y**-4*x >>> w.eliminate_word( x, x**2 ) x**10*y*x**4*y**-4*x**2 >>> w.eliminate_word( x, y**-1 ) y**-11 >>> w.eliminate_word(x**5) y*x**2*y**-4*x >>> w.eliminate_word(x*y, y) x**4*y*x**2*y**-4*x See Also ======== substituted_word """ if by is None: by = self.group.identity if self.is_independent(gen) or gen == by: return self if gen == self: return by if gen**-1 == by: _all = False word = self l = len(gen) try: i = word.subword_index(gen) k = 1 except ValueError: if not inverse: return word try: i = word.subword_index(gen**-1) k = -1 except ValueError: return word word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by) if _all: return word.eliminate_word(gen, by, _all=True, inverse=inverse) else: return word def __len__(self): """ For an associative word `self`, returns the number of letters in it. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a b") >>> w = a**5*b*a**2*b**-4*a >>> len(w) 13 >>> len(a**17) 17 >>> len(w**0) 0 """ return sum(abs(j) for (i, j) in self) def __eq__(self, other): """ Two associative words are equal if they are words over the same alphabet and if they are sequences of the same letters. This is equivalent to saying that the external representations of the words are equal. There is no "universal" empty word, every alphabet has its own empty word. 
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1") >>> f <free group on the generators (swapnil0, swapnil1)> >>> g, swap0, swap1 = free_group("swap0 swap1") >>> g <free group on the generators (swap0, swap1)> >>> swapnil0 == swapnil1 False >>> swapnil0*swapnil1 == swapnil1/swapnil1*swapnil0*swapnil1 True >>> swapnil0*swapnil1 == swapnil1*swapnil0 False >>> swapnil1**0 == swap0**0 False """ group = self.group if not isinstance(other, group.dtype): return False return tuple.__eq__(self, other) def __lt__(self, other): """ The ordering of associative words is defined by length and lexicography (this ordering is called short-lex ordering), that is, shorter words are smaller than longer words, and words of the same length are compared w.r.t. the lexicographical ordering induced by the ordering of generators. Generators are sorted according to the order in which they were created. If the generators are invertible then each generator `g` is larger than its inverse `g^{-1}`, and `g^{-1}` is larger than every generator that is smaller than `g`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a b") >>> b < a False >>> a < a.inverse() False """ group = self.group if not isinstance(other, group.dtype): raise TypeError("only FreeGroup elements of same FreeGroup can " "be compared") l = len(self) m = len(other) # implement lenlex order if l < m: return True elif l > m: return False for i in range(l): a = self[i].array_form[0] b = other[i].array_form[0] p = group.symbols.index(a[0]) q = group.symbols.index(b[0]) if p < q: return True elif p > q: return False elif a[1] < b[1]: return True elif a[1] > b[1]: return False return False def __le__(self, other): return (self == other or self < other) def __gt__(self, other): """ Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, x, y, z = free_group("x y z") >>> y**2 > x**2 True >>> y*z > z*y False >>> x > x.inverse() True """ group = self.group if not isinstance(other, group.dtype): raise TypeError("only FreeGroup elements of same FreeGroup can " "be compared") return not self <= other def __ge__(self, other): return not self < other def exponent_sum(self, gen): """ For an associative word `self` and a generator or inverse of generator `gen`, ``exponent_sum`` returns the number of times `gen` appears in `self` minus the number of times its inverse appears in `self`. If neither `gen` nor its inverse occur in `self` then 0 is returned. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> w = x**2*y**3 >>> w.exponent_sum(x) 2 >>> w.exponent_sum(x**-1) -2 >>> w = x**2*y**4*x**-3 >>> w.exponent_sum(x) -1 See Also ======== generator_count """ if len(gen) != 1: raise ValueError("gen must be a generator or inverse of a generator") s = gen.array_form[0] return s[1]*sum([i[1] for i in self.array_form if i[0] == s[0]]) def generator_count(self, gen): """ For an associative word `self` and a generator `gen`, ``generator_count`` returns the multiplicity of generator `gen` in `self`. 
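In contrast to ``exponent_sum``, the exponents are counted by absolute value, so occurrences of the inverse of ``gen`` also increase the count.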
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> w = x**2*y**3 >>> w.generator_count(x) 2 >>> w = x**2*y**4*x**-3 >>> w.generator_count(x) 5 See Also ======== exponent_sum """ if len(gen) != 1 or gen.array_form[0][1] < 0: raise ValueError("gen must be a generator") s = gen.array_form[0] return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]]) def subword(self, from_i, to_j, strict=True): """ For an associative word `self` and two positive integers `from_i` and `to_j`, `subword` returns the subword of `self` that begins at position `from_i` and ends at `to_j - 1`, indexing is done with origin 0. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a b") >>> w = a**5*b*a**2*b**-4*a >>> w.subword(2, 6) a**3*b """ group = self.group if not strict: from_i = max(from_i, 0) to_j = min(len(self), to_j) if from_i < 0 or to_j > len(self): raise ValueError("`from_i`, `to_j` must be positive and no greater than " "the length of associative word") if to_j <= from_i: return group.identity else: letter_form = self.letter_form[from_i: to_j] array_form = letter_form_to_array_form(letter_form, group) return group.dtype(array_form) def subword_index(self, word, start = 0): ''' Find the index of `word` in `self`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a b") >>> w = a**2*b*a*b**3 >>> w.subword_index(a*b*a*b) 1 ''' l = len(word) self_lf = self.letter_form word_lf = word.letter_form index = None for i in range(start,len(self_lf)-l+1): if self_lf[i:i+l] == word_lf: index = i break if index is not None: return index else: raise ValueError("The given word is not a subword of self") def is_dependent(self, word): """ Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> (x**4*y**-3).is_dependent(x**4*y**-2) True >>> (x**2*y**-1).is_dependent(x*y) False >>> (x*y**2*x*y**2).is_dependent(x*y**2) True >>> (x**12).is_dependent(x**-4) True See Also ======== is_independent """ try: return self.subword_index(word) is not None except ValueError: pass try: return self.subword_index(word**-1) is not None except ValueError: return False def is_independent(self, word): """ See Also ======== is_dependent """ return not self.is_dependent(word) def contains_generators(self): """ Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y, z = free_group("x, y, z") >>> (x**2*y**-1).contains_generators() {x, y} >>> (x**3*z).contains_generators() {x, z} """ group = self.group gens = set() for syllable in self.array_form: gens.add(group.dtype(((syllable[0], 1),))) return set(gens) def cyclic_subword(self, from_i, to_j): group = self.group l = len(self) letter_form = self.letter_form period1 = int(from_i/l) if from_i >= l: from_i -= l*period1 to_j -= l*period1 diff = to_j - from_i word = letter_form[from_i: to_j] period2 = int(to_j/l) - 1 word += letter_form*period2 + letter_form[:diff-l+from_i-l*period2] word = letter_form_to_array_form(word, group) return group.dtype(word) def cyclic_conjugates(self): """Returns a words which are cyclic to the word `self`. 
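Two words are cyclic conjugates of each other if one can be obtained from the other by cyclically permuting its letters; the returned set contains every such rotation of ``self``.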
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> w = x*y*x*y*x >>> w.cyclic_conjugates() {x*y*x**2*y, x**2*y*x*y, y*x*y*x**2, y*x**2*y*x, x*y*x*y*x} >>> s = x*y*x**2*y*x >>> s.cyclic_conjugates() {x**2*y*x**2*y, y*x**2*y*x**2, x*y*x**2*y*x} References ========== http://planetmath.org/cyclicpermutation """ return {self.cyclic_subword(i, i+len(self)) for i in range(len(self))} def is_cyclic_conjugate(self, w): """ Checks whether words ``self``, ``w`` are cyclic conjugates. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> w1 = x**2*y**5 >>> w2 = x*y**5*x >>> w1.is_cyclic_conjugate(w2) True >>> w3 = x**-1*y**5*x**-1 >>> w3.is_cyclic_conjugate(w2) False """ l1 = len(self) l2 = len(w) if l1 != l2: return False w1 = self.identity_cyclic_reduction() w2 = w.identity_cyclic_reduction() letter1 = w1.letter_form letter2 = w2.letter_form str1 = ' '.join(map(str, letter1)) str2 = ' '.join(map(str, letter2)) if len(str1) != len(str2): return False return str1 in str2 + ' ' + str2 def number_syllables(self): """Returns the number of syllables of the associative word `self`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1") >>> (swapnil1**3*swapnil0*swapnil1**-1).number_syllables() 3 """ return len(self.array_form) def exponent_syllable(self, i): """ Returns the exponent of the `i`-th syllable of the associative word `self`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a b") >>> w = a**5*b*a**2*b**-4*a >>> w.exponent_syllable( 2 ) 2 """ return self.array_form[i][1] def generator_syllable(self, i): """ Returns the symbol of the generator that is involved in the i-th syllable of the associative word `self`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a b") >>> w = a**5*b*a**2*b**-4*a >>> w.generator_syllable( 3 ) b """ return self.array_form[i][0] def sub_syllables(self, from_i, to_j): """ `sub_syllables` returns the subword of the associative word `self` that consists of syllables from positions `from_to` to `to_j`, where `from_to` and `to_j` must be positive integers and indexing is done with origin 0. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> f, a, b = free_group("a, b") >>> w = a**5*b*a**2*b**-4*a >>> w.sub_syllables(1, 2) b >>> w.sub_syllables(3, 3) <identity> """ if not isinstance(from_i, int) or not isinstance(to_j, int): raise ValueError("both arguments should be integers") group = self.group if to_j <= from_i: return group.identity else: r = tuple(self.array_form[from_i: to_j]) return group.dtype(r) def substituted_word(self, from_i, to_j, by): """ Returns the associative word obtained by replacing the subword of `self` that begins at position `from_i` and ends at position `to_j - 1` by the associative word `by`. `from_i` and `to_j` must be positive integers, indexing is done with origin 0. In other words, `w.substituted_word(w, from_i, to_j, by)` is the product of the three words: `w.subword(0, from_i)`, `by`, and `w.subword(to_j len(w))`. 
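For instance, replacing the subword occupying positions 2 through 5 of the word ``w`` below by ``b**2``:

Examples
========

>>> from sympy.combinatorics.free_groups import free_group
>>> f, a, b = free_group("a b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.substituted_word(2, 6, b**2)
a**2*b**2*a**2*b**-4*a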
See Also ======== eliminate_word """ lw = len(self) if from_i >= to_j or from_i > lw or to_j > lw: raise ValueError("values should be within bounds") # otherwise there are four possibilities # first if from=1 and to=lw then if from_i == 0 and to_j == lw: return by elif from_i == 0: # second if from_i=1 (and to_j < lw) then return by*self.subword(to_j, lw) elif to_j == lw: # third if to_j=1 (and from_i > 1) then return self.subword(0, from_i)*by else: # finally return self.subword(0, from_i)*by*self.subword(to_j, lw) def is_cyclically_reduced(self): r"""Returns whether the word is cyclically reduced or not. A word is cyclically reduced if by forming the cycle of the word, the word is not reduced, i.e a word w = `a_1 ... a_n` is called cyclically reduced if `a_1 \ne a_n^{-1}`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> (x**2*y**-1*x**-1).is_cyclically_reduced() False >>> (y*x**2*y**2).is_cyclically_reduced() True """ if not self: return True return self[0] != self[-1]**-1 def identity_cyclic_reduction(self): """Return a unique cyclically reduced version of the word. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> (x**2*y**2*x**-1).identity_cyclic_reduction() x*y**2 >>> (x**-3*y**-1*x**5).identity_cyclic_reduction() x**2*y**-1 References ========== http://planetmath.org/cyclicallyreduced """ word = self.copy() group = self.group while not word.is_cyclically_reduced(): exp1 = word.exponent_syllable(0) exp2 = word.exponent_syllable(-1) r = exp1 + exp2 if r == 0: rep = word.array_form[1: word.number_syllables() - 1] else: rep = ((word.generator_syllable(0), exp1 + exp2),) + \ word.array_form[1: word.number_syllables() - 1] word = group.dtype(rep) return word def cyclic_reduction(self, removed=False): """Return a cyclically reduced version of the word. Unlike `identity_cyclic_reduction`, this will not cyclically permute the reduced word - just remove the "unreduced" bits on either side of it. Compare the examples with those of `identity_cyclic_reduction`. When `removed` is `True`, return a tuple `(word, r)` where self `r` is such that before the reduction the word was either `r*word*r**-1`. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> (x**2*y**2*x**-1).cyclic_reduction() x*y**2 >>> (x**-3*y**-1*x**5).cyclic_reduction() y**-1*x**2 >>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True) (y**-1*x**2, x**-3) """ word = self.copy() g = self.group.identity while not word.is_cyclically_reduced(): exp1 = abs(word.exponent_syllable(0)) exp2 = abs(word.exponent_syllable(-1)) exp = min(exp1, exp2) start = word[0]**abs(exp) end = word[-1]**abs(exp) word = start**-1*word*end**-1 g = g*start if removed: return word, g return word def power_of(self, other): ''' Check if `self == other**n` for some integer n. 
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> ((x*y)**2).power_of(x*y) True >>> (x**-3*y**-2*x**3).power_of(x**-3*y*x**3) True ''' if self.is_identity: return True l = len(other) if l == 1: # self has to be a power of one generator gens = self.contains_generators() s = other in gens or other**-1 in gens return len(gens) == 1 and s # if self is not cyclically reduced and it is a power of other, # other isn't cyclically reduced and the parts removed during # their reduction must be equal reduced, r1 = self.cyclic_reduction(removed=True) if not r1.is_identity: other, r2 = other.cyclic_reduction(removed=True) if r1 == r2: return reduced.power_of(other) return False if len(self) < l or len(self) % l: return False prefix = self.subword(0, l) if prefix == other or prefix**-1 == other: rest = self.subword(l, len(self)) return rest.power_of(other) return False def letter_form_to_array_form(array_form, group): """ This method converts a list given with possible repetitions of elements in it. It returns a new list such that repetitions of consecutive elements is removed and replace with a tuple element of size two such that the first index contains `value` and the second index contains the number of consecutive repetitions of `value`. """ a = list(array_form[:]) new_array = [] n = 1 symbols = group.symbols for i in range(len(a)): if i == len(a) - 1: if a[i] == a[i - 1]: if (-a[i]) in symbols: new_array.append((-a[i], -n)) else: new_array.append((a[i], n)) else: if (-a[i]) in symbols: new_array.append((-a[i], -1)) else: new_array.append((a[i], 1)) return new_array elif a[i] == a[i + 1]: n += 1 else: if (-a[i]) in symbols: new_array.append((-a[i], -n)) else: new_array.append((a[i], n)) n = 1 def zero_mul_simp(l, index): """Used to combine two reduced words.""" while index >=0 and index < len(l) - 1 and l[index][0] == l[index + 1][0]: exp = l[index][1] + l[index + 1][1] base = l[index][0] l[index] = (base, exp) del l[index + 1] if l[index][1] == 0: del l[index] index -= 1
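# A minimal usage sketch (the helper below is illustrative only and not part
# of the public API): when two reduced words are concatenated,
# ``zero_mul_simp`` merges adjacent syllables with the same generator at the
# join position and drops syllables whose exponents cancel to zero.
def _example_zero_mul_simp():
    from sympy import symbols
    x, y = symbols('x y')
    # array form of x**2 * x**-2 * y, joined at syllable index 0
    w = [(x, 2), (x, -2), (y, 1)]
    zero_mul_simp(w, 0)
    assert w == [(y, 1)]    # the x-syllables cancelled completely
    return w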
88e61a9b7d024360edee8fcc974e231773431438d5d541e90f7ebb72e43638dc
from __future__ import print_function, division from sympy.combinatorics.permutations import Permutation, _af_rmul, \ _af_invert, _af_new from sympy.combinatorics.perm_groups import PermutationGroup, _orbit, \ _orbit_transversal from sympy.combinatorics.util import _distribute_gens_by_base, \ _orbits_transversals_from_bsgs """ References for tensor canonicalization: [1] R. Portugal "Algorithmic simplification of tensor expressions", J. Phys. A 32 (1999) 7779-7789 [2] R. Portugal, B.F. Svaiter "Group-theoretic Approach for Symbolic Tensor Manipulation: I. Free Indices" arXiv:math-ph/0107031v1 [3] L.R.U. Manssur, R. Portugal "Group-theoretic Approach for Symbolic Tensor Manipulation: II. Dummy Indices" arXiv:math-ph/0107032v1 [4] xperm.c part of XPerm written by J. M. Martin-Garcia http://www.xact.es/index.html """ def dummy_sgs(dummies, sym, n): """ Return the strong generators for dummy indices Parameters ========== dummies : list of dummy indices `dummies[2k], dummies[2k+1]` are paired indices sym : symmetry under interchange of contracted dummies:: * None no symmetry * 0 commuting * 1 anticommuting n : number of indices in base form the dummy indices are always in consecutive positions Examples ======== >>> from sympy.combinatorics.tensor_can import dummy_sgs >>> dummy_sgs(list(range(2, 8)), 0, 8) [[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9], [0, 1, 2, 3, 6, 7, 4, 5, 8, 9]] """ if len(dummies) > n: raise ValueError("List too large") res = [] # exchange of contravariant and covariant indices if sym is not None: for j in dummies[::2]: a = list(range(n + 2)) if sym == 1: a[n] = n + 1 a[n + 1] = n a[j], a[j + 1] = a[j + 1], a[j] res.append(a) # rename dummy indices for j in dummies[:-3:2]: a = list(range(n + 2)) a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1] res.append(a) return res def _min_dummies(dummies, sym, indices): """ Return list of minima of the orbits of indices in group of dummies see `double_coset_can_rep` for the description of `dummies` and `sym` indices is the initial list of dummy indices Examples ======== >>> from sympy.combinatorics.tensor_can import _min_dummies >>> _min_dummies([list(range(2, 8))], [0], list(range(10))) [0, 1, 2, 2, 2, 2, 2, 2, 8, 9] """ num_types = len(sym) m = [] for dx in dummies: if dx: m.append(min(dx)) else: m.append(None) res = indices[:] for i in range(num_types): for c, i in enumerate(indices): for j in range(num_types): if i in dummies[j]: res[c] = m[j] break return res def _trace_S(s, j, b, S_cosets): """ Return the representative h satisfying s[h[b]] == j If there is not such a representative return None """ for h in S_cosets[b]: if s[h[b]] == j: return h return None def _trace_D(gj, p_i, Dxtrav): """ Return the representative h satisfying h[gj] == p_i If there is not such a representative return None """ for h in Dxtrav: if h[gj] == p_i: return h return None def _dumx_remove(dumx, dumx_flat, p0): """ remove p0 from dumx """ res = [] for dx in dumx: if p0 not in dx: res.append(dx) continue k = dx.index(p0) if k % 2 == 0: p0_paired = dx[k + 1] else: p0_paired = dx[k - 1] dx.remove(p0) dx.remove(p0_paired) dumx_flat.remove(p0) dumx_flat.remove(p0_paired) res.append(dx) def transversal2coset(size, base, transversal): a = [] j = 0 for i in range(size): if i in base: a.append(sorted(transversal[j].values())) j += 1 else: a.append([list(range(size))]) j = len(a) - 1 while a[j] == [list(range(size))]: j -= 1 return a[:j + 1] def double_coset_can_rep(dummies, sym, 
b_S, sgens, S_transversals, g): """ Butler-Portugal algorithm for tensor canonicalization with dummy indices Parameters ========== dummies list of lists of dummy indices, one list for each type of index; the dummy indices are put in order contravariant, covariant [d0, -d0, d1, -d1, ...]. sym list of the symmetries of the index metric for each type. possible symmetries of the metrics * 0 symmetric * 1 antisymmetric * None no symmetry b_S base of a minimal slot symmetry BSGS. sgens generators of the slot symmetry BSGS. S_transversals transversals for the slot BSGS. g permutation representing the tensor. Returns ======= Return 0 if the tensor is zero, else return the array form of the permutation representing the canonical form of the tensor. Notes ===== A tensor with dummy indices can be represented in a number of equivalent ways which typically grows exponentially with the number of indices. To be able to establish if two tensors with many indices are equal becomes computationally very slow in absence of an efficient algorithm. The Butler-Portugal algorithm [3] is an efficient algorithm to put tensors in canonical form, solving the above problem. Portugal observed that a tensor can be represented by a permutation, and that the class of tensors equivalent to it under slot and dummy symmetries is equivalent to the double coset `D*g*S` (Note: in this documentation we use the conventions for multiplication of permutations p, q with (p*q)(i) = p[q[i]] which is opposite to the one used in the Permutation class) Using the algorithm by Butler to find a representative of the double coset one can find a canonical form for the tensor. To see this correspondence, let `g` be a permutation in array form; a tensor with indices `ind` (the indices including both the contravariant and the covariant ones) can be written as `t = T(ind[g[0]],..., ind[g[n-1]])`, where `n= len(ind)`; `g` has size `n + 2`, the last two indices for the sign of the tensor (trick introduced in [4]). A slot symmetry transformation `s` is a permutation acting on the slots `t -> T(ind[(g*s)[0]],..., ind[(g*s)[n-1]])` A dummy symmetry transformation acts on `ind` `t -> T(ind[(d*g)[0]],..., ind[(d*g)[n-1]])` Being interested only in the transformations of the tensor under these symmetries, one can represent the tensor by `g`, which transforms as `g -> d*g*s`, so it belongs to the coset `D*g*S`, or in other words to the set of all permutations allowed by the slot and dummy symmetries. Let us explain the conventions by an example. Given a tensor `T^{d3 d2 d1}{}_{d1 d2 d3}` with the slot symmetries `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}` `T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}` and symmetric metric, find the tensor equivalent to it which is the lowest under the ordering of indices: lexicographic ordering `d1, d2, d3` and then contravariant before covariant index; that is the canonical form of the tensor. The canonical form is `-T^{d1 d2 d3}{}_{d1 d2 d3}` obtained using `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`. 
To convert this problem in the input for this function, use the following ordering of the index names (- for covariant for short) `d1, -d1, d2, -d2, d3, -d3` `T^{d3 d2 d1}{}_{d1 d2 d3}` corresponds to `g = [4, 2, 0, 1, 3, 5, 6, 7]` where the last two indices are for the sign `sgens = [Permutation(0, 2)(6, 7), Permutation(0, 4)(6, 7)]` sgens[0] is the slot symmetry `-(0, 2)` `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}` sgens[1] is the slot symmetry `-(0, 4)` `T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}` The dummy symmetry group D is generated by the strong base generators `[(0, 1), (2, 3), (4, 5), (0, 2)(1, 3), (0, 4)(1, 5)]` where the first three interchange covariant and contravariant positions of the same index (d1 <-> -d1) and the last two interchange the dummy indices themselves (d1 <-> d2). The dummy symmetry acts from the left `d = [1, 0, 2, 3, 4, 5, 6, 7]` exchange `d1 <-> -d1` `T^{d3 d2 d1}{}_{d1 d2 d3} == T^{d3 d2}{}_{d1}{}^{d1}{}_{d2 d3}` `g=[4, 2, 0, 1, 3, 5, 6, 7] -> [4, 2, 1, 0, 3, 5, 6, 7] = _af_rmul(d, g)` which differs from `_af_rmul(g, d)`. The slot symmetry acts from the right `s = [2, 1, 0, 3, 4, 5, 7, 6]` exchanges slots 0 and 2 and changes sign `T^{d3 d2 d1}{}_{d1 d2 d3} == -T^{d1 d2 d3}{}_{d1 d2 d3}` `g=[4,2,0,1,3,5,6,7] -> [0, 2, 4, 1, 3, 5, 7, 6] = _af_rmul(g, s)` Example in which the tensor is zero, same slot symmetries as above: `T^{d2}{}_{d1 d3}{}^{d1 d3}{}_{d2}` `= -T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,4)`; `= T_{d3 d1}{}^{d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,2)`; `= T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` symmetric metric; `= 0` since two of these lines have tensors differ only for the sign. The double coset D*g*S consists of permutations `h = d*g*s` corresponding to equivalent tensors; if there are two `h` which are the same apart from the sign, return zero; otherwise choose as representative the tensor with indices ordered lexicographically according to `[d1, -d1, d2, -d2, d3, -d3]` that is `rep = min(D*g*S) = min([d*g*s for d in D for s in S])` The indices are fixed one by one; first choose the lowest index for slot 0, then the lowest remaining index for slot 1, etc. Doing this one obtains a chain of stabilizers `S -> S_{b0} -> S_{b0,b1} -> ...` and `D -> D_{p0} -> D_{p0,p1} -> ...` where `[b0, b1, ...] = range(b)` is a base of the symmetric group; the strong base `b_S` of S is an ordered sublist of it; therefore it is sufficient to compute once the strong base generators of S using the Schreier-Sims algorithm; the stabilizers of the strong base generators are the strong base generators of the stabilizer subgroup. `dbase = [p0, p1, ...]` is not in general in lexicographic order, so that one must recompute the strong base generators each time; however this is trivial, there is no need to use the Schreier-Sims algorithm for D. The algorithm keeps a TAB of elements `(s_i, d_i, h_i)` where `h_i = d_i*g*s_i` satisfying `h_i[j] = p_j` for `0 <= j < i` starting from `s_0 = id, d_0 = id, h_0 = g`. 
The equations `h_0[0] = p_0, h_1[1] = p_1,...` are solved in this order, choosing each time the lowest possible value of p_i For `j < i` `d_i*g*s_i*S_{b_0,...,b_{i-1}}*b_j = D_{p_0,...,p_{i-1}}*p_j` so that for dx in `D_{p_0,...,p_{i-1}}` and sx in `S_{base[0],...,base[i-1]}` one has `dx*d_i*g*s_i*sx*b_j = p_j` Search for dx, sx such that this equation holds for `j = i`; it can be written as `s_i*sx*b_j = J, dx*d_i*g*J = p_j` `sx*b_j = s_i**-1*J; sx = trace(s_i**-1, S_{b_0,...,b_{i-1}})` `dx**-1*p_j = d_i*g*J; dx = trace(d_i*g*J, D_{p_0,...,p_{i-1}})` `s_{i+1} = s_i*trace(s_i**-1*J, S_{b_0,...,b_{i-1}})` `d_{i+1} = trace(d_i*g*J, D_{p_0,...,p_{i-1}})**-1*d_i` `h_{i+1}*b_i = d_{i+1}*g*s_{i+1}*b_i = p_i` `h_n*b_j = p_j` for all j, so that `h_n` is the solution. Add the found `(s, d, h)` to TAB1. At the end of the iteration sort TAB1 with respect to the `h`; if there are two consecutive `h` in TAB1 which differ only for the sign, the tensor is zero, so return 0; if there are two consecutive `h` which are equal, keep only one. Then stabilize the slot generators under `i` and the dummy generators under `p_i`. Assign `TAB = TAB1` at the end of the iteration step. At the end `TAB` contains a unique `(s, d, h)`, since all the slots of the tensor `h` have been fixed to have the minimum value according to the symmetries. The algorithm returns `h`. It is important that the slot BSGS has lexicographic minimal base, otherwise there is an `i` which does not belong to the slot base for which `p_i` is fixed by the dummy symmetry only, while `i` is not invariant from the slot stabilizer, so `p_i` is not in general the minimal value. This algorithm differs slightly from the original algorithm [3]: the canonical form is minimal lexicographically, and the BSGS has minimal base under lexicographic order. Equal tensors `h` are eliminated from TAB. 
Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.tensor_can import double_coset_can_rep, get_transversals >>> gens = [Permutation(x) for x in [[2, 1, 0, 3, 4, 5, 7, 6], [4, 1, 2, 3, 0, 5, 7, 6]]] >>> base = [0, 2] >>> g = Permutation([4, 2, 0, 1, 3, 5, 6, 7]) >>> transversals = get_transversals(base, gens) >>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g) [0, 1, 2, 3, 4, 5, 7, 6] >>> g = Permutation([4, 1, 3, 0, 5, 2, 6, 7]) >>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g) 0 """ size = g.size g = g.array_form num_dummies = size - 2 indices = list(range(num_dummies)) all_metrics_with_sym = all([_ is not None for _ in sym]) num_types = len(sym) dumx = dummies[:] dumx_flat = [] for dx in dumx: dumx_flat.extend(dx) b_S = b_S[:] sgensx = [h._array_form for h in sgens] if b_S: S_transversals = transversal2coset(size, b_S, S_transversals) # strong generating set for D dsgsx = [] for i in range(num_types): dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies)) idn = list(range(size)) # TAB = list of entries (s, d, h) where h = _af_rmuln(d,g,s) # for short, in the following d*g*s means _af_rmuln(d,g,s) TAB = [(idn, idn, g)] for i in range(size - 2): b = i testb = b in b_S and sgensx if testb: sgensx1 = [_af_new(_) for _ in sgensx] deltab = _orbit(size, sgensx1, b) else: deltab = {b} # p1 = min(IMAGES) = min(Union D_p*h*deltab for h in TAB) if all_metrics_with_sym: md = _min_dummies(dumx, sym, indices) else: md = [min(_orbit(size, [_af_new( ddx) for ddx in dsgsx], ii)) for ii in range(size - 2)] p_i = min([min([md[h[x]] for x in deltab]) for s, d, h in TAB]) dsgsx1 = [_af_new(_) for _ in dsgsx] Dxtrav = _orbit_transversal(size, dsgsx1, p_i, False, af=True) \ if dsgsx else None if Dxtrav: Dxtrav = [_af_invert(x) for x in Dxtrav] # compute the orbit of p_i for ii in range(num_types): if p_i in dumx[ii]: # the orbit is made by all the indices in dum[ii] if sym[ii] is not None: deltap = dumx[ii] else: # the orbit is made by all the even indices if p_i # is even, by all the odd indices if p_i is odd p_i_index = dumx[ii].index(p_i) % 2 deltap = dumx[ii][p_i_index::2] break else: deltap = [p_i] TAB1 = [] while TAB: s, d, h = TAB.pop() if min([md[h[x]] for x in deltab]) != p_i: continue deltab1 = [x for x in deltab if md[h[x]] == p_i] # NEXT = s*deltab1 intersection (d*g)**-1*deltap dg = _af_rmul(d, g) dginv = _af_invert(dg) sdeltab = [s[x] for x in deltab1] gdeltap = [dginv[x] for x in deltap] NEXT = [x for x in sdeltab if x in gdeltap] # d, s satisfy # d*g*s*base[i-1] = p_{i-1}; using the stabilizers # d*g*s*S_{base[0],...,base[i-1]}*base[i-1] = # D_{p_0,...,p_{i-1}}*p_{i-1} # so that to find d1, s1 satisfying d1*g*s1*b = p_i # one can look for dx in D_{p_0,...,p_{i-1}} and # sx in S_{base[0],...,base[i-1]} # d1 = dx*d; s1 = s*sx # d1*g*s1*b = dx*d*g*s*sx*b = p_i for j in NEXT: if testb: # solve s1*b = j with s1 = s*sx for some element sx # of the stabilizer of ..., base[i-1] # sx*b = s**-1*j; sx = _trace_S(s, j,...) # s1 = s*trace_S(s**-1*j,...) s1 = _trace_S(s, j, b, S_transversals) if not s1: continue else: s1 = [s[ix] for ix in s1] else: s1 = s # assert s1[b] == j # invariant # solve d1*g*j = p_i with d1 = dx*d for some element dg # of the stabilizer of ..., p_{i-1} # dx**-1*p_i = d*g*j; dx**-1 = trace_D(d*g*j,...) 
# d1 = trace_D(d*g*j,...)**-1*d # to save an inversion in the inner loop; notice we did # Dxtrav = [perm_af_invert(x) for x in Dxtrav] out of the loop if Dxtrav: d1 = _trace_D(dg[j], p_i, Dxtrav) if not d1: continue else: if p_i != dg[j]: continue d1 = idn assert d1[dg[j]] == p_i # invariant d1 = [d1[ix] for ix in d] h1 = [d1[g[ix]] for ix in s1] # assert h1[b] == p_i # invariant TAB1.append((s1, d1, h1)) # if TAB contains equal permutations, keep only one of them; # if TAB contains equal permutations up to the sign, return 0 TAB1.sort(key=lambda x: x[-1]) prev = [0] * size while TAB1: s, d, h = TAB1.pop() if h[:-2] == prev[:-2]: if h[-1] != prev[-1]: return 0 else: TAB.append((s, d, h)) prev = h # stabilize the SGS sgensx = [h for h in sgensx if h[b] == b] if b in b_S: b_S.remove(b) _dumx_remove(dumx, dumx_flat, p_i) dsgsx = [] for i in range(num_types): dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies)) return TAB[0][-1] def canonical_free(base, gens, g, num_free): """ canonicalization of a tensor with respect to free indices choosing the minimum with respect to lexicographical ordering in the free indices ``base``, ``gens`` BSGS for slot permutation group ``g`` permutation representing the tensor ``num_free`` number of free indices The indices must be ordered with first the free indices see explanation in double_coset_can_rep The algorithm is a variation of the one given in [2]. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import canonical_free >>> gens = [[1, 0, 2, 3, 5, 4], [2, 3, 0, 1, 4, 5],[0, 1, 3, 2, 5, 4]] >>> gens = [Permutation(h) for h in gens] >>> base = [0, 2] >>> g = Permutation([2, 1, 0, 3, 4, 5]) >>> canonical_free(base, gens, g, 4) [0, 3, 1, 2, 5, 4] Consider the product of Riemann tensors ``T = R^{a}_{d0}^{d1,d2}*R_{d2,d1}^{d0,b}`` The order of the indices is ``[a, b, d0, -d0, d1, -d1, d2, -d2]`` The permutation corresponding to the tensor is ``g = [0, 3, 4, 6, 7, 5, 2, 1, 8, 9]`` In particular ``a`` is position ``0``, ``b`` is in position ``9``. Use the slot symmetries to get `T` is a form which is the minimal in lexicographic order in the free indices ``a`` and ``b``, e.g. 
``-R^{a}_{d0}^{d1,d2}*R^{b,d0}_{d2,d1}`` corresponding to ``[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]`` >>> from sympy.combinatorics.tensor_can import riemann_bsgs, tensor_gens >>> base, gens = riemann_bsgs >>> size, sbase, sgens = tensor_gens(base, gens, [[], []], 0) >>> g = Permutation([0, 3, 4, 6, 7, 5, 2, 1, 8, 9]) >>> canonical_free(sbase, [Permutation(h) for h in sgens], g, 2) [0, 3, 4, 6, 1, 2, 7, 5, 9, 8] """ g = g.array_form size = len(g) if not base: return g[:] transversals = get_transversals(base, gens) for x in sorted(g[:-2]): if x not in base: base.append(x) h = g for i, transv in enumerate(transversals): h_i = [size]*num_free # find the element s in transversals[i] such that # _af_rmul(h, s) has its free elements with the lowest position in h s = None for sk in transv.values(): h1 = _af_rmul(h, sk) hi = [h1.index(ix) for ix in range(num_free)] if hi < h_i: h_i = hi s = sk if s: h = _af_rmul(h, s) return h def _get_map_slots(size, fixed_slots): res = list(range(size)) pos = 0 for i in range(size): if i in fixed_slots: continue res[i] = pos pos += 1 return res def _lift_sgens(size, fixed_slots, free, s): a = [] j = k = 0 fd = list(zip(fixed_slots, free)) fd = [y for x, y in sorted(fd)] num_free = len(free) for i in range(size): if i in fixed_slots: a.append(fd[k]) k += 1 else: a.append(s[j] + num_free) j += 1 return a def canonicalize(g, dummies, msym, *v): """ canonicalize tensor formed by tensors Parameters ========== g : permutation representing the tensor dummies : list representing the dummy indices it can be a list of dummy indices of the same type or a list of lists of dummy indices, one list for each type of index; the dummy indices must come after the free indices, and put in order contravariant, covariant [d0, -d0, d1,-d1,...] msym : symmetry of the metric(s) it can be an integer or a list; in the first case it is the symmetry of the dummy index metric; in the second case it is the list of the symmetries of the index metric for each type v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i` base_i, gens_i : BSGS for tensors of this type. The BSGS should have minimal base under lexicographic ordering; if not, an attempt is made do get the minimal BSGS; in case of failure, canonicalize_naive is used, which is much slower. n_i : number of tensors of type `i`. sym_i : symmetry under exchange of component tensors of type `i`. Both for msym and sym_i the cases are * None no symmetry * 0 commuting * 1 anticommuting Returns ======= 0 if the tensor is zero, else return the array form of the permutation representing the canonical form of the tensor. Algorithm ========= First one uses canonical_free to get the minimum tensor under lexicographic order, using only the slot symmetries. If the component tensors have not minimal BSGS, it is attempted to find it; if the attempt fails canonicalize_naive is used instead. Compute the residual slot symmetry keeping fixed the free indices using tensor_gens(base, gens, list_free_indices, sym). Reduce the problem eliminating the free indices. Then use double_coset_can_rep and lift back the result reintroducing the free indices. 
Examples ======== one type of index with commuting metric; `A_{a b}` and `B_{a b}` antisymmetric and commuting `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}` `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices g = [1, 3, 0, 5, 4, 2, 6, 7] `T_c = 0` >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product >>> from sympy.combinatorics import Permutation >>> base2a, gens2a = get_symmetric_group_sgs(2, 1) >>> t0 = (base2a, gens2a, 1, 0) >>> t1 = (base2a, gens2a, 2, 0) >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7]) >>> canonicalize(g, range(6), 0, t0, t1) 0 same as above, but with `B_{a b}` anticommuting `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}` can = [0,2,1,4,3,5,7,6] >>> t1 = (base2a, gens2a, 2, 1) >>> canonicalize(g, range(6), 0, t0, t1) [0, 2, 1, 4, 3, 5, 7, 6] two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order, both with commuting metric `f^{a b c}` antisymmetric, commuting `A_{m a}` no symmetry, commuting `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}` ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n] g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15] The canonical tensor is `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}` can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14] >>> base_f, gens_f = get_symmetric_group_sgs(3, 1) >>> base1, gens1 = get_symmetric_group_sgs(1) >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1) >>> t0 = (base_f, gens_f, 2, 0) >>> t1 = (base_A, gens_A, 4, 0) >>> dummies = [range(2, 10), range(10, 14)] >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15]) >>> canonicalize(g, dummies, [0, 0], t0, t1) [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14] """ from sympy.combinatorics.testutil import canonicalize_naive if not isinstance(msym, list): if not msym in [0, 1, None]: raise ValueError('msym must be 0, 1 or None') num_types = 1 else: num_types = len(msym) if not all(msymx in [0, 1, None] for msymx in msym): raise ValueError('msym entries must be 0, 1 or None') if len(dummies) != num_types: raise ValueError( 'dummies and msym must have the same number of elements') size = g.size num_tensors = 0 v1 = [] for i in range(len(v)): base_i, gens_i, n_i, sym_i = v[i] # check that the BSGS is minimal; # this property is used in double_coset_can_rep; # if it is not minimal use canonicalize_naive if not _is_minimal_bsgs(base_i, gens_i): mbsgs = get_minimal_bsgs(base_i, gens_i) if not mbsgs: can = canonicalize_naive(g, dummies, msym, *v) return can base_i, gens_i = mbsgs v1.append((base_i, gens_i, [[]] * n_i, sym_i)) num_tensors += n_i if num_types == 1 and not isinstance(msym, list): dummies = [dummies] msym = [msym] flat_dummies = [] for dumx in dummies: flat_dummies.extend(dumx) if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)): raise ValueError('dummies is not valid') # slot symmetry of the tensor size1, sbase, sgens = gens_products(*v1) if size != size1: raise ValueError( 'g has size %d, generators have size %d' % (size, size1)) free = [i for i in range(size - 2) if i not in flat_dummies] num_free = len(free) # g1 minimal tensor under slot symmetry g1 = canonical_free(sbase, sgens, g, num_free) if not flat_dummies: return g1 # save the sign of g1 sign = 0 if g1[-1] == size - 1 else 1 # the free indices are kept fixed. # Determine free_i, the list of slots of tensors which are fixed # since they are occupied by free indices, which are fixed. 
start = 0 for i in range(len(v)): free_i = [] base_i, gens_i, n_i, sym_i = v[i] len_tens = gens_i[0].size - 2 # for each component tensor get a list od fixed islots for j in range(n_i): # get the elements corresponding to the component tensor h = g1[start:(start + len_tens)] fr = [] # get the positions of the fixed elements in h for k in free: if k in h: fr.append(h.index(k)) free_i.append(fr) start += len_tens v1[i] = (base_i, gens_i, free_i, sym_i) # BSGS of the tensor with fixed free indices # if tensor_gens fails in gens_product, use canonicalize_naive size, sbase, sgens = gens_products(*v1) # reduce the permutations getting rid of the free indices pos_free = [g1.index(x) for x in range(num_free)] size_red = size - num_free g1_red = [x - num_free for x in g1 if x in flat_dummies] if sign: g1_red.extend([size_red - 1, size_red - 2]) else: g1_red.extend([size_red - 2, size_red - 1]) map_slots = _get_map_slots(size, pos_free) sbase_red = [map_slots[i] for i in sbase if i not in pos_free] sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens] dummies_red = [[x - num_free for x in y] for y in dummies] transv_red = get_transversals(sbase_red, sgens_red) g1_red = _af_new(g1_red) g2 = double_coset_can_rep( dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red) if g2 == 0: return 0 # lift to the case with the free indices g3 = _lift_sgens(size, pos_free, free, g2) return g3 def perm_af_direct_product(gens1, gens2, signed=True): """ direct products of the generators gens1 and gens2 Examples ======== >>> from sympy.combinatorics.tensor_can import perm_af_direct_product >>> gens1 = [[1, 0, 2, 3], [0, 1, 3, 2]] >>> gens2 = [[1, 0]] >>> perm_af_direct_product(gens1, gens2, False) [[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]] >>> gens1 = [[1, 0, 2, 3, 5, 4], [0, 1, 3, 2, 4, 5]] >>> gens2 = [[1, 0, 2, 3]] >>> perm_af_direct_product(gens1, gens2, True) [[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]] """ gens1 = [list(x) for x in gens1] gens2 = [list(x) for x in gens2] s = 2 if signed else 0 n1 = len(gens1[0]) - s n2 = len(gens2[0]) - s start = list(range(n1)) end = list(range(n1, n1 + n2)) if signed: gens1 = [gen[:-2] + end + [gen[-2] + n2, gen[-1] + n2] for gen in gens1] gens2 = [start + [x + n1 for x in gen] for gen in gens2] else: gens1 = [gen + end for gen in gens1] gens2 = [start + [x + n1 for x in gen] for gen in gens2] res = gens1 + gens2 return res def bsgs_direct_product(base1, gens1, base2, gens2, signed=True): """ Direct product of two BSGS Parameters ========== base1 base of the first BSGS. gens1 strong generating sequence of the first BSGS. base2, gens2 similarly for the second BSGS. signed flag for signed permutations. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import (get_symmetric_group_sgs, bsgs_direct_product) >>> base1, gens1 = get_symmetric_group_sgs(1) >>> base2, gens2 = get_symmetric_group_sgs(2) >>> bsgs_direct_product(base1, gens1, base2, gens2) ([1], [(4)(1 2)]) """ s = 2 if signed else 0 n1 = gens1[0].size - s base = list(base1) base += [x + n1 for x in base2] gens1 = [h._array_form for h in gens1] gens2 = [h._array_form for h in gens2] gens = perm_af_direct_product(gens1, gens2, signed) size = len(gens[0]) id_af = list(range(size)) gens = [h for h in gens if h != id_af] if not gens: gens = [id_af] return base, [_af_new(h) for h in gens] def get_symmetric_group_sgs(n, antisym=False): """ Return base, gens of the minimal BSGS for (anti)symmetric tensor ``n`` rank of the tensor ``antisym = False`` symmetric tensor ``antisym = True`` antisymmetric tensor Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs >>> get_symmetric_group_sgs(3) ([0, 1], [(4)(0 1), (4)(1 2)]) """ if n == 1: return [], [_af_new(list(range(3)))] gens = [Permutation(n - 1)(i, i + 1)._array_form for i in range(n - 1)] if antisym == 0: gens = [x + [n, n + 1] for x in gens] else: gens = [x + [n + 1, n] for x in gens] base = list(range(n - 1)) return base, [_af_new(h) for h in gens] riemann_bsgs = [0, 2], [Permutation(0, 1)(4, 5), Permutation(2, 3)(4, 5), Permutation(5)(0, 2)(1, 3)] def get_transversals(base, gens): """ Return transversals for the group with BSGS base, gens """ if not base: return [] stabs = _distribute_gens_by_base(base, gens) orbits, transversals = _orbits_transversals_from_bsgs(base, stabs) transversals = [{x: h._array_form for x, h in y.items()} for y in transversals] return transversals def _is_minimal_bsgs(base, gens): """ Check if the BSGS has minimal base under lexigographic order. 
base, gens BSGS Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import riemann_bsgs, _is_minimal_bsgs >>> _is_minimal_bsgs(*riemann_bsgs) True >>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)])) >>> _is_minimal_bsgs(*riemann_bsgs1) False """ base1 = [] sgs1 = gens[:] size = gens[0].size for i in range(size): if not all(h._array_form[i] == i for h in sgs1): base1.append(i) sgs1 = [h for h in sgs1 if h._array_form[i] == i] return base1 == base def get_minimal_bsgs(base, gens): """ Compute a minimal GSGS base, gens BSGS If base, gens is a minimal BSGS return it; else return a minimal BSGS if it fails in finding one, it returns None TODO: use baseswap in the case in which if it fails in finding a minimal BSGS Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import get_minimal_bsgs >>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)])) >>> get_minimal_bsgs(*riemann_bsgs1) ([0, 2], [(0 1)(4 5), (5)(0 2)(1 3), (2 3)(4 5)]) """ G = PermutationGroup(gens) base, gens = G.schreier_sims_incremental() if not _is_minimal_bsgs(base, gens): return None return base, gens def tensor_gens(base, gens, list_free_indices, sym=0): """ Returns size, res_base, res_gens BSGS for n tensors of the same type base, gens BSGS for tensors of this type list_free_indices list of the slots occupied by fixed indices for each of the tensors sym symmetry under commutation of two tensors sym None no symmetry sym 0 commuting sym 1 anticommuting Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import tensor_gens, get_symmetric_group_sgs two symmetric tensors with 3 indices without free indices >>> base, gens = get_symmetric_group_sgs(3) >>> tensor_gens(base, gens, [[], []]) (8, [0, 1, 3, 4], [(7)(0 1), (7)(1 2), (7)(3 4), (7)(4 5), (7)(0 3)(1 4)(2 5)]) two symmetric tensors with 3 indices with free indices in slot 1 and 0 >>> tensor_gens(base, gens, [[1], [0]]) (8, [0, 4], [(7)(0 2), (7)(4 5)]) four symmetric tensors with 3 indices, two of which with free indices """ def _get_bsgs(G, base, gens, free_indices): """ return the BSGS for G.pointwise_stabilizer(free_indices) """ if not free_indices: return base[:], gens[:] else: H = G.pointwise_stabilizer(free_indices) base, sgs = H.schreier_sims_incremental() return base, sgs # if not base there is no slot symmetry for the component tensors # if list_free_indices.count([]) < 2 there is no commutation symmetry # so there is no resulting slot symmetry if not base and list_free_indices.count([]) < 2: n = len(list_free_indices) size = gens[0].size size = n * (gens[0].size - 2) + 2 return size, [], [_af_new(list(range(size)))] # if any(list_free_indices) one needs to compute the pointwise # stabilizer, so G is needed if any(list_free_indices): G = PermutationGroup(gens) else: G = None # no_free list of lists of indices for component tensors without fixed # indices no_free = [] size = gens[0].size id_af = list(range(size)) num_indices = size - 2 if not list_free_indices[0]: no_free.append(list(range(num_indices))) res_base, res_gens = _get_bsgs(G, base, gens, list_free_indices[0]) for i in range(1, len(list_free_indices)): base1, gens1 = _get_bsgs(G, base, gens, list_free_indices[i]) res_base, res_gens = bsgs_direct_product(res_base, res_gens, base1, gens1, 1) if not list_free_indices[i]: no_free.append(list(range(size - 2, size - 2 + num_indices))) 
size += num_indices nr = size - 2 res_gens = [h for h in res_gens if h._array_form != id_af] # if sym there are no commuting tensors stop here if sym is None or not no_free: if not res_gens: res_gens = [_af_new(id_af)] return size, res_base, res_gens # if the component tensors have moinimal BSGS, so is their direct # product P; the slot symmetry group is S = P*C, where C is the group # to (anti)commute the component tensors with no free indices # a stabilizer has the property S_i = P_i*C_i; # the BSGS of P*C has SGS_P + SGS_C and the base is # the ordered union of the bases of P and C. # If P has minimal BSGS, so has S with this base. base_comm = [] for i in range(len(no_free) - 1): ind1 = no_free[i] ind2 = no_free[i + 1] a = list(range(ind1[0])) a.extend(ind2) a.extend(ind1) base_comm.append(ind1[0]) a.extend(list(range(ind2[-1] + 1, nr))) if sym == 0: a.extend([nr, nr + 1]) else: a.extend([nr + 1, nr]) res_gens.append(_af_new(a)) res_base = list(res_base) # each base is ordered; order the union of the two bases for i in base_comm: if i not in res_base: res_base.append(i) res_base.sort() if not res_gens: res_gens = [_af_new(id_af)] return size, res_base, res_gens def gens_products(*v): """ Returns size, res_base, res_gens BSGS for n tensors of different types v is a sequence of (base_i, gens_i, free_i, sym_i) where base_i, gens_i BSGS of tensor of type `i` free_i list of the fixed slots for each of the tensors of type `i`; if there are `n_i` tensors of type `i` and none of them have fixed slots, `free = [[]]*n_i` sym 0 (1) if the tensors of type `i` (anti)commute among themselves Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, gens_products >>> base, gens = get_symmetric_group_sgs(2) >>> gens_products((base, gens, [[], []], 0)) (6, [0, 2], [(5)(0 1), (5)(2 3), (5)(0 2)(1 3)]) >>> gens_products((base, gens, [[1], []], 0)) (6, [2], [(5)(2 3)]) """ res_size, res_base, res_gens = tensor_gens(*v[0]) for i in range(1, len(v)): size, base, gens = tensor_gens(*v[i]) res_base, res_gens = bsgs_direct_product(res_base, res_gens, base, gens, 1) res_size = res_gens[0].size id_af = list(range(res_size)) res_gens = [h for h in res_gens if h != id_af] if not res_gens: res_gens = [id_af] return res_size, res_base, res_gens
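# --- Usage sketch (not part of the original module) ----------------------
# A minimal, hedged example re-running the first case from the
# ``canonicalize`` docstring above: T = A_{d0 d1}*B^{d0}{}_{d2}*B^{d2 d1}
# with A and B antisymmetric and commuting, which canonicalizes to 0.
# It assumes this file is importable as sympy.combinatorics.tensor_can.
if __name__ == "__main__":  # pragma: no cover
    from sympy.combinatorics import Permutation
    from sympy.combinatorics.tensor_can import (
        get_symmetric_group_sgs, canonicalize)

    # BSGS of a rank-2 antisymmetric tensor
    base2a, gens2a = get_symmetric_group_sgs(2, 1)
    t0 = (base2a, gens2a, 1, 0)   # one tensor A, commuting
    t1 = (base2a, gens2a, 2, 0)   # two tensors B, commuting
    # permutation representing T with index order [d0, -d0, d1, -d1, d2, -d2]
    g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
    print(canonicalize(g, range(6), 0, t0, t1))   # expected: 0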
4935cf168c9efba87cd641dc459c5e69c3570a00bf6f976969a22d69fbb6c650
from __future__ import print_function, division from sympy.combinatorics import Permutation as Perm from sympy.combinatorics.perm_groups import PermutationGroup from sympy.core import Basic, Tuple from sympy.core.compatibility import as_int from sympy.sets import FiniteSet from sympy.utilities.iterables import (minlex, unflatten, flatten) rmul = Perm.rmul class Polyhedron(Basic): """ Represents the polyhedral symmetry group (PSG). The PSG is one of the symmetry groups of the Platonic solids. There are three polyhedral groups: the tetrahedral group of order 12, the octahedral group of order 24, and the icosahedral group of order 60. All doctests have been given in the docstring of the constructor of the object. References ========== http://mathworld.wolfram.com/PolyhedralGroup.html """ _edges = None def __new__(cls, corners, faces=[], pgroup=[]): """ The constructor of the Polyhedron group object. It takes up to three parameters: the corners, faces, and allowed transformations. The corners/vertices are entered as a list of arbitrary expressions that are used to identify each vertex. The faces are entered as a list of tuples of indices; a tuple of indices identifies the vertices which define the face. They should be entered in a cw or ccw order; they will be standardized by reversal and rotation to be give the lowest lexical ordering. If no faces are given then no edges will be computed. >>> from sympy.combinatorics.polyhedron import Polyhedron >>> Polyhedron(list('abc'), [(1, 2, 0)]).faces FiniteSet((0, 1, 2)) >>> Polyhedron(list('abc'), [(1, 0, 2)]).faces FiniteSet((0, 1, 2)) The allowed transformations are entered as allowable permutations of the vertices for the polyhedron. Instance of Permutations (as with faces) should refer to the supplied vertices by index. These permutation are stored as a PermutationGroup. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.interactive import init_printing >>> from sympy.abc import w, x, y, z >>> init_printing(pretty_print=False, perm_cyclic=False) Here we construct the Polyhedron object for a tetrahedron. >>> corners = [w, x, y, z] >>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)] Next, allowed transformations of the polyhedron must be given. This is given as permutations of vertices. Although the vertices of a tetrahedron can be numbered in 24 (4!) different ways, there are only 12 different orientations for a physical tetrahedron. The following permutations, applied once or twice, will generate all 12 of the orientations. (The identity permutation, Permutation(range(4)), is not included since it does not change the orientation of the vertices.) >>> pgroup = [Permutation([[0, 1, 2], [3]]), \ Permutation([[0, 1, 3], [2]]), \ Permutation([[0, 2, 3], [1]]), \ Permutation([[1, 2, 3], [0]]), \ Permutation([[0, 1], [2, 3]]), \ Permutation([[0, 2], [1, 3]]), \ Permutation([[0, 3], [1, 2]])] The Polyhedron is now constructed and demonstrated: >>> tetra = Polyhedron(corners, faces, pgroup) >>> tetra.size 4 >>> tetra.edges FiniteSet((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)) >>> tetra.corners (w, x, y, z) It can be rotated with an arbitrary permutation of vertices, e.g. the following permutation is not in the pgroup: >>> tetra.rotate(Permutation([0, 1, 3, 2])) >>> tetra.corners (w, x, z, y) An allowed permutation of the vertices can be constructed by repeatedly applying permutations from the pgroup to the vertices. 
Here is a demonstration that applying p and p**2 for every p in pgroup generates all the orientations of a tetrahedron and no others: >>> all = ( (w, x, y, z), \ (x, y, w, z), \ (y, w, x, z), \ (w, z, x, y), \ (z, w, y, x), \ (w, y, z, x), \ (y, z, w, x), \ (x, z, y, w), \ (z, y, x, w), \ (y, x, z, w), \ (x, w, z, y), \ (z, x, w, y) ) >>> got = [] >>> for p in (pgroup + [p**2 for p in pgroup]): ... h = Polyhedron(corners) ... h.rotate(p) ... got.append(h.corners) ... >>> set(got) == set(all) True The make_perm method of a PermutationGroup will randomly pick permutations, multiply them together, and return the permutation that can be applied to the polyhedron to give the orientation produced by those individual permutations. Here, 3 permutations are used: >>> tetra.pgroup.make_perm(3) # doctest: +SKIP Permutation([0, 3, 1, 2]) To select the permutations that should be used, supply a list of indices to the permutations in pgroup in the order they should be applied: >>> use = [0, 0, 2] >>> p002 = tetra.pgroup.make_perm(3, use) >>> p002 Permutation([1, 0, 3, 2]) Apply them one at a time: >>> tetra.reset() >>> for i in use: ... tetra.rotate(pgroup[i]) ... >>> tetra.vertices (x, w, z, y) >>> sequentially = tetra.vertices Apply the composite permutation: >>> tetra.reset() >>> tetra.rotate(p002) >>> tetra.corners (x, w, z, y) >>> tetra.corners in all and tetra.corners == sequentially True Notes ===== Defining permutation groups --------------------------- It is not necessary to enter any permutations, nor is necessary to enter a complete set of transformations. In fact, for a polyhedron, all configurations can be constructed from just two permutations. For example, the orientations of a tetrahedron can be generated from an axis passing through a vertex and face and another axis passing through a different vertex or from an axis passing through the midpoints of two edges opposite of each other. For simplicity of presentation, consider a square -- not a cube -- with vertices 1, 2, 3, and 4: 1-----2 We could think of axes of rotation being: | | 1) through the face | | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4 3-----4 3) lines 1-4 or 2-3 To determine how to write the permutations, imagine 4 cameras, one at each corner, labeled A-D: A B A B 1-----2 1-----3 vertex index: | | | | 1 0 | | | | 2 1 3-----4 2-----4 3 2 C D C D 4 3 original after rotation along 1-4 A diagonal and a face axis will be chosen for the "permutation group" from which any orientation can be constructed. >>> pgroup = [] Imagine a clockwise rotation when viewing 1-4 from camera A. The new orientation is (in camera-order): 1, 3, 2, 4 so the permutation is given using the *indices* of the vertices as: >>> pgroup.append(Permutation((0, 2, 1, 3))) Now imagine rotating clockwise when looking down an axis entering the center of the square as viewed. The new camera-order would be 3, 1, 4, 2 so the permutation is (using indices): >>> pgroup.append(Permutation((2, 0, 3, 1))) The square can now be constructed: ** use real-world labels for the vertices, entering them in camera order ** for the faces we use zero-based indices of the vertices in *edge-order* as the face is traversed; neither the direction nor the starting point matter -- the faces are only used to define edges (if so desired). 
>>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup) To rotate the square with a single permutation we can do: >>> square.rotate(square.pgroup[0]) >>> square.corners (1, 3, 2, 4) To use more than one permutation (or to use one permutation more than once) it is more convenient to use the make_perm method: >>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations >>> square.reset() # return to initial orientation >>> square.rotate(p011) >>> square.corners (4, 2, 3, 1) Thinking outside the box ------------------------ Although the Polyhedron object has a direct physical meaning, it actually has broader application. In the most general sense it is just a decorated PermutationGroup, allowing one to connect the permutations to something physical. For example, a Rubik's cube is not a proper polyhedron, but the Polyhedron class can be used to represent it in a way that helps to visualize the Rubik's cube. >>> from sympy.utilities.iterables import flatten, unflatten >>> from sympy import symbols >>> from sympy.combinatorics import RubikGroup >>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD']) >>> def show(): ... pairs = unflatten(r2.corners, 2) ... print(pairs[::2]) ... print(pairs[1::2]) ... >>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2)) >>> show() [(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)] [(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)] >>> r2.rotate(0) # cw rotation of F >>> show() [(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)] [(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)] Predefined Polyhedra ==================== For convenience, the vertices and faces are defined for the following standard solids along with a permutation group for transformations. When the polyhedron is oriented as indicated below, the vertices in a given horizontal plane are numbered in ccw direction, starting from the vertex that will give the lowest indices in a given face. (In the net of the vertices, indices preceded by "-" indicate replication of the lhs index in the net.) 
tetrahedron, tetrahedron_faces ------------------------------ 4 vertices (vertex up) net: 0 0-0 1 2 3-1 4 faces: (0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3) cube, cube_faces ---------------- 8 vertices (face up) net: 0 1 2 3-0 4 5 6 7-4 6 faces: (0, 1, 2, 3) (0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4) (4, 5, 6, 7) octahedron, octahedron_faces ---------------------------- 6 vertices (vertex up) net: 0 0 0-0 1 2 3 4-1 5 5 5-5 8 faces: (0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4) (1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5) dodecahedron, dodecahedron_faces -------------------------------- 20 vertices (vertex up) net: 0 1 2 3 4 -0 5 6 7 8 9 -5 14 10 11 12 13-14 15 16 17 18 19-15 12 faces: (0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6) (2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5) (5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12) (8, 12, 18, 19, 13) (9, 13, 19, 15, 14)(15, 16, 17, 18, 19) icosahedron, icosahedron_faces ------------------------------ 12 vertices (face up) net: 0 0 0 0 -0 1 2 3 4 5 -1 6 7 8 9 10 -6 11 11 11 11 -11 20 faces: (0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 4, 5) (0, 1, 5) (1, 2, 6) (2, 3, 7) (3, 4, 8) (4, 5, 9) (1, 5, 10) (2, 6, 7) (3, 7, 8) (4, 8, 9) (5, 9, 10) (1, 6, 10) (6, 7, 11) (7, 8, 11) (8, 9, 11) (9, 10, 11) (6, 10, 11) >>> from sympy.combinatorics.polyhedron import cube >>> cube.edges FiniteSet((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7)) If you want to use letters or other names for the corners you can still use the pre-calculated faces: >>> corners = list('abcdefgh') >>> Polyhedron(corners, cube.faces).corners (a, b, c, d, e, f, g, h) References ========== .. [1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf """ faces = [minlex(f, directed=False, is_set=True) for f in faces] corners, faces, pgroup = args = \ [Tuple(*a) for a in (corners, faces, pgroup)] obj = Basic.__new__(cls, *args) obj._corners = tuple(corners) # in order given obj._faces = FiniteSet(*faces) if pgroup and pgroup[0].size != len(corners): raise ValueError("Permutation size unequal to number of corners.") # use the identity permutation if none are given obj._pgroup = PermutationGroup(( pgroup or [Perm(range(len(corners)))] )) return obj @property def corners(self): """ Get the corners of the Polyhedron. The method ``vertices`` is an alias for ``corners``. Examples ======== >>> from sympy.combinatorics import Polyhedron >>> from sympy.abc import a, b, c, d >>> p = Polyhedron(list('abcd')) >>> p.corners == p.vertices == (a, b, c, d) True See Also ======== array_form, cyclic_form """ return self._corners vertices = corners @property def array_form(self): """Return the indices of the corners. The indices are given relative to the original position of corners. Examples ======== >>> from sympy.combinatorics import Permutation, Cycle >>> from sympy.combinatorics.polyhedron import tetrahedron >>> tetrahedron = tetrahedron.copy() >>> tetrahedron.array_form [0, 1, 2, 3] >>> tetrahedron.rotate(0) >>> tetrahedron.array_form [0, 2, 3, 1] >>> tetrahedron.pgroup[0].array_form [0, 2, 3, 1] See Also ======== corners, cyclic_form """ corners = list(self.args[0]) return [corners.index(c) for c in self.corners] @property def cyclic_form(self): """Return the indices of the corners in cyclic notation. The indices are given relative to the original position of corners. See Also ======== corners, array_form """ return Perm._af_new(self.array_form).cyclic_form @property def size(self): """ Get the number of corners of the Polyhedron. 
""" return len(self._corners) @property def faces(self): """ Get the faces of the Polyhedron. """ return self._faces @property def pgroup(self): """ Get the permutations of the Polyhedron. """ return self._pgroup @property def edges(self): """ Given the faces of the polyhedra we can get the edges. Examples ======== >>> from sympy.combinatorics import Polyhedron >>> from sympy.abc import a, b, c >>> corners = (a, b, c) >>> faces = [(0, 1, 2)] >>> Polyhedron(corners, faces).edges FiniteSet((0, 1), (0, 2), (1, 2)) """ if self._edges is None: output = set() for face in self.faces: for i in range(len(face)): edge = tuple(sorted([face[i], face[i - 1]])) output.add(edge) self._edges = FiniteSet(*output) return self._edges def rotate(self, perm): """ Apply a permutation to the polyhedron *in place*. The permutation may be given as a Permutation instance or an integer indicating which permutation from pgroup of the Polyhedron should be applied. This is an operation that is analogous to rotation about an axis by a fixed increment. Notes ===== When a Permutation is applied, no check is done to see if that is a valid permutation for the Polyhedron. For example, a cube could be given a permutation which effectively swaps only 2 vertices. A valid permutation (that rotates the object in a physical way) will be obtained if one only uses permutations from the ``pgroup`` of the Polyhedron. On the other hand, allowing arbitrary rotations (applications of permutations) gives a way to follow named elements rather than indices since Polyhedron allows vertices to be named while Permutation works only with indices. Examples ======== >>> from sympy.combinatorics import Polyhedron, Permutation >>> from sympy.combinatorics.polyhedron import cube >>> cube = cube.copy() >>> cube.corners (0, 1, 2, 3, 4, 5, 6, 7) >>> cube.rotate(0) >>> cube.corners (1, 2, 3, 0, 5, 6, 7, 4) A non-physical "rotation" that is not prohibited by this method: >>> cube.reset() >>> cube.rotate(Permutation([[1, 2]], size=8)) >>> cube.corners (0, 2, 1, 3, 4, 5, 6, 7) Polyhedron can be used to follow elements of set that are identified by letters instead of integers: >>> shadow = h5 = Polyhedron(list('abcde')) >>> p = Permutation([3, 0, 1, 2, 4]) >>> h5.rotate(p) >>> h5.corners (d, a, b, c, e) >>> _ == shadow.corners True >>> copy = h5.copy() >>> h5.rotate(p) >>> h5.corners == copy.corners False """ if not isinstance(perm, Perm): perm = self.pgroup[perm] # and we know it's valid else: if perm.size != self.size: raise ValueError('Polyhedron and Permutation sizes differ.') a = perm.array_form corners = [self.corners[a[i]] for i in range(len(self.corners))] self._corners = tuple(corners) def reset(self): """Return corners to their original positions. Examples ======== >>> from sympy.combinatorics.polyhedron import tetrahedron as T >>> T = T.copy() >>> T.corners (0, 1, 2, 3) >>> T.rotate(0) >>> T.corners (0, 2, 3, 1) >>> T.reset() >>> T.corners (0, 1, 2, 3) """ self._corners = self.args[0] def _pgroup_calcs(): """Return the permutation groups for each of the polyhedra and the face definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron, tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces, icosahedron_faces (This author didn't find and didn't know of a better way to do it though there likely is such a way.) Although only 2 permutations are needed for a polyhedron in order to generate all the possible orientations, a group of permutations is provided instead. 
A set of permutations is called a "group" if:: a*b = c (for any pair of permutations in the group, a and b, their product, c, is in the group) a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds) there is an identity permutation, I, such that I*a = a*I for all elements in the group a*b = I (the inverse of each permutation is also in the group) None of the polyhedron groups defined follow these definitions of a group. Instead, they are selected to contain those permutations whose powers alone will construct all orientations of the polyhedron, i.e. for permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``, ``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of permutation ``i``) generate all permutations of the polyhedron instead of mixed products like ``a*b``, ``a*b**2``, etc.... Note that for a polyhedron with n vertices, the valid permutations of the vertices exclude those that do not maintain its faces. e.g. the permutation BCDE of a square's four corners, ABCD, is a valid permutation while CBDE is not (because this would twist the square). Examples ======== The is_group checks for: closure, the presence of the Identity permutation, and the presence of the inverse for each of the elements in the group. This confirms that none of the polyhedra are true groups: >>> from sympy.combinatorics.polyhedron import ( ... tetrahedron, cube, octahedron, dodecahedron, icosahedron) ... >>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron) >>> [h.pgroup.is_group for h in polyhedra] ... [True, True, True, True, True] Although tests in polyhedron's test suite check that powers of the permutations in the groups generate all permutations of the vertices of the polyhedron, here we also demonstrate the powers of the given permutations create a complete group for the tetrahedron: >>> from sympy.combinatorics import Permutation, PermutationGroup >>> for h in polyhedra[:1]: ... G = h.pgroup ... perms = set() ... for g in G: ... for e in range(g.order()): ... p = tuple((g**e).array_form) ... perms.add(p) ... ... perms = [Permutation(p) for p in perms] ... assert PermutationGroup(perms).is_group In addition to doing the above, the tests in the suite confirm that the faces are all present after the application of each permutation. References ========== http://dogschool.tripod.com/trianglegroup.html """ def _pgroup_of_double(polyh, ordered_faces, pgroup): n = len(ordered_faces[0]) # the vertices of the double which sits inside a give polyhedron # can be found by tracking the faces of the outer polyhedron. 
# A map between face and the vertex of the double is made so that # after rotation the position of the vertices can be located fmap = dict(zip(ordered_faces, range(len(ordered_faces)))) flat_faces = flatten(ordered_faces) new_pgroup = [] for i, p in enumerate(pgroup): h = polyh.copy() h.rotate(p) c = h.corners # reorder corners in the order they should appear when # enumerating the faces reorder = unflatten([c[j] for j in flat_faces], n) # make them canonical reorder = [tuple(map(as_int, minlex(f, directed=False, is_set=True))) for f in reorder] # map face to vertex: the resulting list of vertices are the # permutation that we seek for the double new_pgroup.append(Perm([fmap[f] for f in reorder])) return new_pgroup tetrahedron_faces = [ (0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3 (1, 2, 3), # bottom ] # cw from top # _t_pgroup = [ Perm([[1, 2, 3], [0]]), # cw from top Perm([[0, 1, 2], [3]]), # cw from front face Perm([[0, 3, 2], [1]]), # cw from back right face Perm([[0, 3, 1], [2]]), # cw from back left face Perm([[0, 1], [2, 3]]), # through front left edge Perm([[0, 2], [1, 3]]), # through front right edge Perm([[0, 3], [1, 2]]), # through back edge ] tetrahedron = Polyhedron( range(4), tetrahedron_faces, _t_pgroup) cube_faces = [ (0, 1, 2, 3), # upper (0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4 (4, 5, 6, 7), # lower ] # U, D, F, B, L, R = up, down, front, back, left, right _c_pgroup = [Perm(p) for p in [ [1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U [4, 0, 3, 7, 5, 1, 2, 6], # cw from F face [4, 5, 1, 0, 7, 6, 2, 3], # cw from R face [1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge [6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge [6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge [3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge [4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge [6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge [0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex [5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex [5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex [7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL ]] cube = Polyhedron( range(8), cube_faces, _c_pgroup) octahedron_faces = [ (0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4 (1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4 ] octahedron = Polyhedron( range(6), octahedron_faces, _pgroup_of_double(cube, cube_faces, _c_pgroup)) dodecahedron_faces = [ (0, 1, 2, 3, 4), # top (0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5 (3, 4, 9, 13, 8), (0, 4, 9, 14, 5), (5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18, 12), # lower 5 (8, 12, 18, 19, 13), (9, 13, 19, 15, 14), (15, 16, 17, 18, 19) # bottom ] def _string_to_perm(s): rv = [Perm(range(20))] p = None for si in s: if si not in '01': count = int(si) - 1 else: count = 1 if si == '0': p = _f0 elif si == '1': p = _f1 rv.extend([p]*count) return Perm.rmul(*rv) # top face cw _f0 = Perm([ 1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11, 12, 13, 14, 10, 16, 17, 18, 19, 15]) # front face cw _f1 = Perm([ 5, 0, 4, 9, 14, 10, 1, 3, 13, 15, 6, 2, 8, 19, 16, 17, 11, 7, 12, 18]) # the strings below, like 0104 are shorthand for F0*F1*F0**4 and are # the remaining 4 face rotations, 15 edge permutations, and the # 10 vertex rotations. 
_dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in ''' 0104 140 014 0410 010 1403 03104 04103 102 120 1304 01303 021302 03130 0412041 041204103 04120410 041204104 041204102 10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()] dodecahedron = Polyhedron( range(20), dodecahedron_faces, _dodeca_pgroup) icosahedron_faces = [ (0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5), (1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9), (3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6), (6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)] icosahedron = Polyhedron( range(12), icosahedron_faces, _pgroup_of_double( dodecahedron, dodecahedron_faces, _dodeca_pgroup)) return (tetrahedron, cube, octahedron, dodecahedron, icosahedron, tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces, icosahedron_faces) # ----------------------------------------------------------------------- # Standard Polyhedron groups # # These are generated using _pgroup_calcs() above. However to save # import time we encode them explicitly here. # ----------------------------------------------------------------------- tetrahedron = Polyhedron( Tuple(0, 1, 2, 3), Tuple( Tuple(0, 1, 2), Tuple(0, 2, 3), Tuple(0, 1, 3), Tuple(1, 2, 3)), Tuple( Perm(1, 2, 3), Perm(3)(0, 1, 2), Perm(0, 3, 2), Perm(0, 3, 1), Perm(0, 1)(2, 3), Perm(0, 2)(1, 3), Perm(0, 3)(1, 2) )) cube = Polyhedron( Tuple(0, 1, 2, 3, 4, 5, 6, 7), Tuple( Tuple(0, 1, 2, 3), Tuple(0, 1, 5, 4), Tuple(1, 2, 6, 5), Tuple(2, 3, 7, 6), Tuple(0, 3, 7, 4), Tuple(4, 5, 6, 7)), Tuple( Perm(0, 1, 2, 3)(4, 5, 6, 7), Perm(0, 4, 5, 1)(2, 3, 7, 6), Perm(0, 4, 7, 3)(1, 5, 6, 2), Perm(0, 1)(2, 4)(3, 5)(6, 7), Perm(0, 6)(1, 2)(3, 5)(4, 7), Perm(0, 6)(1, 7)(2, 3)(4, 5), Perm(0, 3)(1, 7)(2, 4)(5, 6), Perm(0, 4)(1, 7)(2, 6)(3, 5), Perm(0, 6)(1, 5)(2, 4)(3, 7), Perm(1, 3, 4)(2, 7, 5), Perm(7)(0, 5, 2)(3, 4, 6), Perm(0, 5, 7)(1, 6, 3), Perm(0, 7, 2)(1, 4, 6))) octahedron = Polyhedron( Tuple(0, 1, 2, 3, 4, 5), Tuple( Tuple(0, 1, 2), Tuple(0, 2, 3), Tuple(0, 3, 4), Tuple(0, 1, 4), Tuple(1, 2, 5), Tuple(2, 3, 5), Tuple(3, 4, 5), Tuple(1, 4, 5)), Tuple( Perm(5)(1, 2, 3, 4), Perm(0, 4, 5, 2), Perm(0, 1, 5, 3), Perm(0, 1)(2, 4)(3, 5), Perm(0, 2)(1, 3)(4, 5), Perm(0, 3)(1, 5)(2, 4), Perm(0, 4)(1, 3)(2, 5), Perm(0, 5)(1, 4)(2, 3), Perm(0, 5)(1, 2)(3, 4), Perm(0, 4, 1)(2, 3, 5), Perm(0, 1, 2)(3, 4, 5), Perm(0, 2, 3)(1, 5, 4), Perm(0, 4, 3)(1, 5, 2))) dodecahedron = Polyhedron( Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), Tuple( Tuple(0, 1, 2, 3, 4), Tuple(0, 1, 6, 10, 5), Tuple(1, 2, 7, 11, 6), Tuple(2, 3, 8, 12, 7), Tuple(3, 4, 9, 13, 8), Tuple(0, 4, 9, 14, 5), Tuple(5, 10, 16, 15, 14), Tuple(6, 10, 16, 17, 11), Tuple(7, 11, 17, 18, 12), Tuple(8, 12, 18, 19, 13), Tuple(9, 13, 19, 15, 14), Tuple(15, 16, 17, 18, 19)), Tuple( Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19), Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12), Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13), Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14), Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10), Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11), Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19), Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19), Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16), Perm(0, 8)(1, 13)(2, 9)(3, 
4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17), Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18), Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18), Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19), Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15), Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14), Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17), Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11), Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13), Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10), Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 12), Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 11)(8, 10)(9, 14), Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17), Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18), Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19), Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15), Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16), Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18), Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19), Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9), Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16), Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17))) icosahedron = Polyhedron( Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), Tuple( Tuple(0, 1, 2), Tuple(0, 2, 3), Tuple(0, 3, 4), Tuple(0, 4, 5), Tuple(0, 1, 5), Tuple(1, 6, 7), Tuple(1, 2, 7), Tuple(2, 7, 8), Tuple(2, 3, 8), Tuple(3, 8, 9), Tuple(3, 4, 9), Tuple(4, 9, 10), Tuple(4, 5, 10), Tuple(5, 6, 10), Tuple(1, 5, 6), Tuple(6, 7, 11), Tuple(7, 8, 11), Tuple(8, 9, 11), Tuple(9, 10, 11), Tuple(6, 10, 11)), Tuple( Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10), Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8), Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9), Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5), Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6), Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7), Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11), Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11), Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10), Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11), Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11), Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9), Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10), Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10), Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7), Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8), Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7), Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9), Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6), Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8), Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10), Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11), Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11), Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10), Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11), Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11), Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8), Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9), Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10), Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4), Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5))) tetrahedron_faces = list(tuple(arg) for arg in tetrahedron.faces) cube_faces = list(tuple(arg) for arg in cube.faces) octahedron_faces = list(tuple(arg) for arg in octahedron.faces) dodecahedron_faces = list(tuple(arg) for arg 
in dodecahedron.faces)
icosahedron_faces = list(tuple(arg) for arg in icosahedron.faces)
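# --- Usage sketch (not part of the original module) ----------------------
# A minimal, hedged example repeating the cube rotation shown in the
# ``rotate``/``reset`` docstrings above; it assumes this file is
# importable as sympy.combinatorics.polyhedron.
if __name__ == "__main__":  # pragma: no cover
    from sympy.combinatorics.polyhedron import cube

    c = cube.copy()      # work on a copy so the module-level constant stays fixed
    print(c.corners)     # expected: (0, 1, 2, 3, 4, 5, 6, 7)
    c.rotate(0)          # apply the first permutation of the pgroup (cw from top)
    print(c.corners)     # expected: (1, 2, 3, 0, 5, 6, 7, 4)
    c.reset()            # corners return to their original positions
    print(c.corners)     # expected: (0, 1, 2, 3, 4, 5, 6, 7)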
f98a68caac23005257f73f713cc1e7f90ed446cb7405ab56a9bd32f7b478d21a
from __future__ import print_function, division from sympy.core import Basic, Dict, sympify from sympy.core.compatibility import as_int, default_sort_key from sympy.core.sympify import _sympify from sympy.functions.combinatorial.numbers import bell from sympy.matrices import zeros from sympy.sets.sets import FiniteSet, Union from sympy.utilities.iterables import flatten, group from collections import defaultdict class Partition(FiniteSet): """ This class represents an abstract partition. A partition is a set of disjoint sets whose union equals a given set. See Also ======== sympy.utilities.iterables.partitions, sympy.utilities.iterables.multiset_partitions """ _rank = None _partition = None def __new__(cls, *partition): """ Generates a new partition object. This method also verifies if the arguments passed are valid and raises a ValueError if they are not. Examples ======== Creating Partition from Python lists: >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3]) >>> a Partition(FiniteSet(1, 2), FiniteSet(3)) >>> a.partition [[1, 2], [3]] >>> len(a) 2 >>> a.members (1, 2, 3) Creating Partition from Python sets: >>> Partition({1, 2, 3}, {4, 5}) Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5)) Creating Partition from SymPy finite sets: >>> from sympy.sets.sets import FiniteSet >>> a = FiniteSet(1, 2, 3) >>> b = FiniteSet(4, 5) >>> Partition(a, b) Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5)) """ args = [] dups = False for arg in partition: if isinstance(arg, list): as_set = set(arg) if len(as_set) < len(arg): dups = True break # error below arg = as_set args.append(_sympify(arg)) if not all(isinstance(part, FiniteSet) for part in args): raise ValueError( "Each argument to Partition should be " \ "a list, set, or a FiniteSet") # sort so we have a canonical reference for RGS U = Union(*args) if dups or len(U) < sum(len(arg) for arg in args): raise ValueError("Partition contained duplicate elements.") obj = FiniteSet.__new__(cls, *args) obj.members = tuple(U) obj.size = len(U) return obj def sort_key(self, order=None): """Return a canonical key that can be used for sorting. Ordering is based on the size and sorted elements of the partition and ties are broken with the rank. Examples ======== >>> from sympy.utilities.iterables import default_sort_key >>> from sympy.combinatorics.partitions import Partition >>> from sympy.abc import x >>> a = Partition([1, 2]) >>> b = Partition([3, 4]) >>> c = Partition([1, x]) >>> d = Partition(list(range(4))) >>> l = [d, b, a + 1, a, c] >>> l.sort(key=default_sort_key); l [Partition(FiniteSet(1, 2)), Partition(FiniteSet(1), FiniteSet(2)), Partition(FiniteSet(1, x)), Partition(FiniteSet(3, 4)), Partition(FiniteSet(0, 1, 2, 3))] """ if order is None: members = self.members else: members = tuple(sorted(self.members, key=lambda w: default_sort_key(w, order))) return tuple(map(default_sort_key, (self.size, members, self.rank))) @property def partition(self): """Return partition as a sorted list of lists. Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> Partition([1], [2, 3]).partition [[1], [2, 3]] """ if self._partition is None: self._partition = sorted([sorted(p, key=default_sort_key) for p in self.args]) return self._partition def __add__(self, other): """ Return permutation whose rank is ``other`` greater than current rank, (mod the maximum rank for the set). 
Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3]) >>> a.rank 1 >>> (a + 1).rank 2 >>> (a + 100).rank 1 """ other = as_int(other) offset = self.rank + other result = RGS_unrank((offset) % RGS_enum(self.size), self.size) return Partition.from_rgs(result, self.members) def __sub__(self, other): """ Return permutation whose rank is ``other`` less than current rank, (mod the maximum rank for the set). Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3]) >>> a.rank 1 >>> (a - 1).rank 0 >>> (a - 100).rank 1 """ return self.__add__(-other) def __le__(self, other): """ Checks if a partition is less than or equal to the other based on rank. Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3, 4, 5]) >>> b = Partition([1], [2, 3], [4], [5]) >>> a.rank, b.rank (9, 34) >>> a <= a True >>> a <= b True """ return self.sort_key() <= sympify(other).sort_key() def __lt__(self, other): """ Checks if a partition is less than the other. Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3, 4, 5]) >>> b = Partition([1], [2, 3], [4], [5]) >>> a.rank, b.rank (9, 34) >>> a < b True """ return self.sort_key() < sympify(other).sort_key() @property def rank(self): """ Gets the rank of a partition. Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3], [4, 5]) >>> a.rank 13 """ if self._rank is not None: return self._rank self._rank = RGS_rank(self.RGS) return self._rank @property def RGS(self): """ Returns the "restricted growth string" of the partition. The RGS is returned as a list of indices, L, where L[i] indicates the block in which element i appears. For example, in a partition of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is [1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0. Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> a = Partition([1, 2], [3], [4, 5]) >>> a.members (1, 2, 3, 4, 5) >>> a.RGS (0, 0, 1, 2, 2) >>> a + 1 Partition(FiniteSet(1, 2), FiniteSet(3), FiniteSet(4), FiniteSet(5)) >>> _.RGS (0, 0, 1, 2, 3) """ rgs = {} partition = self.partition for i, part in enumerate(partition): for j in part: rgs[j] = i return tuple([rgs[i] for i in sorted( [i for p in partition for i in p], key=default_sort_key)]) @classmethod def from_rgs(self, rgs, elements): """ Creates a set partition from a restricted growth string. The indices given in rgs are assumed to be the index of the element as given in elements *as provided* (the elements are not sorted by this routine). Block numbering starts from 0. If any block was not referenced in ``rgs`` an error will be raised. 
Examples ======== >>> from sympy.combinatorics.partitions import Partition >>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde')) Partition(FiniteSet(c), FiniteSet(a, d), FiniteSet(b, e)) >>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead')) Partition(FiniteSet(e), FiniteSet(a, c), FiniteSet(b, d)) >>> a = Partition([1, 4], [2], [3, 5]) >>> Partition.from_rgs(a.RGS, a.members) Partition(FiniteSet(1, 4), FiniteSet(2), FiniteSet(3, 5)) """ if len(rgs) != len(elements): raise ValueError('mismatch in rgs and element lengths') max_elem = max(rgs) + 1 partition = [[] for i in range(max_elem)] j = 0 for i in rgs: partition[i].append(elements[j]) j += 1 if not all(p for p in partition): raise ValueError('some blocks of the partition were empty.') return Partition(*partition) class IntegerPartition(Basic): """ This class represents an integer partition. In number theory and combinatorics, a partition of a positive integer, ``n``, also called an integer partition, is a way of writing ``n`` as a list of positive integers that sum to n. Two partitions that differ only in the order of summands are considered to be the same partition; if order matters then the partitions are referred to as compositions. For example, 4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1]; the compositions [1, 2, 1] and [1, 1, 2] are the same as partition [2, 1, 1]. See Also ======== sympy.utilities.iterables.partitions, sympy.utilities.iterables.multiset_partitions References ========== https://en.wikipedia.org/wiki/Partition_%28number_theory%29 """ _dict = None _keys = None def __new__(cls, partition, integer=None): """ Generates a new IntegerPartition object from a list or dictionary. The partition can be given as a list of positive integers or a dictionary of (integer, multiplicity) items. If the partition is preceded by an integer an error will be raised if the partition does not sum to that given integer. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> a = IntegerPartition([5, 4, 3, 1, 1]) >>> a IntegerPartition(14, (5, 4, 3, 1, 1)) >>> print(a) [5, 4, 3, 1, 1] >>> IntegerPartition({1:3, 2:1}) IntegerPartition(5, (2, 1, 1, 1)) If the value that the partition should sum to is given first, a check will be made to see n error will be raised if there is a discrepancy: >>> IntegerPartition(10, [5, 4, 3, 1]) Traceback (most recent call last): ... ValueError: The partition is not valid """ if integer is not None: integer, partition = partition, integer if isinstance(partition, (dict, Dict)): _ = [] for k, v in sorted(list(partition.items()), reverse=True): if not v: continue k, v = as_int(k), as_int(v) _.extend([k]*v) partition = tuple(_) else: partition = tuple(sorted(map(as_int, partition), reverse=True)) sum_ok = False if integer is None: integer = sum(partition) sum_ok = True else: integer = as_int(integer) if not sum_ok and sum(partition) != integer: raise ValueError("Partition did not add to %s" % integer) if any(i < 1 for i in partition): raise ValueError("The summands must all be positive.") obj = Basic.__new__(cls, integer, partition) obj.partition = list(partition) obj.integer = integer return obj def prev_lex(self): """Return the previous partition of the integer, n, in lexical order, wrapping around to [1, ..., 1] if the partition is [n]. 
Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> p = IntegerPartition([4]) >>> print(p.prev_lex()) [3, 1] >>> p.partition > p.prev_lex().partition True """ d = defaultdict(int) d.update(self.as_dict()) keys = self._keys if keys == [1]: return IntegerPartition({self.integer: 1}) if keys[-1] != 1: d[keys[-1]] -= 1 if keys[-1] == 2: d[1] = 2 else: d[keys[-1] - 1] = d[1] = 1 else: d[keys[-2]] -= 1 left = d[1] + keys[-2] new = keys[-2] d[1] = 0 while left: new -= 1 if left - new >= 0: d[new] += left//new left -= d[new]*new return IntegerPartition(self.integer, d) def next_lex(self): """Return the next partition of the integer, n, in lexical order, wrapping around to [n] if the partition is [1, ..., 1]. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> p = IntegerPartition([3, 1]) >>> print(p.next_lex()) [4] >>> p.partition < p.next_lex().partition True """ d = defaultdict(int) d.update(self.as_dict()) key = self._keys a = key[-1] if a == self.integer: d.clear() d[1] = self.integer elif a == 1: if d[a] > 1: d[a + 1] += 1 d[a] -= 2 else: b = key[-2] d[b + 1] += 1 d[1] = (d[b] - 1)*b d[b] = 0 else: if d[a] > 1: if len(key) == 1: d.clear() d[a + 1] = 1 d[1] = self.integer - a - 1 else: a1 = a + 1 d[a1] += 1 d[1] = d[a]*a - a1 d[a] = 0 else: b = key[-2] b1 = b + 1 d[b1] += 1 need = d[b]*b + d[a]*a - b1 d[a] = d[b] = 0 d[1] = need return IntegerPartition(self.integer, d) def as_dict(self): """Return the partition as a dictionary whose keys are the partition integers and the values are the multiplicity of that integer. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict() {1: 3, 2: 1, 3: 4} """ if self._dict is None: groups = group(self.partition, multiple=False) self._keys = [g[0] for g in groups] self._dict = dict(groups) return self._dict @property def conjugate(self): """ Computes the conjugate partition of itself. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> a = IntegerPartition([6, 3, 3, 2, 1]) >>> a.conjugate [5, 4, 3, 1, 1, 1] """ j = 1 temp_arr = list(self.partition) + [0] k = temp_arr[0] b = [0]*k while k > 0: while k > temp_arr[j]: b[k - 1] = j k -= 1 j += 1 return b def __lt__(self, other): """Return True if self is less than other when the partition is listed from smallest to biggest. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> a = IntegerPartition([3, 1]) >>> a < a False >>> b = a.next_lex() >>> a < b True >>> a == b False """ return list(reversed(self.partition)) < list(reversed(other.partition)) def __le__(self, other): """Return True if self is less than other when the partition is listed from smallest to biggest. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> a = IntegerPartition([4]) >>> a <= a True """ return list(reversed(self.partition)) <= list(reversed(other.partition)) def as_ferrers(self, char='#'): """ Prints the ferrer diagram of a partition. Examples ======== >>> from sympy.combinatorics.partitions import IntegerPartition >>> print(IntegerPartition([1, 1, 5]).as_ferrers()) ##### # # """ return "\n".join([char*i for i in self.partition]) def __str__(self): return str(list(self.partition)) def random_integer_partition(n, seed=None): """ Generates a random integer partition summing to ``n`` as a list of reverse-sorted integers. 
Examples ======== >>> from sympy.combinatorics.partitions import random_integer_partition For the following, a seed is given so a known value can be shown; in practice, the seed would not be given. >>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1]) [85, 12, 2, 1] >>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1]) [5, 3, 1, 1] >>> random_integer_partition(1) [1] """ from sympy.testing.randtest import _randint n = as_int(n) if n < 1: raise ValueError('n must be a positive integer') randint = _randint(seed) partition = [] while (n > 0): k = randint(1, n) mult = randint(1, n//k) partition.append((k, mult)) n -= k*mult partition.sort(reverse=True) partition = flatten([[k]*m for k, m in partition]) return partition def RGS_generalized(m): """ Computes the m + 1 generalized unrestricted growth strings and returns them as rows in matrix. Examples ======== >>> from sympy.combinatorics.partitions import RGS_generalized >>> RGS_generalized(6) Matrix([ [ 1, 1, 1, 1, 1, 1, 1], [ 1, 2, 3, 4, 5, 6, 0], [ 2, 5, 10, 17, 26, 0, 0], [ 5, 15, 37, 77, 0, 0, 0], [ 15, 52, 151, 0, 0, 0, 0], [ 52, 203, 0, 0, 0, 0, 0], [203, 0, 0, 0, 0, 0, 0]]) """ d = zeros(m + 1) for i in range(0, m + 1): d[0, i] = 1 for i in range(1, m + 1): for j in range(m): if j <= m - i: d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1] else: d[i, j] = 0 return d def RGS_enum(m): """ RGS_enum computes the total number of restricted growth strings possible for a superset of size m. Examples ======== >>> from sympy.combinatorics.partitions import RGS_enum >>> from sympy.combinatorics.partitions import Partition >>> RGS_enum(4) 15 >>> RGS_enum(5) 52 >>> RGS_enum(6) 203 We can check that the enumeration is correct by actually generating the partitions. Here, the 15 partitions of 4 items are generated: >>> a = Partition(list(range(4))) >>> s = set() >>> for i in range(20): ... s.add(a) ... a += 1 ... >>> assert len(s) == 15 """ if (m < 1): return 0 elif (m == 1): return 1 else: return bell(m) def RGS_unrank(rank, m): """ Gives the unranked restricted growth string for a given superset size. Examples ======== >>> from sympy.combinatorics.partitions import RGS_unrank >>> RGS_unrank(14, 4) [0, 1, 2, 3] >>> RGS_unrank(0, 4) [0, 0, 0, 0] """ if m < 1: raise ValueError("The superset size must be >= 1") if rank < 0 or RGS_enum(m) <= rank: raise ValueError("Invalid arguments") L = [1] * (m + 1) j = 1 D = RGS_generalized(m) for i in range(2, m + 1): v = D[m - i, j] cr = j*v if cr <= rank: L[i] = j + 1 rank -= cr j += 1 else: L[i] = int(rank / v + 1) rank %= v return [x - 1 for x in L[1:]] def RGS_rank(rgs): """ Computes the rank of a restricted growth string. Examples ======== >>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank >>> RGS_rank([0, 1, 2, 1, 3]) 42 >>> RGS_rank(RGS_unrank(4, 7)) 4 """ rgs_size = len(rgs) rank = 0 D = RGS_generalized(rgs_size) for i in range(1, rgs_size): n = len(rgs[(i + 1):]) m = max(rgs[0:i]) rank += D[n, m + 1] * rgs[i] return rank
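# --- Usage sketch (not part of the original module) ----------------------
# A minimal, hedged example tying Partition to the RGS_* helpers, reusing
# values that appear in the docstrings above; it assumes this file is
# importable as sympy.combinatorics.partitions.
if __name__ == "__main__":  # pragma: no cover
    from sympy.combinatorics.partitions import (
        Partition, RGS_enum, RGS_rank, RGS_unrank)

    a = Partition([1, 2], [3], [4, 5])
    print(a.RGS)                        # expected: (0, 0, 1, 2, 2)
    print(a.rank)                       # expected: 13
    print(RGS_rank(list(a.RGS)))        # expected: 13 (rank is computed from the RGS)
    print(RGS_unrank(a.rank, a.size))   # expected: [0, 0, 1, 2, 2]
    print(RGS_enum(a.size))             # expected: 52, i.e. bell(5)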
833592022c59cd01d00c2f1a0506df4440393cfe099b44946f6d5493b0db54da
from __future__ import print_function, division from sympy.combinatorics.permutations import Permutation, _af_invert, _af_rmul from sympy.ntheory import isprime rmul = Permutation.rmul _af_new = Permutation._af_new ############################################ # # Utilities for computational group theory # ############################################ def _base_ordering(base, degree): r""" Order `\{0, 1, ..., n-1\}` so that base points come first and in order. Parameters ========== ``base`` - the base ``degree`` - the degree of the associated permutation group Returns ======= A list ``base_ordering`` such that ``base_ordering[point]`` is the number of ``point`` in the ordering. Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.util import _base_ordering >>> S = SymmetricGroup(4) >>> S.schreier_sims() >>> _base_ordering(S.base, S.degree) [0, 1, 2, 3] Notes ===== This is used in backtrack searches, when we define a relation `<<` on the underlying set for a permutation group of degree `n`, `\{0, 1, ..., n-1\}`, so that if `(b_1, b_2, ..., b_k)` is a base we have `b_i << b_j` whenever `i<j` and `b_i << a` for all `i\in\{1,2, ..., k\}` and `a` is not in the base. The idea is developed and applied to backtracking algorithms in [1], pp.108-132. The points that are not in the base are taken in increasing order. References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" """ base_len = len(base) ordering = [0]*degree for i in range(base_len): ordering[base[i]] = i current = base_len for i in range(degree): if i not in base: ordering[i] = current current += 1 return ordering def _check_cycles_alt_sym(perm): """ Checks for cycles of prime length p with n/2 < p < n-2. Here `n` is the degree of the permutation. This is a helper function for the function is_alt_sym from sympy.combinatorics.perm_groups. Examples ======== >>> from sympy.combinatorics.util import _check_cycles_alt_sym >>> from sympy.combinatorics.permutations import Permutation >>> a = Permutation([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12]]) >>> _check_cycles_alt_sym(a) False >>> b = Permutation([[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10]]) >>> _check_cycles_alt_sym(b) True See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.is_alt_sym """ n = perm.size af = perm.array_form current_len = 0 total_len = 0 used = set() for i in range(n//2): if not i in used and i < n//2 - total_len: current_len = 1 used.add(i) j = i while af[j] != i: current_len += 1 j = af[j] used.add(j) total_len += current_len if current_len > n//2 and current_len < n - 2 and isprime(current_len): return True return False def _distribute_gens_by_base(base, gens): r""" Distribute the group elements ``gens`` by membership in basic stabilizers. Notice that for a base `(b_1, b_2, ..., b_k)`, the basic stabilizers are defined as `G^{(i)} = G_{b_1, ..., b_{i-1}}` for `i \in\{1, 2, ..., k\}`. Parameters ========== ``base`` - a sequence of points in `\{0, 1, ..., n-1\}` ``gens`` - a list of elements of a permutation group of degree `n`. Returns ======= List of length `k`, where `k` is the length of ``base``. The `i`-th entry contains those elements in ``gens`` which fix the first `i` elements of ``base`` (so that the `0`-th entry is equal to ``gens`` itself). If no element fixes the first `i` elements of ``base``, the `i`-th element is set to a list containing the identity element. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import DihedralGroup >>> from sympy.combinatorics.util import _distribute_gens_by_base >>> D = DihedralGroup(3) >>> D.schreier_sims() >>> D.strong_gens [(0 1 2), (0 2), (1 2)] >>> D.base [0, 1] >>> _distribute_gens_by_base(D.base, D.strong_gens) [[(0 1 2), (0 2), (1 2)], [(1 2)]] See Also ======== _strong_gens_from_distr, _orbits_transversals_from_bsgs, _handle_precomputed_bsgs """ base_len = len(base) degree = gens[0].size stabs = [[] for _ in range(base_len)] max_stab_index = 0 for gen in gens: j = 0 while j < base_len - 1 and gen._array_form[base[j]] == base[j]: j += 1 if j > max_stab_index: max_stab_index = j for k in range(j + 1): stabs[k].append(gen) for i in range(max_stab_index + 1, base_len): stabs[i].append(_af_new(list(range(degree)))) return stabs def _handle_precomputed_bsgs(base, strong_gens, transversals=None, basic_orbits=None, strong_gens_distr=None): """ Calculate BSGS-related structures from those present. The base and strong generating set must be provided; if any of the transversals, basic orbits or distributed strong generators are not provided, they will be calculated from the base and strong generating set. Parameters ========== ``base`` - the base ``strong_gens`` - the strong generators ``transversals`` - basic transversals ``basic_orbits`` - basic orbits ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers Returns ======= ``(transversals, basic_orbits, strong_gens_distr)`` where ``transversals`` are the basic transversals, ``basic_orbits`` are the basic orbits, and ``strong_gens_distr`` are the strong generators distributed by membership in basic stabilizers. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import DihedralGroup >>> from sympy.combinatorics.util import _handle_precomputed_bsgs >>> D = DihedralGroup(3) >>> D.schreier_sims() >>> _handle_precomputed_bsgs(D.base, D.strong_gens, ... basic_orbits=D.basic_orbits) ([{0: (2), 1: (0 1 2), 2: (0 2)}, {1: (2), 2: (1 2)}], [[0, 1, 2], [1, 2]], [[(0 1 2), (0 2), (1 2)], [(1 2)]]) See Also ======== _orbits_transversals_from_bsgs, _distribute_gens_by_base """ if strong_gens_distr is None: strong_gens_distr = _distribute_gens_by_base(base, strong_gens) if transversals is None: if basic_orbits is None: basic_orbits, transversals = \ _orbits_transversals_from_bsgs(base, strong_gens_distr) else: transversals = \ _orbits_transversals_from_bsgs(base, strong_gens_distr, transversals_only=True) else: if basic_orbits is None: base_len = len(base) basic_orbits = [None]*base_len for i in range(base_len): basic_orbits[i] = list(transversals[i].keys()) return transversals, basic_orbits, strong_gens_distr def _orbits_transversals_from_bsgs(base, strong_gens_distr, transversals_only=False, slp=False): """ Compute basic orbits and transversals from a base and strong generating set. The generators are provided as distributed across the basic stabilizers. If the optional argument ``transversals_only`` is set to True, only the transversals are returned. Parameters ========== ``base`` - the base ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers ``transversals_only`` - a flag switching between returning only the transversals/ both orbits and transversals ``slp`` - if ``True``, return a list of dictionaries containing the generator presentations of the elements of the transversals, i.e. 
the list of indices of generators from `strong_gens_distr[i]` such that their product is the relevant transversal element Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.util import _orbits_transversals_from_bsgs >>> from sympy.combinatorics.util import (_orbits_transversals_from_bsgs, ... _distribute_gens_by_base) >>> S = SymmetricGroup(3) >>> S.schreier_sims() >>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens) >>> _orbits_transversals_from_bsgs(S.base, strong_gens_distr) ([[0, 1, 2], [1, 2]], [{0: (2), 1: (0 1 2), 2: (0 2 1)}, {1: (2), 2: (1 2)}]) See Also ======== _distribute_gens_by_base, _handle_precomputed_bsgs """ from sympy.combinatorics.perm_groups import _orbit_transversal base_len = len(base) degree = strong_gens_distr[0][0].size transversals = [None]*base_len slps = [None]*base_len if transversals_only is False: basic_orbits = [None]*base_len for i in range(base_len): transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i], base[i], pairs=True, slp=True) transversals[i] = dict(transversals[i]) if transversals_only is False: basic_orbits[i] = list(transversals[i].keys()) if transversals_only: return transversals else: if not slp: return basic_orbits, transversals return basic_orbits, transversals, slps def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None): """ Remove redundant generators from a strong generating set. Parameters ========== ``base`` - a base ``strong_gens`` - a strong generating set relative to ``base`` ``basic_orbits`` - basic orbits ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers Returns ======= A strong generating set with respect to ``base`` which is a subset of ``strong_gens``. Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.util import _remove_gens >>> from sympy.combinatorics.testutil import _verify_bsgs >>> S = SymmetricGroup(15) >>> base, strong_gens = S.schreier_sims_incremental() >>> new_gens = _remove_gens(base, strong_gens) >>> len(new_gens) 14 >>> _verify_bsgs(S, base, new_gens) True Notes ===== This procedure is outlined in [1],p.95. References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" """ from sympy.combinatorics.perm_groups import _orbit base_len = len(base) degree = strong_gens[0].size if strong_gens_distr is None: strong_gens_distr = _distribute_gens_by_base(base, strong_gens) if basic_orbits is None: basic_orbits = [] for i in range(base_len): basic_orbit = _orbit(degree, strong_gens_distr[i], base[i]) basic_orbits.append(basic_orbit) strong_gens_distr.append([]) res = strong_gens[:] for i in range(base_len - 1, -1, -1): gens_copy = strong_gens_distr[i][:] for gen in strong_gens_distr[i]: if gen not in strong_gens_distr[i + 1]: temp_gens = gens_copy[:] temp_gens.remove(gen) if temp_gens == []: continue temp_orbit = _orbit(degree, temp_gens, base[i]) if temp_orbit == basic_orbits[i]: gens_copy.remove(gen) res.remove(gen) return res def _strip(g, base, orbits, transversals): """ Attempt to decompose a permutation using a (possibly partial) BSGS structure. This is done by treating the sequence ``base`` as an actual base, and the orbits ``orbits`` and transversals ``transversals`` as basic orbits and transversals relative to it. This process is called "sifting". 
A sift is unsuccessful when a certain orbit element is not found or when after the sift the decomposition doesn't end with the identity element. The argument ``transversals`` is a list of dictionaries that provides transversal elements for the orbits ``orbits``. Parameters ========== ``g`` - permutation to be decomposed ``base`` - sequence of points ``orbits`` - a list in which the ``i``-th entry is an orbit of ``base[i]`` under some subgroup of the pointwise stabilizer of ` `base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit in this function since the only information we need is encoded in the orbits and transversals ``transversals`` - a list of orbit transversals associated with the orbits ``orbits``. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.util import _strip >>> S = SymmetricGroup(5) >>> S.schreier_sims() >>> g = Permutation([0, 2, 3, 1, 4]) >>> _strip(g, S.base, S.basic_orbits, S.basic_transversals) ((4), 5) Notes ===== The algorithm is described in [1],pp.89-90. The reason for returning both the current state of the element being decomposed and the level at which the sifting ends is that they provide important information for the randomized version of the Schreier-Sims algorithm. References ========== [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random """ h = g._array_form base_len = len(base) for i in range(base_len): beta = h[base[i]] if beta == base[i]: continue if beta not in orbits[i]: return _af_new(h), i + 1 u = transversals[i][beta]._array_form h = _af_rmul(_af_invert(u), h) return _af_new(h), base_len + 1 def _strip_af(h, base, orbits, transversals, j, slp=[], slps={}): """ optimized _strip, with h, transversals and result in array form if the stripped elements is the identity, it returns False, base_len + 1 j h[base[i]] == base[i] for i <= j """ base_len = len(base) for i in range(j+1, base_len): beta = h[base[i]] if beta == base[i]: continue if beta not in orbits[i]: if not slp: return h, i + 1 return h, i + 1, slp u = transversals[i][beta] if h == u: if not slp: return False, base_len + 1 return False, base_len + 1, slp h = _af_rmul(_af_invert(u), h) if slp: u_slp = slps[i][beta][:] u_slp.reverse() u_slp = [(i, (g,)) for g in u_slp] slp = u_slp + slp if not slp: return h, base_len + 1 return h, base_len + 1, slp def _strong_gens_from_distr(strong_gens_distr): """ Retrieve strong generating set from generators of basic stabilizers. This is just the union of the generators of the first and second basic stabilizers. Parameters ========== ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.util import (_strong_gens_from_distr, ... 
_distribute_gens_by_base) >>> S = SymmetricGroup(3) >>> S.schreier_sims() >>> S.strong_gens [(0 1 2), (2)(0 1), (1 2)] >>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens) >>> _strong_gens_from_distr(strong_gens_distr) [(0 1 2), (2)(0 1), (1 2)] See Also ======== _distribute_gens_by_base """ if len(strong_gens_distr) == 1: return strong_gens_distr[0][:] else: result = strong_gens_distr[0] for gen in strong_gens_distr[1]: if gen not in result: result.append(gen) return result
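# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: membership testing by
# sifting with ``_strip``.  Only helpers defined in this file plus one named
# group are used; the name ``_demo_sift_membership`` is hypothetical and the
# function is never called on import.  The import is kept local, mirroring the
# module's own practice of avoiding circular imports with perm_groups.
def _demo_sift_membership():
    from sympy.combinatorics.named_groups import AlternatingGroup
    G = AlternatingGroup(4)
    G.schreier_sims()
    base, orbits, transversals = G.base, G.basic_orbits, G.basic_transversals

    # An element of the group sifts through the complete BSGS to the identity,
    # and the returned level is len(base) + 1.
    even = Permutation([1, 2, 0, 3])      # the 3-cycle (0 1 2), lies in A4
    res, level = _strip(even, base, orbits, transversals)
    assert res.is_Identity and level == len(base) + 1

    # An element outside the group either drops out early (some image is not
    # in the corresponding basic orbit) or ends with a non-trivial residue.
    odd = Permutation([1, 0, 2, 3])       # the transposition (0 1), not in A4
    res, _ = _strip(odd, base, orbits, transversals)
    assert not res.is_Identity
    return base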
1ad29e05e2c5da65bd5f2cc15de290c54f8f78399bae6791c502e533aec04974
from __future__ import print_function, division from sympy.combinatorics.group_constructs import DirectProduct from sympy.combinatorics.perm_groups import PermutationGroup from sympy.combinatorics.permutations import Permutation _af_new = Permutation._af_new def AbelianGroup(*cyclic_orders): """ Returns the direct product of cyclic groups with the given orders. According to the structure theorem for finite abelian groups ([1]), every finite abelian group can be written as the direct product of finitely many cyclic groups. Examples ======== >>> from sympy.combinatorics import Permutation >>> from sympy.combinatorics.named_groups import AbelianGroup >>> AbelianGroup(3, 4) PermutationGroup([ (6)(0 1 2), (3 4 5 6)]) >>> _.is_group True See Also ======== DirectProduct References ========== .. [1] http://groupprops.subwiki.org/wiki/Structure_theorem_for_finitely_generated_abelian_groups """ groups = [] degree = 0 order = 1 for size in cyclic_orders: degree += size order *= size groups.append(CyclicGroup(size)) G = DirectProduct(*groups) G._is_abelian = True G._degree = degree G._order = order return G def AlternatingGroup(n): """ Generates the alternating group on ``n`` elements as a permutation group. For ``n > 2``, the generators taken are ``(0 1 2), (0 1 2 ... n-1)`` for ``n`` odd and ``(0 1 2), (1 2 ... n-1)`` for ``n`` even (See [1], p.31, ex.6.9.). After the group is generated, some of its basic properties are set. The cases ``n = 1, 2`` are handled separately. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> G = AlternatingGroup(4) >>> G.is_group True >>> a = list(G.generate_dimino()) >>> len(a) 12 >>> all(perm.is_even for perm in a) True See Also ======== SymmetricGroup, CyclicGroup, DihedralGroup References ========== [1] Armstrong, M. "Groups and Symmetry" """ # small cases are special if n in (1, 2): return PermutationGroup([Permutation([0])]) a = list(range(n)) a[0], a[1], a[2] = a[1], a[2], a[0] gen1 = a if n % 2: a = list(range(1, n)) a.append(0) gen2 = a else: a = list(range(2, n)) a.append(1) a.insert(0, 0) gen2 = a gens = [gen1, gen2] if gen1 == gen2: gens = gens[:1] G = PermutationGroup([_af_new(a) for a in gens], dups=False) if n < 4: G._is_abelian = True G._is_nilpotent = True else: G._is_abelian = False G._is_nilpotent = False if n < 5: G._is_solvable = True else: G._is_solvable = False G._degree = n G._is_transitive = True G._is_alt = True return G def CyclicGroup(n): """ Generates the cyclic group of order ``n`` as a permutation group. The generator taken is the ``n``-cycle ``(0 1 2 ... n-1)`` (in cycle notation). After the group is generated, some of its basic properties are set. Examples ======== >>> from sympy.combinatorics.named_groups import CyclicGroup >>> G = CyclicGroup(6) >>> G.is_group True >>> G.order() 6 >>> list(G.generate_schreier_sims(af=True)) [[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0], [2, 3, 4, 5, 0, 1], [3, 4, 5, 0, 1, 2], [4, 5, 0, 1, 2, 3], [5, 0, 1, 2, 3, 4]] See Also ======== SymmetricGroup, DihedralGroup, AlternatingGroup """ a = list(range(1, n)) a.append(0) gen = _af_new(a) G = PermutationGroup([gen]) G._is_abelian = True G._is_nilpotent = True G._is_solvable = True G._degree = n G._is_transitive = True G._order = n return G def DihedralGroup(n): r""" Generates the dihedral group `D_n` as a permutation group. The dihedral group `D_n` is the group of symmetries of the regular ``n``-gon. The generators taken are the ``n``-cycle ``a = (0 1 2 ... 
n-1)`` (a rotation of the ``n``-gon) and ``b = (0 n-1)(1 n-2)...`` (a reflection of the ``n``-gon) in cycle rotation. It is easy to see that these satisfy ``a**n = b**2 = 1`` and ``bab = ~a`` so they indeed generate `D_n` (See [1]). After the group is generated, some of its basic properties are set. Examples ======== >>> from sympy.combinatorics.named_groups import DihedralGroup >>> G = DihedralGroup(5) >>> G.is_group True >>> a = list(G.generate_dimino()) >>> [perm.cyclic_form for perm in a] [[], [[0, 1, 2, 3, 4]], [[0, 2, 4, 1, 3]], [[0, 3, 1, 4, 2]], [[0, 4, 3, 2, 1]], [[0, 4], [1, 3]], [[1, 4], [2, 3]], [[0, 1], [2, 4]], [[0, 2], [3, 4]], [[0, 3], [1, 2]]] See Also ======== SymmetricGroup, CyclicGroup, AlternatingGroup References ========== [1] https://en.wikipedia.org/wiki/Dihedral_group """ # small cases are special if n == 1: return PermutationGroup([Permutation([1, 0])]) if n == 2: return PermutationGroup([Permutation([1, 0, 3, 2]), Permutation([2, 3, 0, 1]), Permutation([3, 2, 1, 0])]) a = list(range(1, n)) a.append(0) gen1 = _af_new(a) a = list(range(n)) a.reverse() gen2 = _af_new(a) G = PermutationGroup([gen1, gen2]) # if n is a power of 2, group is nilpotent if n & (n-1) == 0: G._is_nilpotent = True else: G._is_nilpotent = False G._is_abelian = False G._is_solvable = True G._degree = n G._is_transitive = True G._order = 2*n return G def SymmetricGroup(n): """ Generates the symmetric group on ``n`` elements as a permutation group. The generators taken are the ``n``-cycle ``(0 1 2 ... n-1)`` and the transposition ``(0 1)`` (in cycle notation). (See [1]). After the group is generated, some of its basic properties are set. Examples ======== >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> G = SymmetricGroup(4) >>> G.is_group True >>> G.order() 24 >>> list(G.generate_schreier_sims(af=True)) [[0, 1, 2, 3], [1, 2, 3, 0], [2, 3, 0, 1], [3, 1, 2, 0], [0, 2, 3, 1], [1, 3, 0, 2], [2, 0, 1, 3], [3, 2, 0, 1], [0, 3, 1, 2], [1, 0, 2, 3], [2, 1, 3, 0], [3, 0, 1, 2], [0, 1, 3, 2], [1, 2, 0, 3], [2, 3, 1, 0], [3, 1, 0, 2], [0, 2, 1, 3], [1, 3, 2, 0], [2, 0, 3, 1], [3, 2, 1, 0], [0, 3, 2, 1], [1, 0, 3, 2], [2, 1, 0, 3], [3, 0, 2, 1]] See Also ======== CyclicGroup, DihedralGroup, AlternatingGroup References ========== .. [1] https://en.wikipedia.org/wiki/Symmetric_group#Generators_and_relations """ if n == 1: G = PermutationGroup([Permutation([0])]) elif n == 2: G = PermutationGroup([Permutation([1, 0])]) else: a = list(range(1, n)) a.append(0) gen1 = _af_new(a) a = list(range(n)) a[0], a[1] = a[1], a[0] gen2 = _af_new(a) G = PermutationGroup([gen1, gen2]) if n < 3: G._is_abelian = True G._is_nilpotent = True else: G._is_abelian = False G._is_nilpotent = False if n < 5: G._is_solvable = True else: G._is_solvable = False G._degree = n G._is_transitive = True G._is_sym = True return G def RubikGroup(n): """Return a group of Rubik's cube generators >>> from sympy.combinatorics.named_groups import RubikGroup >>> RubikGroup(2).is_group True """ from sympy.combinatorics.generators import rubik if n <= 1: raise ValueError("Invalid cube. n has to be greater than 1") return PermutationGroup(rubik(n))
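# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a few sanity checks on
# the constructors above, relying only on their documented behaviour.  The
# name ``_demo_named_groups`` is hypothetical and the function is never called
# on import.
def _demo_named_groups():
    # |S_n| = n!, |A_n| = n!/2, |D_n| = 2n and |C_n| = n.
    assert SymmetricGroup(4).order() == 24
    assert AlternatingGroup(5).order() == 60
    assert DihedralGroup(6).order() == 12
    assert CyclicGroup(7).order() == 7

    # AbelianGroup(m, n) is C_m x C_n acting on m + n points; it has order
    # m*n and is cyclic exactly when gcd(m, n) == 1 (3 and 4 are coprime).
    G = AbelianGroup(3, 4)
    assert G.degree == 7 and G.order() == 12
    assert G.is_abelian and G.is_cyclic
    return G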
db6c5dd3fb9c5b91feef46f9bd1f03f33fefbdc13c46867116bb73066d20b493
from __future__ import print_function, division from sympy.core import Basic from sympy.core.compatibility import iterable, as_int from sympy.utilities.iterables import flatten from collections import defaultdict class Prufer(Basic): """ The Prufer correspondence is an algorithm that describes the bijection between labeled trees and the Prufer code. A Prufer code of a labeled tree is unique up to isomorphism and has a length of n - 2. Prufer sequences were first used by Heinz Prufer to give a proof of Cayley's formula. References ========== .. [1] http://mathworld.wolfram.com/LabeledTree.html """ _prufer_repr = None _tree_repr = None _nodes = None _rank = None @property def prufer_repr(self): """Returns Prufer sequence for the Prufer object. This sequence is found by removing the highest numbered vertex, recording the node it was attached to, and continuing until only two vertices remain. The Prufer sequence is the list of recorded nodes. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).prufer_repr [3, 3, 3, 4] >>> Prufer([1, 0, 0]).prufer_repr [1, 0, 0] See Also ======== to_prufer """ if self._prufer_repr is None: self._prufer_repr = self.to_prufer(self._tree_repr[:], self.nodes) return self._prufer_repr @property def tree_repr(self): """Returns the tree representation of the Prufer object. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).tree_repr [[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]] >>> Prufer([1, 0, 0]).tree_repr [[1, 2], [0, 1], [0, 3], [0, 4]] See Also ======== to_tree """ if self._tree_repr is None: self._tree_repr = self.to_tree(self._prufer_repr[:]) return self._tree_repr @property def nodes(self): """Returns the number of nodes in the tree. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).nodes 6 >>> Prufer([1, 0, 0]).nodes 5 """ return self._nodes @property def rank(self): """Returns the rank of the Prufer sequence. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> p = Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]) >>> p.rank 778 >>> p.next(1).rank 779 >>> p.prev().rank 777 See Also ======== prufer_rank, next, prev, size """ if self._rank is None: self._rank = self.prufer_rank() return self._rank @property def size(self): """Return the number of possible trees of this Prufer object. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> Prufer([0]*4).size == Prufer([6]*4).size == 1296 True See Also ======== prufer_rank, rank, next, prev """ return self.prev(self.rank).prev().rank + 1 @staticmethod def to_prufer(tree, n): """Return the Prufer sequence for a tree given as a list of edges where ``n`` is the number of nodes in the tree. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) >>> a.prufer_repr [0, 0] >>> Prufer.to_prufer([[0, 1], [0, 2], [0, 3]], 4) [0, 0] See Also ======== prufer_repr: returns Prufer sequence of a Prufer object. """ d = defaultdict(int) L = [] for edge in tree: # Increment the value of the corresponding # node in the degree list as we encounter an # edge involving it. 
d[edge[0]] += 1 d[edge[1]] += 1 for i in range(n - 2): # find the smallest leaf for x in range(n): if d[x] == 1: break # find the node it was connected to y = None for edge in tree: if x == edge[0]: y = edge[1] elif x == edge[1]: y = edge[0] if y is not None: break # record and update L.append(y) for j in (x, y): d[j] -= 1 if not d[j]: d.pop(j) tree.remove(edge) return L @staticmethod def to_tree(prufer): """Return the tree (as a list of edges) of the given Prufer sequence. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> a = Prufer([0, 2], 4) >>> a.tree_repr [[0, 1], [0, 2], [2, 3]] >>> Prufer.to_tree([0, 2]) [[0, 1], [0, 2], [2, 3]] References ========== - https://hamberg.no/erlend/posts/2010-11-06-prufer-sequence-compact-tree-representation.html See Also ======== tree_repr: returns tree representation of a Prufer object. """ tree = [] last = [] n = len(prufer) + 2 d = defaultdict(lambda: 1) for p in prufer: d[p] += 1 for i in prufer: for j in range(n): # find the smallest leaf (degree = 1) if d[j] == 1: break # (i, j) is the new edge that we append to the tree # and remove from the degree dictionary d[i] -= 1 d[j] -= 1 tree.append(sorted([i, j])) last = [i for i in range(n) if d[i] == 1] or [0, 1] tree.append(last) return tree @staticmethod def edges(*runs): """Return a list of edges and the number of nodes from the given runs that connect nodes in an integer-labelled tree. All node numbers will be shifted so that the minimum node is 0. It is not a problem if edges are repeated in the runs; only unique edges are returned. There is no assumption made about what the range of the node labels should be, but all nodes from the smallest through the largest must be present. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> Prufer.edges([1, 2, 3], [2, 4, 5]) # a T ([[0, 1], [1, 2], [1, 3], [3, 4]], 5) Duplicate edges are removed: >>> Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) # a K ([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7) """ e = set() nmin = runs[0][0] for r in runs: for i in range(len(r) - 1): a, b = r[i: i + 2] if b < a: a, b = b, a e.add((a, b)) rv = [] got = set() nmin = nmax = None for ei in e: for i in ei: got.add(i) nmin = min(ei[0], nmin) if nmin is not None else ei[0] nmax = max(ei[1], nmax) if nmax is not None else ei[1] rv.append(list(ei)) missing = set(range(nmin, nmax + 1)) - got if missing: missing = [i + nmin for i in missing] if len(missing) == 1: msg = 'Node %s is missing.' % missing.pop() else: msg = 'Nodes %s are missing.' % list(sorted(missing)) raise ValueError(msg) if nmin != 0: for i, ei in enumerate(rv): rv[i] = [n - nmin for n in ei] nmax -= nmin return sorted(rv), nmax + 1 def prufer_rank(self): """Computes the rank of a Prufer sequence. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) >>> a.prufer_rank() 0 See Also ======== rank, next, prev, size """ r = 0 p = 1 for i in range(self.nodes - 3, -1, -1): r += p*self.prufer_repr[i] p *= self.nodes return r @classmethod def unrank(self, rank, n): """Finds the unranked Prufer sequence. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> Prufer.unrank(0, 4) Prufer([0, 0]) """ n, rank = as_int(n), as_int(rank) L = defaultdict(int) for i in range(n - 3, -1, -1): L[i] = rank % n rank = (rank - L[i])//n return Prufer([L[i] for i in range(len(L))]) def __new__(cls, *args, **kw_args): """The constructor for the Prufer object. 
Examples ======== >>> from sympy.combinatorics.prufer import Prufer A Prufer object can be constructed from a list of edges: >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) >>> a.prufer_repr [0, 0] If the number of nodes is given, no checking of the nodes will be performed; it will be assumed that nodes 0 through n - 1 are present: >>> Prufer([[0, 1], [0, 2], [0, 3]], 4) Prufer([[0, 1], [0, 2], [0, 3]], 4) A Prufer object can be constructed from a Prufer sequence: >>> b = Prufer([1, 3]) >>> b.tree_repr [[0, 1], [1, 3], [2, 3]] """ ret_obj = Basic.__new__(cls, *args, **kw_args) args = [list(args[0])] if args[0] and iterable(args[0][0]): if not args[0][0]: raise ValueError( 'Prufer expects at least one edge in the tree.') if len(args) > 1: nnodes = args[1] else: nodes = set(flatten(args[0])) nnodes = max(nodes) + 1 if nnodes != len(nodes): missing = set(range(nnodes)) - nodes if len(missing) == 1: msg = 'Node %s is missing.' % missing.pop() else: msg = 'Nodes %s are missing.' % list(sorted(missing)) raise ValueError(msg) ret_obj._tree_repr = [list(i) for i in args[0]] ret_obj._nodes = nnodes else: ret_obj._prufer_repr = args[0] ret_obj._nodes = len(ret_obj._prufer_repr) + 2 return ret_obj def next(self, delta=1): """Generates the Prufer sequence that is delta beyond the current one. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> a = Prufer([[0, 1], [0, 2], [0, 3]]) >>> b = a.next(1) # == a.next() >>> b.tree_repr [[0, 2], [0, 1], [1, 3]] >>> b.rank 1 See Also ======== prufer_rank, rank, prev, size """ return Prufer.unrank(self.rank + delta, self.nodes) def prev(self, delta=1): """Generates the Prufer sequence that is -delta before the current one. Examples ======== >>> from sympy.combinatorics.prufer import Prufer >>> a = Prufer([[0, 1], [1, 2], [2, 3], [1, 4]]) >>> a.rank 36 >>> b = a.prev() >>> b Prufer([1, 2, 0]) >>> b.rank 35 See Also ======== prufer_rank, rank, next, size """ return Prufer.unrank(self.rank -delta, self.nodes)
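# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the Prufer
# correspondence and its ranking in action.  The name ``_demo_prufer`` is
# hypothetical and the function is never called on import.
def _demo_prufer():
    # A labeled tree on n nodes corresponds to a Prufer sequence of length
    # n - 2, and the two representations convert back and forth.
    edges = [[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]
    p = Prufer(edges)
    assert p.prufer_repr == [3, 3, 3, 4]
    assert sorted(Prufer(p.prufer_repr).tree_repr) == sorted(edges)

    # Cayley's formula: there are n**(n - 2) labeled trees on n nodes, which
    # is exactly the number of distinct Prufer sequences, i.e. ``size``.
    assert p.size == 6**4 == 1296

    # rank and unrank are mutual inverses.
    assert Prufer.unrank(p.rank, p.nodes).prufer_repr == p.prufer_repr
    return p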
fd024cc375af14856f133ab241396a0206bdddf24bf63d3f77c8d6f83617e62a
"""Finitely Presented Groups and its algorithms. """ from __future__ import print_function, division from sympy import S from sympy.combinatorics.free_groups import (FreeGroup, FreeGroupElement, free_group) from sympy.combinatorics.rewritingsystem import RewritingSystem from sympy.combinatorics.coset_table import (CosetTable, coset_enumeration_r, coset_enumeration_c) from sympy.combinatorics import PermutationGroup from sympy.printing.defaults import DefaultPrinting from sympy.utilities import public from sympy.utilities.magic import pollute from sympy import symbols from itertools import product @public def fp_group(fr_grp, relators=[]): _fp_group = FpGroup(fr_grp, relators) return (_fp_group,) + tuple(_fp_group._generators) @public def xfp_group(fr_grp, relators=[]): _fp_group = FpGroup(fr_grp, relators) return (_fp_group, _fp_group._generators) # Does not work. Both symbols and pollute are undefined. Never tested. @public def vfp_group(fr_grpm, relators): _fp_group = FpGroup(symbols, relators) pollute([sym.name for sym in _fp_group.symbols], _fp_group.generators) return _fp_group def _parse_relators(rels): """Parse the passed relators.""" return rels ############################################################################### # FINITELY PRESENTED GROUPS # ############################################################################### class FpGroup(DefaultPrinting): """ The FpGroup would take a FreeGroup and a list/tuple of relators, the relators would be specified in such a way that each of them be equal to the identity of the provided free group. """ is_group = True is_FpGroup = True is_PermutationGroup = False def __init__(self, fr_grp, relators): relators = _parse_relators(relators) self.free_group = fr_grp self.relators = relators self.generators = self._generators() self.dtype = type("FpGroupElement", (FpGroupElement,), {"group": self}) # CosetTable instance on identity subgroup self._coset_table = None # returns whether coset table on identity subgroup # has been standardized self._is_standardized = False self._order = None self._center = None self._rewriting_system = RewritingSystem(self) self._perm_isomorphism = None return def _generators(self): return self.free_group.generators def make_confluent(self): ''' Try to make the group's rewriting system confluent ''' self._rewriting_system.make_confluent() return def reduce(self, word): ''' Return the reduced form of `word` in `self` according to the group's rewriting system. If it's confluent, the reduced form is the unique normal form of the word in the group. ''' return self._rewriting_system.reduce(word) def equals(self, word1, word2): ''' Compare `word1` and `word2` for equality in the group using the group's rewriting system. If the system is confluent, the returned answer is necessarily correct. (If it isn't, `False` could be returned in some cases where in fact `word1 == word2`) ''' if self.reduce(word1*word2**-1) == self.identity: return True elif self._rewriting_system.is_confluent: return False return None @property def identity(self): return self.free_group.identity def __contains__(self, g): return g in self.free_group def subgroup(self, gens, C=None, homomorphism=False): ''' Return the subgroup generated by `gens` using the Reidemeister-Schreier algorithm homomorphism -- When set to True, return a dictionary containing the images of the presentation generators in the original group. 
Examples ======== >>> from sympy.combinatorics.fp_groups import (FpGroup, FpSubgroup) >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**3, y**5, (x*y)**2]) >>> H = [x*y, x**-1*y**-1*x*y*x] >>> K, T = f.subgroup(H, homomorphism=True) >>> T(K.generators) [x*y, x**-1*y**2*x**-1] ''' if not all([isinstance(g, FreeGroupElement) for g in gens]): raise ValueError("Generators must be `FreeGroupElement`s") if not all([g.group == self.free_group for g in gens]): raise ValueError("Given generators are not members of the group") if homomorphism: g, rels, _gens = reidemeister_presentation(self, gens, C=C, homomorphism=True) else: g, rels = reidemeister_presentation(self, gens, C=C) if g: g = FpGroup(g[0].group, rels) else: g = FpGroup(free_group('')[0], []) if homomorphism: from sympy.combinatorics.homomorphisms import homomorphism return g, homomorphism(g, self, g.generators, _gens, check=False) return g def coset_enumeration(self, H, strategy="relator_based", max_cosets=None, draft=None, incomplete=False): """ Return an instance of ``coset table``, when Todd-Coxeter algorithm is run over the ``self`` with ``H`` as subgroup, using ``strategy`` argument as strategy. The returned coset table is compressed but not standardized. An instance of `CosetTable` for `fp_grp` can be passed as the keyword argument `draft` in which case the coset enumeration will start with that instance and attempt to complete it. When `incomplete` is `True` and the function is unable to complete for some reason, the partially complete table will be returned. """ if not max_cosets: max_cosets = CosetTable.coset_table_max_limit if strategy == 'relator_based': C = coset_enumeration_r(self, H, max_cosets=max_cosets, draft=draft, incomplete=incomplete) else: C = coset_enumeration_c(self, H, max_cosets=max_cosets, draft=draft, incomplete=incomplete) if C.is_complete(): C.compress() return C def standardize_coset_table(self): """ Standardized the coset table ``self`` and makes the internal variable ``_is_standardized`` equal to ``True``. """ self._coset_table.standardize() self._is_standardized = True def coset_table(self, H, strategy="relator_based", max_cosets=None, draft=None, incomplete=False): """ Return the mathematical coset table of ``self`` in ``H``. """ if not H: if self._coset_table is not None: if not self._is_standardized: self.standardize_coset_table() else: C = self.coset_enumeration([], strategy, max_cosets=max_cosets, draft=draft, incomplete=incomplete) self._coset_table = C self.standardize_coset_table() return self._coset_table.table else: C = self.coset_enumeration(H, strategy, max_cosets=max_cosets, draft=draft, incomplete=incomplete) C.standardize() return C.table def order(self, strategy="relator_based"): """ Returns the order of the finitely presented group ``self``. It uses the coset enumeration with identity group as subgroup, i.e ``H=[]``. 
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x, y**2]) >>> f.order(strategy="coset_table_based") 2 """ from sympy import S, gcd if self._order is not None: return self._order if self._coset_table is not None: self._order = len(self._coset_table.table) elif len(self.relators) == 0: self._order = self.free_group.order() elif len(self.generators) == 1: self._order = abs(gcd([r.array_form[0][1] for r in self.relators])) elif self._is_infinite(): self._order = S.Infinity else: gens, C = self._finite_index_subgroup() if C: ind = len(C.table) self._order = ind*self.subgroup(gens, C=C).order() else: self._order = self.index([]) return self._order def _is_infinite(self): ''' Test if the group is infinite. Return `True` if the test succeeds and `None` otherwise ''' used_gens = set() for r in self.relators: used_gens.update(r.contains_generators()) if any([g not in used_gens for g in self.generators]): return True # Abelianisation test: check is the abelianisation is infinite abelian_rels = [] from sympy.polys.solvers import RawMatrix as Matrix from sympy.polys.domains import ZZ from sympy.matrices.normalforms import invariant_factors for rel in self.relators: abelian_rels.append([rel.exponent_sum(g) for g in self.generators]) m = Matrix(abelian_rels) setattr(m, "ring", ZZ) if 0 in invariant_factors(m): return True else: return None def _finite_index_subgroup(self, s=[]): ''' Find the elements of `self` that generate a finite index subgroup and, if found, return the list of elements and the coset table of `self` by the subgroup, otherwise return `(None, None)` ''' gen = self.most_frequent_generator() rels = list(self.generators) rels.extend(self.relators) if not s: if len(self.generators) == 2: s = [gen] + [g for g in self.generators if g != gen] else: rand = self.free_group.identity i = 0 while ((rand in rels or rand**-1 in rels or rand.is_identity) and i<10): rand = self.random() i += 1 s = [gen, rand] + [g for g in self.generators if g != gen] mid = (len(s)+1)//2 half1 = s[:mid] half2 = s[mid:] draft1 = None draft2 = None m = 200 C = None while not C and (m/2 < CosetTable.coset_table_max_limit): m = min(m, CosetTable.coset_table_max_limit) draft1 = self.coset_enumeration(half1, max_cosets=m, draft=draft1, incomplete=True) if draft1.is_complete(): C = draft1 half = half1 else: draft2 = self.coset_enumeration(half2, max_cosets=m, draft=draft2, incomplete=True) if draft2.is_complete(): C = draft2 half = half2 if not C: m *= 2 if not C: return None, None C.compress() return half, C def most_frequent_generator(self): gens = self.generators rels = self.relators freqs = [sum([r.generator_count(g) for r in rels]) for g in gens] return gens[freqs.index(max(freqs))] def random(self): import random r = self.free_group.identity for i in range(random.randint(2,3)): r = r*random.choice(self.generators)**random.choice([1,-1]) return r def index(self, H, strategy="relator_based"): """ Return the index of subgroup ``H`` in group ``self``. 
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**5, y**4, y*x*y**3*x**3]) >>> f.index([x]) 4 """ # TODO: use |G:H| = |G|/|H| (currently H can't be made into a group) # when we know |G| and |H| if H == []: return self.order() else: C = self.coset_enumeration(H, strategy) return len(C.table) def __str__(self): if self.free_group.rank > 30: str_form = "<fp group with %s generators>" % self.free_group.rank else: str_form = "<fp group on the generators %s>" % str(self.generators) return str_form __repr__ = __str__ #============================================================================== # PERMUTATION GROUP METHODS #============================================================================== def _to_perm_group(self): ''' Return an isomorphic permutation group and the isomorphism. The implementation is dependent on coset enumeration so will only terminate for finite groups. ''' from sympy.combinatorics import Permutation, PermutationGroup from sympy.combinatorics.homomorphisms import homomorphism if self.order() is S.Infinity: raise NotImplementedError("Permutation presentation of infinite " "groups is not implemented") if self._perm_isomorphism: T = self._perm_isomorphism P = T.image() else: C = self.coset_table([]) gens = self.generators images = [[C[i][2*gens.index(g)] for i in range(len(C))] for g in gens] images = [Permutation(i) for i in images] P = PermutationGroup(images) T = homomorphism(self, P, gens, images, check=False) self._perm_isomorphism = T return P, T def _perm_group_list(self, method_name, *args): ''' Given the name of a `PermutationGroup` method (returning a subgroup or a list of subgroups) and (optionally) additional arguments it takes, return a list or a list of lists containing the generators of this (or these) subgroups in terms of the generators of `self`. ''' P, T = self._to_perm_group() perm_result = getattr(P, method_name)(*args) single = False if isinstance(perm_result, PermutationGroup): perm_result, single = [perm_result], True result = [] for group in perm_result: gens = group.generators result.append(T.invert(gens)) return result[0] if single else result def derived_series(self): ''' Return the list of lists containing the generators of the subgroups in the derived series of `self`. ''' return self._perm_group_list('derived_series') def lower_central_series(self): ''' Return the list of lists containing the generators of the subgroups in the lower central series of `self`. ''' return self._perm_group_list('lower_central_series') def center(self): ''' Return the list of generators of the center of `self`. ''' return self._perm_group_list('center') def derived_subgroup(self): ''' Return the list of generators of the derived subgroup of `self`. ''' return self._perm_group_list('derived_subgroup') def centralizer(self, other): ''' Return the list of generators of the centralizer of `other` (a list of elements of `self`) in `self`. ''' T = self._to_perm_group()[1] other = T(other) return self._perm_group_list('centralizer', other) def normal_closure(self, other): ''' Return the list of generators of the normal closure of `other` (a list of elements of `self`) in `self`. ''' T = self._to_perm_group()[1] other = T(other) return self._perm_group_list('normal_closure', other) def _perm_property(self, attr): ''' Given an attribute of a `PermutationGroup`, return its value for a permutation group isomorphic to `self`. 
''' P = self._to_perm_group()[0] return getattr(P, attr) @property def is_abelian(self): ''' Check if `self` is abelian. ''' return self._perm_property("is_abelian") @property def is_nilpotent(self): ''' Check if `self` is nilpotent. ''' return self._perm_property("is_nilpotent") @property def is_solvable(self): ''' Check if `self` is solvable. ''' return self._perm_property("is_solvable") @property def elements(self): ''' List the elements of `self`. ''' P, T = self._to_perm_group() return T.invert(P._elements) @property def is_cyclic(self): """ Return ``True`` if group is Cyclic. """ if len(self.generators) <= 1: return True try: P, T = self._to_perm_group() except NotImplementedError: raise NotImplementedError("Check for infinite Cyclic group " "is not implemented") return P.is_cyclic def abelian_invariants(self): """ Return Abelian Invariants of a group. """ try: P, T = self._to_perm_group() except NotImplementedError: raise NotImplementedError("abelian invariants is not implemented" "for infinite group") return P.abelian_invariants() def composition_series(self): """ Return subnormal series of maximum length for a group. """ try: P, T = self._to_perm_group() except NotImplementedError: raise NotImplementedError("composition series is not implemented" "for infinite group") return P.composition_series() class FpSubgroup(DefaultPrinting): ''' The class implementing a subgroup of an FpGroup or a FreeGroup (only finite index subgroups are supported at this point). This is to be used if one wishes to check if an element of the original group belongs to the subgroup ''' def __init__(self, G, gens, normal=False): super(FpSubgroup,self).__init__() self.parent = G self.generators = list(set([g for g in gens if g != G.identity])) self._min_words = None #for use in __contains__ self.C = None self.normal = normal def __contains__(self, g): if isinstance(self.parent, FreeGroup): if self._min_words is None: # make _min_words - a list of subwords such that # g is in the subgroup if and only if it can be # partitioned into these subwords. Infinite families of # subwords are presented by tuples, e.g. 
(r, w) # stands for the family of subwords r*w**n*r**-1 def _process(w): # this is to be used before adding new words # into _min_words; if the word w is not cyclically # reduced, it will generate an infinite family of # subwords so should be written as a tuple; # if it is, w**-1 should be added to the list # as well p, r = w.cyclic_reduction(removed=True) if not r.is_identity: return [(r, p)] else: return [w, w**-1] # make the initial list gens = [] for w in self.generators: if self.normal: w = w.cyclic_reduction() gens.extend(_process(w)) for w1 in gens: for w2 in gens: # if w1 and w2 are equal or are inverses, continue if w1 == w2 or (not isinstance(w1, tuple) and w1**-1 == w2): continue # if the start of one word is the inverse of the # end of the other, their multiple should be added # to _min_words because of cancellation if isinstance(w1, tuple): # start, end s1, s2 = w1[0][0], w1[0][0]**-1 else: s1, s2 = w1[0], w1[len(w1)-1] if isinstance(w2, tuple): # start, end r1, r2 = w2[0][0], w2[0][0]**-1 else: r1, r2 = w2[0], w2[len(w1)-1] # p1 and p2 are w1 and w2 or, in case when # w1 or w2 is an infinite family, a representative p1, p2 = w1, w2 if isinstance(w1, tuple): p1 = w1[0]*w1[1]*w1[0]**-1 if isinstance(w2, tuple): p2 = w2[0]*w2[1]*w2[0]**-1 # add the product of the words to the list is necessary if r1**-1 == s2 and not (p1*p2).is_identity: new = _process(p1*p2) if not new in gens: gens.extend(new) if r2**-1 == s1 and not (p2*p1).is_identity: new = _process(p2*p1) if not new in gens: gens.extend(new) self._min_words = gens min_words = self._min_words def _is_subword(w): # check if w is a word in _min_words or one of # the infinite families in it w, r = w.cyclic_reduction(removed=True) if r.is_identity or self.normal: return w in min_words else: t = [s[1] for s in min_words if isinstance(s, tuple) and s[0] == r] return [s for s in t if w.power_of(s)] != [] # store the solution of words for which the result of # _word_break (below) is known known = {} def _word_break(w): # check if w can be written as a product of words # in min_words if len(w) == 0: return True i = 0 while i < len(w): i += 1 prefix = w.subword(0, i) if not _is_subword(prefix): continue rest = w.subword(i, len(w)) if rest not in known: known[rest] = _word_break(rest) if known[rest]: return True return False if self.normal: g = g.cyclic_reduction() return _word_break(g) else: if self.C is None: C = self.parent.coset_enumeration(self.generators) self.C = C i = 0 C = self.C for j in range(len(g)): i = C.table[i][C.A_dict[g[j]]] return i == 0 def order(self): from sympy import S if not self.generators: return 1 if isinstance(self.parent, FreeGroup): return S.Infinity if self.C is None: C = self.parent.coset_enumeration(self.generators) self.C = C # This is valid because `len(self.C.table)` (the index of the subgroup) # will always be finite - otherwise coset enumeration doesn't terminate return self.parent.order()/len(self.C.table) def to_FpGroup(self): if isinstance(self.parent, FreeGroup): gen_syms = [('x_%d'%i) for i in range(len(self.generators))] return free_group(', '.join(gen_syms))[0] return self.parent.subgroup(C=self.C) def __str__(self): if len(self.generators) > 30: str_form = "<fp subgroup with %s generators>" % len(self.generators) else: str_form = "<fp subgroup on the generators %s>" % str(self.generators) return str_form __repr__ = __str__ ############################################################################### # LOW INDEX SUBGROUPS # 
############################################################################### def low_index_subgroups(G, N, Y=[]): """ Implements the Low Index Subgroups algorithm, i.e find all subgroups of ``G`` upto a given index ``N``. This implements the method described in [Sim94]. This procedure involves a backtrack search over incomplete Coset Tables, rather than over forced coincidences. Parameters ========== G: An FpGroup < X|R > N: positive integer, representing the maximum index value for subgroups Y: (an optional argument) specifying a list of subgroup generators, such that each of the resulting subgroup contains the subgroup generated by Y. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**2, y**3, (x*y)**4]) >>> L = low_index_subgroups(f, 4) >>> for coset_table in L: ... print(coset_table.table) [[0, 0, 0, 0]] [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]] [[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]] [[1, 1, 0, 0], [0, 0, 1, 1]] References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of Computational Group Theory" Section 5.4 .. [2] Marston Conder and Peter Dobcsanyi "Applications and Adaptions of the Low Index Subgroups Procedure" """ C = CosetTable(G, []) R = G.relators # length chosen for the length of the short relators len_short_rel = 5 # elements of R2 only checked at the last step for complete # coset tables R2 = set([rel for rel in R if len(rel) > len_short_rel]) # elements of R1 are used in inner parts of the process to prune # branches of the search tree, R1 = set([rel.identity_cyclic_reduction() for rel in set(R) - R2]) R1_c_list = C.conjugates(R1) S = [] descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y) return S def descendant_subgroups(S, C, R1_c_list, x, R2, N, Y): A_dict = C.A_dict A_dict_inv = C.A_dict_inv if C.is_complete(): # if C is complete then it only needs to test # whether the relators in R2 are satisfied for w, alpha in product(R2, C.omega): if not C.scan_check(alpha, w): return # relators in R2 are satisfied, append the table to list S.append(C) else: # find the first undefined entry in Coset Table for alpha, x in product(range(len(C.table)), C.A): if C.table[alpha][A_dict[x]] is None: # this is "x" in pseudo-code (using "y" makes it clear) undefined_coset, undefined_gen = alpha, x break # for filling up the undefine entry we try all possible values # of beta in Omega or beta = n where beta^(undefined_gen^-1) is undefined reach = C.omega + [C.n] for beta in reach: if beta < N: if beta == C.n or C.table[beta][A_dict_inv[undefined_gen]] is None: try_descendant(S, C, R1_c_list, R2, N, undefined_coset, \ undefined_gen, beta, Y) def try_descendant(S, C, R1_c_list, R2, N, alpha, x, beta, Y): r""" Solves the problem of trying out each individual possibility for `\alpha^x. """ D = C.copy() if beta == D.n and beta < N: D.table.append([None]*len(D.A)) D.p.append(beta) D.table[alpha][D.A_dict[x]] = beta D.table[beta][D.A_dict_inv[x]] = alpha D.deduction_stack.append((alpha, x)) if not D.process_deductions_check(R1_c_list[D.A_dict[x]], \ R1_c_list[D.A_dict_inv[x]]): return for w in Y: if not D.scan_check(0, w): return if first_in_class(D, Y): descendant_subgroups(S, D, R1_c_list, x, R2, N, Y) def first_in_class(C, Y=[]): """ Checks whether the subgroup ``H=G1`` corresponding to the Coset Table could possibly be the canonical representative of its conjugacy class. 
Parameters ========== C: CosetTable Returns ======= bool: True/False If this returns False, then no descendant of C can have that property, and so we can abandon C. If it returns True, then we need to process further the node of the search tree corresponding to C, and so we call ``descendant_subgroups`` recursively on C. Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, first_in_class >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**2, y**3, (x*y)**4]) >>> C = CosetTable(f, []) >>> C.table = [[0, 0, None, None]] >>> first_in_class(C) True >>> C.table = [[1, 1, 1, None], [0, 0, None, 1]]; C.p = [0, 1] >>> first_in_class(C) True >>> C.table = [[1, 1, 2, 1], [0, 0, 0, None], [None, None, None, 0]] >>> C.p = [0, 1, 2] >>> first_in_class(C) False >>> C.table = [[1, 1, 1, 2], [0, 0, 2, 0], [2, None, 0, 1]] >>> first_in_class(C) False # TODO:: Sims points out in [Sim94] that performance can be improved by # remembering some of the information computed by ``first_in_class``. If # the ``continue alpha`` statement is executed at line 14, then the same thing # will happen for that value of alpha in any descendant of the table C, and so # the values the values of alpha for which this occurs could profitably be # stored and passed through to the descendants of C. Of course this would # make the code more complicated. # The code below is taken directly from the function on page 208 of [Sim94] # nu[alpha] """ n = C.n # lamda is the largest numbered point in Omega_c_alpha which is currently defined lamda = -1 # for alpha in Omega_c, nu[alpha] is the point in Omega_c_alpha corresponding to alpha nu = [None]*n # for alpha in Omega_c_alpha, mu[alpha] is the point in Omega_c corresponding to alpha mu = [None]*n # mutually nu and mu are the mutually-inverse equivalence maps between # Omega_c_alpha and Omega_c next_alpha = False # For each 0!=alpha in [0 .. 
nc-1], we start by constructing the equivalent # standardized coset table C_alpha corresponding to H_alpha for alpha in range(1, n): # reset nu to "None" after previous value of alpha for beta in range(lamda+1): nu[mu[beta]] = None # we only want to reject our current table in favour of a preceding # table in the ordering in which 1 is replaced by alpha, if the subgroup # G_alpha corresponding to this preceding table definitely contains the # given subgroup for w in Y: # TODO: this should support input of a list of general words # not just the words which are in "A" (i.e gen and gen^-1) if C.table[alpha][C.A_dict[w]] != alpha: # continue with alpha next_alpha = True break if next_alpha: next_alpha = False continue # try alpha as the new point 0 in Omega_C_alpha mu[0] = alpha nu[alpha] = 0 # compare corresponding entries in C and C_alpha lamda = 0 for beta in range(n): for x in C.A: gamma = C.table[beta][C.A_dict[x]] delta = C.table[mu[beta]][C.A_dict[x]] # if either of the entries is undefined, # we move with next alpha if gamma is None or delta is None: # continue with alpha next_alpha = True break if nu[delta] is None: # delta becomes the next point in Omega_C_alpha lamda += 1 nu[delta] = lamda mu[lamda] = delta if nu[delta] < gamma: return False if nu[delta] > gamma: # continue with alpha next_alpha = True break if next_alpha: next_alpha = False break return True #======================================================================== # Simplifying Presentation #======================================================================== def simplify_presentation(*args, **kwargs): ''' For an instance of `FpGroup`, return a simplified isomorphic copy of the group (e.g. remove redundant generators or relators). Alternatively, a list of generators and relators can be passed in which case the simplified lists will be returned. By default, the generators of the group are unchanged. If you would like to remove redundant generators, set the keyword argument `change_gens = True`. ''' change_gens = kwargs.get("change_gens", False) if len(args) == 1: if not isinstance(args[0], FpGroup): raise TypeError("The argument must be an instance of FpGroup") G = args[0] gens, rels = simplify_presentation(G.generators, G.relators, change_gens=change_gens) if gens: return FpGroup(gens[0].group, rels) return FpGroup(FreeGroup([]), []) elif len(args) == 2: gens, rels = args[0][:], args[1][:] if not gens: return gens, rels identity = gens[0].group.identity else: if len(args) == 0: m = "Not enough arguments" else: m = "Too many arguments" raise RuntimeError(m) prev_gens = [] prev_rels = [] while not set(prev_rels) == set(rels): prev_rels = rels while change_gens and not set(prev_gens) == set(gens): prev_gens = gens gens, rels = elimination_technique_1(gens, rels, identity) rels = _simplify_relators(rels, identity) if change_gens: syms = [g.array_form[0][0] for g in gens] F = free_group(syms)[0] identity = F.identity gens = F.generators subs = dict(zip(syms, gens)) for j, r in enumerate(rels): a = r.array_form rel = identity for sym, p in a: rel = rel*subs[sym]**p rels[j] = rel return gens, rels def _simplify_relators(rels, identity): """Relies upon ``_simplification_technique_1`` for its functioning. 
""" rels = rels[:] rels = list(set(_simplification_technique_1(rels))) rels.sort() rels = [r.identity_cyclic_reduction() for r in rels] try: rels.remove(identity) except ValueError: pass return rels # Pg 350, section 2.5.1 from [2] def elimination_technique_1(gens, rels, identity): rels = rels[:] # the shorter relators are examined first so that generators selected for # elimination will have shorter strings as equivalent rels.sort() gens = gens[:] redundant_gens = {} redundant_rels = [] used_gens = set() # examine each relator in relator list for any generator occurring exactly # once for rel in rels: # don't look for a redundant generator in a relator which # depends on previously found ones contained_gens = rel.contains_generators() if any([g in contained_gens for g in redundant_gens]): continue contained_gens = list(contained_gens) contained_gens.sort(reverse = True) for gen in contained_gens: if rel.generator_count(gen) == 1 and gen not in used_gens: k = rel.exponent_sum(gen) gen_index = rel.index(gen**k) bk = rel.subword(gen_index + 1, len(rel)) fw = rel.subword(0, gen_index) chi = bk*fw redundant_gens[gen] = chi**(-1*k) used_gens.update(chi.contains_generators()) redundant_rels.append(rel) break rels = [r for r in rels if r not in redundant_rels] # eliminate the redundant generators from remaining relators rels = [r.eliminate_words(redundant_gens, _all = True).identity_cyclic_reduction() for r in rels] rels = list(set(rels)) try: rels.remove(identity) except ValueError: pass gens = [g for g in gens if g not in redundant_gens] return gens, rels def _simplification_technique_1(rels): """ All relators are checked to see if they are of the form `gen^n`. If any such relators are found then all other relators are processed for strings in the `gen` known order. 
Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import _simplification_technique_1 >>> F, x, y = free_group("x, y") >>> w1 = [x**2*y**4, x**3] >>> _simplification_technique_1(w1) [x**-1*y**4, x**3] >>> w2 = [x**2*y**-4*x**5, x**3, x**2*y**8, y**5] >>> _simplification_technique_1(w2) [x**-1*y*x**-1, x**3, x**-1*y**-2, y**5] >>> w3 = [x**6*y**4, x**4] >>> _simplification_technique_1(w3) [x**2*y**4, x**4] """ from sympy import gcd rels = rels[:] # dictionary with "gen: n" where gen^n is one of the relators exps = {} for i in range(len(rels)): rel = rels[i] if rel.number_syllables() == 1: g = rel[0] exp = abs(rel.array_form[0][1]) if rel.array_form[0][1] < 0: rels[i] = rels[i]**-1 g = g**-1 if g in exps: exp = gcd(exp, exps[g].array_form[0][1]) exps[g] = g**exp one_syllables_words = exps.values() # decrease some of the exponents in relators, making use of the single # syllable relators for i in range(len(rels)): rel = rels[i] if rel in one_syllables_words: continue rel = rel.eliminate_words(one_syllables_words, _all = True) # if rels[i] contains g**n where abs(n) is greater than half of the power p # of g in exps, g**n can be replaced by g**(n-p) (or g**(p-n) if n<0) for g in rel.contains_generators(): if g in exps: exp = exps[g].array_form[0][1] max_exp = (exp + 1)//2 rel = rel.eliminate_word(g**(max_exp), g**(max_exp-exp), _all = True) rel = rel.eliminate_word(g**(-max_exp), g**(-(max_exp-exp)), _all = True) rels[i] = rel rels = [r.identity_cyclic_reduction() for r in rels] return rels ############################################################################### # SUBGROUP PRESENTATIONS # ############################################################################### # Pg 175 [1] def define_schreier_generators(C, homomorphism=False): ''' Parameters ========== C -- Coset table. homomorphism -- When set to True, return a dictionary containing the images of the presentation generators in the original group. ''' y = [] gamma = 1 f = C.fp_group X = f.generators if homomorphism: # `_gens` stores the elements of the parent group to # to which the schreier generators correspond to. 
_gens = {} # compute the schreier Traversal tau = {} tau[0] = f.identity C.P = [[None]*len(C.A) for i in range(C.n)] for alpha, x in product(C.omega, C.A): beta = C.table[alpha][C.A_dict[x]] if beta == gamma: C.P[alpha][C.A_dict[x]] = "<identity>" C.P[beta][C.A_dict_inv[x]] = "<identity>" gamma += 1 if homomorphism: tau[beta] = tau[alpha]*x elif x in X and C.P[alpha][C.A_dict[x]] is None: y_alpha_x = '%s_%s' % (x, alpha) y.append(y_alpha_x) C.P[alpha][C.A_dict[x]] = y_alpha_x if homomorphism: _gens[y_alpha_x] = tau[alpha]*x*tau[beta]**-1 grp_gens = list(free_group(', '.join(y))) C._schreier_free_group = grp_gens.pop(0) C._schreier_generators = grp_gens if homomorphism: C._schreier_gen_elem = _gens # replace all elements of P by, free group elements for i, j in product(range(len(C.P)), range(len(C.A))): # if equals "<identity>", replace by identity element if C.P[i][j] == "<identity>": C.P[i][j] = C._schreier_free_group.identity elif isinstance(C.P[i][j], str): r = C._schreier_generators[y.index(C.P[i][j])] C.P[i][j] = r beta = C.table[i][j] C.P[beta][j + 1] = r**-1 def reidemeister_relators(C): R = C.fp_group.relators rels = [rewrite(C, coset, word) for word in R for coset in range(C.n)] order_1_gens = set([i for i in rels if len(i) == 1]) # remove all the order 1 generators from relators rels = list(filter(lambda rel: rel not in order_1_gens, rels)) # replace order 1 generators by identity element in reidemeister relators for i in range(len(rels)): w = rels[i] w = w.eliminate_words(order_1_gens, _all=True) rels[i] = w C._schreier_generators = [i for i in C._schreier_generators if not (i in order_1_gens or i**-1 in order_1_gens)] # Tietze transformation 1 i.e TT_1 # remove cyclic conjugate elements from relators i = 0 while i < len(rels): w = rels[i] j = i + 1 while j < len(rels): if w.is_cyclic_conjugate(rels[j]): del rels[j] else: j += 1 i += 1 C._reidemeister_relators = rels def rewrite(C, alpha, w): """ Parameters ========== C: CosetTable alpha: A live coset w: A word in `A*` Returns ======= rho(tau(alpha), w) Examples ======== >>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, define_schreier_generators, rewrite >>> from sympy.combinatorics.free_groups import free_group >>> F, x, y = free_group("x ,y") >>> f = FpGroup(F, [x**2, y**3, (x*y)**6]) >>> C = CosetTable(f, []) >>> C.table = [[1, 1, 2, 3], [0, 0, 4, 5], [4, 4, 3, 0], [5, 5, 0, 2], [2, 2, 5, 1], [3, 3, 1, 4]] >>> C.p = [0, 1, 2, 3, 4, 5] >>> define_schreier_generators(C) >>> rewrite(C, 0, (x*y)**6) x_4*y_2*x_3*x_1*x_2*y_4*x_5 """ v = C._schreier_free_group.identity for i in range(len(w)): x_i = w[i] v = v*C.P[alpha][C.A_dict[x_i]] alpha = C.table[alpha][C.A_dict[x_i]] return v # Pg 350, section 2.5.2 from [2] def elimination_technique_2(C): """ This technique eliminates one generator at a time. Heuristically this seems superior in that we may select for elimination the generator with shortest equivalent string at each stage. 
>>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r, \ reidemeister_relators, define_schreier_generators, elimination_technique_2 >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**3, y**5, (x*y)**2]); H = [x*y, x**-1*y**-1*x*y*x] >>> C = coset_enumeration_r(f, H) >>> C.compress(); C.standardize() >>> define_schreier_generators(C) >>> reidemeister_relators(C) >>> elimination_technique_2(C) ([y_1, y_2], [y_2**-3, y_2*y_1*y_2*y_1*y_2*y_1, y_1**2]) """ rels = C._reidemeister_relators rels.sort(reverse=True) gens = C._schreier_generators for i in range(len(gens) - 1, -1, -1): rel = rels[i] for j in range(len(gens) - 1, -1, -1): gen = gens[j] if rel.generator_count(gen) == 1: k = rel.exponent_sum(gen) gen_index = rel.index(gen**k) bk = rel.subword(gen_index + 1, len(rel)) fw = rel.subword(0, gen_index) rep_by = (bk*fw)**(-1*k) del rels[i]; del gens[j] for l in range(len(rels)): rels[l] = rels[l].eliminate_word(gen, rep_by) break C._reidemeister_relators = rels C._schreier_generators = gens return C._schreier_generators, C._reidemeister_relators def reidemeister_presentation(fp_grp, H, C=None, homomorphism=False): """ Parameters ========== fp_group: A finitely presented group, an instance of FpGroup H: A subgroup whose presentation is to be found, given as a list of words in generators of `fp_grp` homomorphism: When set to True, return a homomorphism from the subgroup to the parent group Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, reidemeister_presentation >>> F, x, y = free_group("x, y") Example 5.6 Pg. 177 from [1] >>> f = FpGroup(F, [x**3, y**5, (x*y)**2]) >>> H = [x*y, x**-1*y**-1*x*y*x] >>> reidemeister_presentation(f, H) ((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1)) Example 5.8 Pg. 183 from [1] >>> f = FpGroup(F, [x**3, y**3, (x*y)**3]) >>> H = [x*y, x*y**-1] >>> reidemeister_presentation(f, H) ((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0)) Exercises Q2. Pg 187 from [1] >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> H = [x] >>> reidemeister_presentation(f, H) ((x_0,), (x_0**4,)) Example 5.9 Pg. 183 from [1] >>> f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2]) >>> H = [x] >>> reidemeister_presentation(f, H) ((x_0,), (x_0**6,)) """ if not C: C = coset_enumeration_r(fp_grp, H) C.compress(); C.standardize() define_schreier_generators(C, homomorphism=homomorphism) reidemeister_relators(C) gens, rels = C._schreier_generators, C._reidemeister_relators gens, rels = simplify_presentation(gens, rels, change_gens=True) C.schreier_generators = tuple(gens) C.reidemeister_relators = tuple(rels) if homomorphism: _gens = [] for gen in gens: _gens.append(C._schreier_gen_elem[str(gen)]) return C.schreier_generators, C.reidemeister_relators, _gens return C.schreier_generators, C.reidemeister_relators FpGroupElement = FreeGroupElement
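# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library API): a minimal,
# hedged demonstration of the subgroup-presentation machinery defined above.
# The group, subgroup and expected output are taken from the Example 5.6
# doctest of ``reidemeister_presentation``; nothing new is asserted here.
if __name__ == "__main__":
    from sympy.combinatorics.free_groups import free_group

    F, x, y = free_group("x, y")
    f = FpGroup(F, [x**3, y**5, (x*y)**2])
    H = [x*y, x**-1*y**-1*x*y*x]

    # Coset enumeration, Schreier generators and Reidemeister relators are all
    # driven internally by ``reidemeister_presentation``; the docstring above
    # reports ((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1)).
    gens, rels = reidemeister_presentation(f, H)
    print("subgroup generators:", gens)
    print("subgroup relators:  ", rels)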
from __future__ import print_function, division from sympy.core.mul import Mul from sympy.core.singleton import S from sympy.concrete.expr_with_intlimits import ExprWithIntLimits from sympy.core.exprtools import factor_terms from sympy.functions.elementary.exponential import exp, log from sympy.polys import quo, roots from sympy.simplify import powsimp class Product(ExprWithIntLimits): r"""Represents unevaluated products. ``Product`` represents a finite or infinite product, with the first argument being the general form of terms in the series, and the second argument being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking all integer values from ``start`` through ``end``. In accordance with long-standing mathematical convention, the end term is included in the product. Finite products =============== For finite products (and products with symbolic limits assumed to be finite) we follow the analogue of the summation convention described by Karr [1], especially definition 3 of section 1.4. The product: .. math:: \prod_{m \leq i < n} f(i) has *the obvious meaning* for `m < n`, namely: .. math:: \prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1) with the upper limit value `f(n)` excluded. The product over an empty set is one if and only if `m = n`: .. math:: \prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n Finally, for all other products over empty sets we assume the following definition: .. math:: \prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n It is important to note that above we define all products with the upper limit being exclusive. This is in contrast to the usual mathematical notation, but does not affect the product convention. Indeed we have: .. math:: \prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i) where the difference in notation is intentional to emphasize the meaning, with limits typeset on the top being inclusive. 
Examples ======== >>> from sympy.abc import a, b, i, k, m, n, x >>> from sympy import Product, factorial, oo >>> Product(k, (k, 1, m)) Product(k, (k, 1, m)) >>> Product(k, (k, 1, m)).doit() factorial(m) >>> Product(k**2,(k, 1, m)) Product(k**2, (k, 1, m)) >>> Product(k**2,(k, 1, m)).doit() factorial(m)**2 Wallis' product for pi: >>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo)) >>> W Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo)) Direct computation currently fails: >>> W.doit() Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo)) But we can approach the infinite product by a limit of finite products: >>> from sympy import limit >>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n)) >>> W2 Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n)) >>> W2e = W2.doit() >>> W2e 2**(-2*n)*4**n*factorial(n)**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n)) >>> limit(W2e, n, oo) pi/2 By the same formula we can compute sin(pi/2): >>> from sympy import pi, gamma, simplify >>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n)) >>> P = P.subs(x, pi/2) >>> P pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2 >>> Pe = P.doit() >>> Pe pi**2*RisingFactorial(1 - pi/2, n)*RisingFactorial(1 + pi/2, n)/(2*factorial(n)**2) >>> Pe = Pe.rewrite(gamma) >>> Pe pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 - pi/2)*gamma(1 + pi/2)*gamma(n + 1)**2) >>> Pe = simplify(Pe) >>> Pe sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2 >>> limit(Pe, n, oo) sin(pi**2/2) Products with the lower limit being larger than the upper one: >>> Product(1/i, (i, 6, 1)).doit() 120 >>> Product(i, (i, 2, 5)).doit() 120 The empty product: >>> Product(i, (i, n, n-1)).doit() 1 An example showing that the symbolic result of a product is still valid for seemingly nonsensical values of the limits. Then the Karr convention allows us to give a perfectly valid interpretation to those products by interchanging the limits according to the above rules: >>> P = Product(2, (i, 10, n)).doit() >>> P 2**(n - 9) >>> P.subs(n, 5) 1/16 >>> Product(2, (i, 10, 5)).doit() 1/16 >>> 1/Product(2, (i, 6, 9)).doit() 1/16 An explicit example of the Karr summation convention applied to products: >>> P1 = Product(x, (i, a, b)).doit() >>> P1 x**(-a + b + 1) >>> P2 = Product(x, (i, b+1, a-1)).doit() >>> P2 x**(a - b - 1) >>> simplify(P1 * P2) 1 And another one: >>> P1 = Product(i, (i, b, a)).doit() >>> P1 RisingFactorial(b, a - b + 1) >>> P2 = Product(i, (i, a+1, b-1)).doit() >>> P2 RisingFactorial(a + 1, -a + b - 1) >>> P1 * P2 RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1) >>> simplify(P1 * P2) 1 See Also ======== Sum, summation product References ========== .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM, Volume 28 Issue 2, April 1981, Pages 305-350 http://dl.acm.org/citation.cfm?doid=322248.322255 .. [2] https://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation .. [3] https://en.wikipedia.org/wiki/Empty_product """ __slots__ = ('is_commutative',) def __new__(cls, function, *symbols, **assumptions): obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions) return obj def _eval_rewrite_as_Sum(self, *args, **kwargs): from sympy.concrete.summations import Sum return exp(Sum(log(self.function), *self.limits)) @property def term(self): return self._args[0] function = term def _eval_is_zero(self): if self.has_empty_sequence: return False z = self.term.is_zero if z is True: return True if self.has_finite_limits: # A Product is zero only if its term is zero assuming finite limits. 
return z def _eval_is_extended_real(self): if self.has_empty_sequence: return True return self.function.is_extended_real def _eval_is_positive(self): if self.has_empty_sequence: return True if self.function.is_positive and self.has_finite_limits: return True def _eval_is_nonnegative(self): if self.has_empty_sequence: return True if self.function.is_nonnegative and self.has_finite_limits: return True def _eval_is_extended_nonnegative(self): if self.has_empty_sequence: return True if self.function.is_extended_nonnegative: return True def _eval_is_extended_nonpositive(self): if self.has_empty_sequence: return True def _eval_is_finite(self): if self.has_finite_limits and self.function.is_finite: return True def doit(self, **hints): # first make sure any definite limits have product # variables with matching assumptions reps = {} for xab in self.limits: # Must be imported here to avoid circular imports from .summations import _dummy_with_inherited_properties_concrete d = _dummy_with_inherited_properties_concrete(xab) if d: reps[xab[0]] = d if reps: undo = dict([(v, k) for k, v in reps.items()]) did = self.xreplace(reps).doit(**hints) if type(did) is tuple: # when separate=True did = tuple([i.xreplace(undo) for i in did]) else: did = did.xreplace(undo) return did f = self.function for index, limit in enumerate(self.limits): i, a, b = limit dif = b - a if dif.is_integer and dif.is_negative: a, b = b + 1, a - 1 f = 1 / f g = self._eval_product(f, (i, a, b)) if g in (None, S.NaN): return self.func(powsimp(f), *self.limits[index:]) else: f = g if hints.get('deep', True): return f.doit(**hints) else: return powsimp(f) def _eval_adjoint(self): if self.is_commutative: return self.func(self.function.adjoint(), *self.limits) return None def _eval_conjugate(self): return self.func(self.function.conjugate(), *self.limits) def _eval_product(self, term, limits): from sympy.concrete.delta import deltaproduct, _has_simple_delta from sympy.concrete.summations import summation from sympy.functions import KroneckerDelta, RisingFactorial (k, a, n) = limits if k not in term.free_symbols: if (term - 1).is_zero: return S.One return term**(n - a + 1) if a == n: return term.subs(k, a) if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]): return deltaproduct(term, limits) dif = n - a definite = dif.is_Integer if definite and (dif < 100): return self._eval_product_direct(term, limits) elif term.is_polynomial(k): poly = term.as_poly(k) A = B = Q = S.One all_roots = roots(poly) M = 0 for r, m in all_roots.items(): M += m A *= RisingFactorial(a - r, n - a + 1)**m Q *= (n - r)**m if M < poly.degree(): arg = quo(poly, Q.as_poly(k)) B = self.func(arg, (k, a, n)).doit() return poly.LC()**(n - a + 1) * A * B elif term.is_Add: factored = factor_terms(term, fraction=True) if factored.is_Mul: return self._eval_product(factored, (k, a, n)) elif term.is_Mul: # Factor in part without the summation variable and part with without_k, with_k = term.as_coeff_mul(k) if len(with_k) >= 2: # More than one term including k, so still a multiplication exclude, include = [], [] for t in with_k: p = self._eval_product(t, (k, a, n)) if p is not None: exclude.append(p) else: include.append(t) if not exclude: return None else: arg = term._new_rawargs(*include) A = Mul(*exclude) B = self.func(arg, (k, a, n)).doit() return without_k**(n - a + 1)*A * B else: # Just a single term p = self._eval_product(with_k[0], (k, a, n)) if p is None: p = self.func(with_k[0], (k, a, n)).doit() return without_k**(n - a + 1)*p elif term.is_Pow: if not 
term.base.has(k): s = summation(term.exp, (k, a, n)) return term.base**s elif not term.exp.has(k): p = self._eval_product(term.base, (k, a, n)) if p is not None: return p**term.exp elif isinstance(term, Product): evaluated = term.doit() f = self._eval_product(evaluated, limits) if f is None: return self.func(evaluated, limits) else: return f if definite: return self._eval_product_direct(term, limits) def _eval_simplify(self, **kwargs): from sympy.simplify.simplify import product_simplify rv = product_simplify(self) return rv.doit() if kwargs['doit'] else rv def _eval_transpose(self): if self.is_commutative: return self.func(self.function.transpose(), *self.limits) return None def _eval_product_direct(self, term, limits): (k, a, n) = limits return Mul(*[term.subs(k, a + i) for i in range(n - a + 1)]) def is_convergent(self): r""" See docs of :obj:`.Sum.is_convergent()` for explanation of convergence in SymPy. The infinite product: .. math:: \prod_{1 \leq i < \infty} f(i) is defined by the sequence of partial products: .. math:: \prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n) as n increases without bound. The product converges to a non-zero value if and only if the sum: .. math:: \sum_{1 \leq i < \infty} \log{f(n)} converges. Examples ======== >>> from sympy import Interval, S, Product, Symbol, cos, pi, exp, oo >>> n = Symbol('n', integer=True) >>> Product(n/(n + 1), (n, 1, oo)).is_convergent() False >>> Product(1/n**2, (n, 1, oo)).is_convergent() False >>> Product(cos(pi/n), (n, 1, oo)).is_convergent() True >>> Product(exp(-n**2), (n, 1, oo)).is_convergent() False References ========== .. [1] https://en.wikipedia.org/wiki/Infinite_product """ from sympy.concrete.summations import Sum sequence_term = self.function log_sum = log(sequence_term) lim = self.limits try: is_conv = Sum(log_sum, *lim).is_convergent() except NotImplementedError: if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true: return S.true raise NotImplementedError("The algorithm to find the product convergence of %s " "is not yet implemented" % (sequence_term)) return is_conv def reverse_order(expr, *indices): """ Reverse the order of a limit in a Product. Usage ===== ``reverse_order(expr, *indices)`` reverses some limits in the expression ``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in the argument ``indices`` specify some indices whose limits get reversed. These selectors are either variable names or numerical indices counted starting from the inner-most limit tuple. Examples ======== >>> from sympy import Product, simplify, RisingFactorial, gamma, Sum >>> from sympy.abc import x, y, a, b, c, d >>> P = Product(x, (x, a, b)) >>> Pr = P.reverse_order(x) >>> Pr Product(1/x, (x, b + 1, a - 1)) >>> Pr = Pr.doit() >>> Pr 1/RisingFactorial(b + 1, a - b - 1) >>> simplify(Pr) gamma(b + 1)/gamma(a) >>> P = P.doit() >>> P RisingFactorial(a, -a + b + 1) >>> simplify(P) gamma(b + 1)/gamma(a) While one should prefer variable names when specifying which limits to reverse, the index counting notation comes in handy in case there are several symbols with the same name. 
>>> S = Sum(x*y, (x, a, b), (y, c, d)) >>> S Sum(x*y, (x, a, b), (y, c, d)) >>> S0 = S.reverse_order(0) >>> S0 Sum(-x*y, (x, b + 1, a - 1), (y, c, d)) >>> S1 = S0.reverse_order(1) >>> S1 Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1)) Of course we can mix both notations: >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1) Sum(x*y, (x, b + 1, a - 1), (y, 6, 1)) >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x) Sum(x*y, (x, b + 1, a - 1), (y, 6, 1)) See Also ======== sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index, reorder_limit, sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder References ========== .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM, Volume 28 Issue 2, April 1981, Pages 305-350 http://dl.acm.org/citation.cfm?doid=322248.322255 """ l_indices = list(indices) for i, indx in enumerate(l_indices): if not isinstance(indx, int): l_indices[i] = expr.index(indx) e = 1 limits = [] for i, limit in enumerate(expr.limits): l = limit if i in l_indices: e = -e l = (limit[0], limit[2] + 1, limit[1] - 1) limits.append(l) return Product(expr.function ** e, *limits) def product(*args, **kwargs): r""" Compute the product. The notation for symbols is similar to the notation used in Sum or Integral. product(f, (i, a, b)) computes the product of f with respect to i from a to b, i.e., :: b _____ product(f(n), (i, a, b)) = | | f(n) | | i = a If it cannot compute the product, it returns an unevaluated Product object. Repeated products can be computed by introducing additional symbols tuples:: >>> from sympy import product, symbols >>> i, n, m, k = symbols('i n m k', integer=True) >>> product(i, (i, 1, k)) factorial(k) >>> product(m, (i, 1, k)) m**k >>> product(i, (i, 1, k), (k, 1, n)) Product(factorial(k), (k, 1, n)) """ prod = Product(*args, **kwargs) if isinstance(prod, Product): return prod.doit(deep=False) else: return prod
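# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): the calls below mirror doctests from the
# ``Product`` and ``reverse_order`` docstrings above, so the results noted in
# the comments are the documented ones.
if __name__ == "__main__":
    from sympy import simplify
    from sympy.abc import a, b, i, k, m, n, x

    print(Product(k, (k, 1, m)).doit())     # factorial(m)
    print(Product(2, (i, 10, n)).doit())    # 2**(n - 9)
    print(Product(2, (i, 10, 5)).doit())    # 1/16, by the Karr convention

    # Reversing a limit inverts the term and shifts the bounds.
    P = Product(x, (x, a, b))
    print(P.reverse_order(x))               # Product(1/x, (x, b + 1, a - 1))

    # An explicit instance of the Karr convention: the product over the
    # reversed range is the reciprocal of the original one.
    P1 = Product(x, (i, a, b)).doit()           # x**(-a + b + 1)
    P2 = Product(x, (i, b + 1, a - 1)).doit()   # x**(a - b - 1)
    print(simplify(P1 * P2))                    # 1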
""" This module implements sums and products containing the Kronecker Delta function. References ========== - http://mathworld.wolfram.com/KroneckerDelta.html """ from __future__ import print_function, division from sympy.core import Add, Mul, S, Dummy from sympy.core.cache import cacheit from sympy.core.compatibility import default_sort_key from sympy.functions import KroneckerDelta, Piecewise, piecewise_fold from sympy.sets import Interval @cacheit def _expand_delta(expr, index): """ Expand the first Add containing a simple KroneckerDelta. """ if not expr.is_Mul: return expr delta = None func = Add terms = [S.One] for h in expr.args: if delta is None and h.is_Add and _has_simple_delta(h, index): delta = True func = h.func terms = [terms[0]*t for t in h.args] else: terms = [t*h for t in terms] return func(*terms) @cacheit def _extract_delta(expr, index): """ Extract a simple KroneckerDelta from the expression. Returns the tuple ``(delta, newexpr)`` where: - ``delta`` is a simple KroneckerDelta expression if one was found, or ``None`` if no simple KroneckerDelta expression was found. - ``newexpr`` is a Mul containing the remaining terms; ``expr`` is returned unchanged if no simple KroneckerDelta expression was found. Examples ======== >>> from sympy import KroneckerDelta >>> from sympy.concrete.delta import _extract_delta >>> from sympy.abc import x, y, i, j, k >>> _extract_delta(4*x*y*KroneckerDelta(i, j), i) (KroneckerDelta(i, j), 4*x*y) >>> _extract_delta(4*x*y*KroneckerDelta(i, j), k) (None, 4*x*y*KroneckerDelta(i, j)) See Also ======== sympy.functions.special.tensor_functions.KroneckerDelta deltaproduct deltasummation """ if not _has_simple_delta(expr, index): return (None, expr) if isinstance(expr, KroneckerDelta): return (expr, S.One) if not expr.is_Mul: raise ValueError("Incorrect expr") delta = None terms = [] for arg in expr.args: if delta is None and _is_simple_delta(arg, index): delta = arg else: terms.append(arg) return (delta, expr.func(*terms)) @cacheit def _has_simple_delta(expr, index): """ Returns True if ``expr`` is an expression that contains a KroneckerDelta that is simple in the index ``index``, meaning that this KroneckerDelta is nonzero for a single value of the index ``index``. """ if expr.has(KroneckerDelta): if _is_simple_delta(expr, index): return True if expr.is_Add or expr.is_Mul: for arg in expr.args: if _has_simple_delta(arg, index): return True return False @cacheit def _is_simple_delta(delta, index): """ Returns True if ``delta`` is a KroneckerDelta and is nonzero for a single value of the index ``index``. """ if isinstance(delta, KroneckerDelta) and delta.has(index): p = (delta.args[0] - delta.args[1]).as_poly(index) if p: return p.degree() == 1 return False @cacheit def _remove_multiple_delta(expr): """ Evaluate products of KroneckerDelta's. """ from sympy.solvers import solve if expr.is_Add: return expr.func(*list(map(_remove_multiple_delta, expr.args))) if not expr.is_Mul: return expr eqs = [] newargs = [] for arg in expr.args: if isinstance(arg, KroneckerDelta): eqs.append(arg.args[0] - arg.args[1]) else: newargs.append(arg) if not eqs: return expr solns = solve(eqs, dict=True) if len(solns) == 0: return S.Zero elif len(solns) == 1: for key in solns[0].keys(): newargs.append(KroneckerDelta(key, solns[0][key])) expr2 = expr.func(*newargs) if expr != expr2: return _remove_multiple_delta(expr2) return expr @cacheit def _simplify_delta(expr): """ Rewrite a KroneckerDelta's indices in its simplest form. 
""" from sympy.solvers import solve if isinstance(expr, KroneckerDelta): try: slns = solve(expr.args[0] - expr.args[1], dict=True) if slns and len(slns) == 1: return Mul(*[KroneckerDelta(*(key, value)) for key, value in slns[0].items()]) except NotImplementedError: pass return expr @cacheit def deltaproduct(f, limit): """ Handle products containing a KroneckerDelta. See Also ======== deltasummation sympy.functions.special.tensor_functions.KroneckerDelta sympy.concrete.products.product """ from sympy.concrete.products import product if ((limit[2] - limit[1]) < 0) == True: return S.One if not f.has(KroneckerDelta): return product(f, limit) if f.is_Add: # Identify the term in the Add that has a simple KroneckerDelta delta = None terms = [] for arg in sorted(f.args, key=default_sort_key): if delta is None and _has_simple_delta(arg, limit[0]): delta = arg else: terms.append(arg) newexpr = f.func(*terms) k = Dummy("kprime", integer=True) if isinstance(limit[1], int) and isinstance(limit[2], int): result = deltaproduct(newexpr, limit) + sum([ deltaproduct(newexpr, (limit[0], limit[1], ik - 1)) * delta.subs(limit[0], ik) * deltaproduct(newexpr, (limit[0], ik + 1, limit[2])) for ik in range(int(limit[1]), int(limit[2] + 1))] ) else: result = deltaproduct(newexpr, limit) + deltasummation( deltaproduct(newexpr, (limit[0], limit[1], k - 1)) * delta.subs(limit[0], k) * deltaproduct(newexpr, (limit[0], k + 1, limit[2])), (k, limit[1], limit[2]), no_piecewise=_has_simple_delta(newexpr, limit[0]) ) return _remove_multiple_delta(result) delta, _ = _extract_delta(f, limit[0]) if not delta: g = _expand_delta(f, limit[0]) if f != g: from sympy import factor try: return factor(deltaproduct(g, limit)) except AssertionError: return deltaproduct(g, limit) return product(f, limit) return _remove_multiple_delta(f.subs(limit[0], limit[1])*KroneckerDelta(limit[2], limit[1])) + \ S.One*_simplify_delta(KroneckerDelta(limit[2], limit[1] - 1)) @cacheit def deltasummation(f, limit, no_piecewise=False): """ Handle summations containing a KroneckerDelta. The idea for summation is the following: - If we are dealing with a KroneckerDelta expression, i.e. KroneckerDelta(g(x), j), we try to simplify it. If we could simplify it, then we sum the resulting expression. We already know we can sum a simplified expression, because only simple KroneckerDelta expressions are involved. If we couldn't simplify it, there are two cases: 1) The expression is a simple expression: we return the summation, taking care if we are dealing with a Derivative or with a proper KroneckerDelta. 2) The expression is not simple (i.e. KroneckerDelta(cos(x))): we can do nothing at all. - If the expr is a multiplication expr having a KroneckerDelta term: First we expand it. If the expansion did work, then we try to sum the expansion. If not, we try to extract a simple KroneckerDelta term, then we have two cases: 1) We have a simple KroneckerDelta term, so we return the summation. 2) We didn't have a simple term, but we do have an expression with simplified KroneckerDelta terms, so we sum this expression. 
Examples ======== >>> from sympy import oo, symbols >>> from sympy.abc import k >>> i, j = symbols('i, j', integer=True, finite=True) >>> from sympy.concrete.delta import deltasummation >>> from sympy import KroneckerDelta, Piecewise >>> deltasummation(KroneckerDelta(i, k), (k, -oo, oo)) 1 >>> deltasummation(KroneckerDelta(i, k), (k, 0, oo)) Piecewise((1, i >= 0), (0, True)) >>> deltasummation(KroneckerDelta(i, k), (k, 1, 3)) Piecewise((1, (i >= 1) & (i <= 3)), (0, True)) >>> deltasummation(k*KroneckerDelta(i, j)*KroneckerDelta(j, k), (k, -oo, oo)) j*KroneckerDelta(i, j) >>> deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo)) i >>> deltasummation(i*KroneckerDelta(i, j), (i, -oo, oo)) j See Also ======== deltaproduct sympy.functions.special.tensor_functions.KroneckerDelta sympy.concrete.sums.summation """ from sympy.concrete.summations import summation from sympy.solvers import solve if ((limit[2] - limit[1]) < 0) == True: return S.Zero if not f.has(KroneckerDelta): return summation(f, limit) x = limit[0] g = _expand_delta(f, x) if g.is_Add: return piecewise_fold( g.func(*[deltasummation(h, limit, no_piecewise) for h in g.args])) # try to extract a simple KroneckerDelta term delta, expr = _extract_delta(g, x) if (delta is not None) and (delta.delta_range is not None): dinf, dsup = delta.delta_range if (limit[1] - dinf <= 0) == True and (limit[2] - dsup >= 0) == True: no_piecewise = True if not delta: return summation(f, limit) solns = solve(delta.args[0] - delta.args[1], x) if len(solns) == 0: return S.Zero elif len(solns) != 1: from sympy.concrete.summations import Sum return Sum(f, limit) value = solns[0] if no_piecewise: return expr.subs(x, value) return Piecewise( (expr.subs(x, value), Interval(*limit[1:3]).as_relational(value)), (S.Zero, True) )
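# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): these calls mirror the ``deltasummation``
# doctest above, so the expected results noted in the comments are the
# documented ones.
if __name__ == "__main__":
    from sympy import KroneckerDelta, oo, symbols
    from sympy.abc import k

    i, j = symbols('i j', integer=True, finite=True)

    # Summing a simple delta over all integers collapses to 1.
    print(deltasummation(KroneckerDelta(i, k), (k, -oo, oo)))    # 1
    # Over a finite range the result is guarded by a Piecewise condition.
    print(deltasummation(KroneckerDelta(i, k), (k, 1, 3)))
    # A delta factor substitutes the bound variable into the remaining term.
    print(deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo)))  # i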
"""Gosper's algorithm for hypergeometric summation. """ from __future__ import print_function, division from sympy.core import S, Dummy, symbols from sympy.core.compatibility import is_sequence from sympy.polys import Poly, parallel_poly_from_expr, factor from sympy.solvers import solve from sympy.simplify import hypersimp def gosper_normal(f, g, n, polys=True): r""" Compute the Gosper's normal form of ``f`` and ``g``. Given relatively prime univariate polynomials ``f`` and ``g``, rewrite their quotient to a normal form defined as follows: .. math:: \frac{f(n)}{g(n)} = Z \cdot \frac{A(n) C(n+1)}{B(n) C(n)} where ``Z`` is an arbitrary constant and ``A``, ``B``, ``C`` are monic polynomials in ``n`` with the following properties: 1. `\gcd(A(n), B(n+h)) = 1 \forall h \in \mathbb{N}` 2. `\gcd(B(n), C(n+1)) = 1` 3. `\gcd(A(n), C(n)) = 1` This normal form, or rational factorization in other words, is a crucial step in Gosper's algorithm and in solving of difference equations. It can be also used to decide if two hypergeometric terms are similar or not. This procedure will return a tuple containing elements of this factorization in the form ``(Z*A, B, C)``. Examples ======== >>> from sympy.concrete.gosper import gosper_normal >>> from sympy.abc import n >>> gosper_normal(4*n+5, 2*(4*n+1)*(2*n+3), n, polys=False) (1/4, n + 3/2, n + 1/4) """ (p, q), opt = parallel_poly_from_expr( (f, g), n, field=True, extension=True) a, A = p.LC(), p.monic() b, B = q.LC(), q.monic() C, Z = A.one, a/b h = Dummy('h') D = Poly(n + h, n, h, domain=opt.domain) R = A.resultant(B.compose(D)) roots = set(R.ground_roots().keys()) for r in set(roots): if not r.is_Integer or r < 0: roots.remove(r) for i in sorted(roots): d = A.gcd(B.shift(+i)) A = A.quo(d) B = B.quo(d.shift(-i)) for j in range(1, i + 1): C *= d.shift(-j) A = A.mul_ground(Z) if not polys: A = A.as_expr() B = B.as_expr() C = C.as_expr() return A, B, C def gosper_term(f, n): r""" Compute Gosper's hypergeometric term for ``f``. Suppose ``f`` is a hypergeometric term such that: .. math:: s_n = \sum_{k=0}^{n-1} f_k and `f_k` doesn't depend on `n`. Returns a hypergeometric term `g_n` such that `g_{n+1} - g_n = f_n`. Examples ======== >>> from sympy.concrete.gosper import gosper_term >>> from sympy.functions import factorial >>> from sympy.abc import n >>> gosper_term((4*n + 1)*factorial(n)/factorial(2*n + 1), n) (-n - 1/2)/(n + 1/4) """ r = hypersimp(f, n) if r is None: return None # 'f' is *not* a hypergeometric term p, q = r.as_numer_denom() A, B, C = gosper_normal(p, q, n) B = B.shift(-1) N = S(A.degree()) M = S(B.degree()) K = S(C.degree()) if (N != M) or (A.LC() != B.LC()): D = {K - max(N, M)} elif not N: D = {K - N + 1, S.Zero} else: D = {K - N + 1, (B.nth(N - 1) - A.nth(N - 1))/A.LC()} for d in set(D): if not d.is_Integer or d < 0: D.remove(d) if not D: return None # 'f(n)' is *not* Gosper-summable d = max(D) coeffs = symbols('c:%s' % (d + 1), cls=Dummy) domain = A.get_domain().inject(*coeffs) x = Poly(coeffs, n, domain=domain) H = A*x.shift(1) - B*x - C solution = solve(H.coeffs(), coeffs) if solution is None: return None # 'f(n)' is *not* Gosper-summable x = x.as_expr().subs(solution) for coeff in coeffs: if coeff not in solution: x = x.subs(coeff, 0) if x.is_zero: return None # 'f(n)' is *not* Gosper-summable else: return B.as_expr()*x/C.as_expr() def gosper_sum(f, k): r""" Gosper's hypergeometric summation algorithm. Given a hypergeometric term ``f`` such that: .. 
math :: s_n = \sum_{k=0}^{n-1} f_k and `f(n)` doesn't depend on `n`, returns `g_{n} - g(0)` where `g_{n+1} - g_n = f_n`, or ``None`` if `s_n` can not be expressed in closed form as a sum of hypergeometric terms. Examples ======== >>> from sympy.concrete.gosper import gosper_sum >>> from sympy.functions import factorial >>> from sympy.abc import i, n, k >>> f = (4*k + 1)*factorial(k)/factorial(2*k + 1) >>> gosper_sum(f, (k, 0, n)) (-factorial(n) + 2*factorial(2*n + 1))/factorial(2*n + 1) >>> _.subs(n, 2) == sum(f.subs(k, i) for i in [0, 1, 2]) True >>> gosper_sum(f, (k, 3, n)) (-60*factorial(n) + factorial(2*n + 1))/(60*factorial(2*n + 1)) >>> _.subs(n, 5) == sum(f.subs(k, i) for i in [3, 4, 5]) True References ========== .. [1] Marko Petkovsek, Herbert S. Wilf, Doron Zeilberger, A = B, AK Peters, Ltd., Wellesley, MA, USA, 1997, pp. 73--100 """ indefinite = False if is_sequence(k): k, a, b = k else: indefinite = True g = gosper_term(f, k) if g is None: return None if indefinite: result = f*g else: result = (f*(g + 1)).subs(k, b) - (f*g).subs(k, a) if result is S.NaN: try: result = (f*(g + 1)).limit(k, b) - (f*g).limit(k, a) except NotImplementedError: result = None return factor(result)
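# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): the hypergeometric term below is the one
# used in the ``gosper_sum`` doctest above, so the closed form printed at the
# end is the documented one.  Here ``k`` plays the role of the summation
# variable (the ``gosper_term`` doctest states the same example in ``n``).
if __name__ == "__main__":
    from sympy import factorial
    from sympy.abc import k, n

    f = (4*k + 1)*factorial(k)/factorial(2*k + 1)

    # The certificate term produced by Gosper's algorithm.
    print(gosper_term(f, k))

    # The definite sum telescopes to
    # (-factorial(n) + 2*factorial(2*n + 1))/factorial(2*n + 1).
    print(gosper_sum(f, (k, 0, n)))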
"""Various algorithms for helping identifying numbers and sequences.""" from __future__ import print_function, division from sympy.utilities import public from sympy.core import Function, Symbol from sympy.core.numbers import Zero from sympy import (sympify, floor, lcm, denom, Integer, Rational, exp, integrate, symbols, Product, product) from sympy.polys.polyfuncs import rational_interpolate as rinterp @public def find_simple_recurrence_vector(l): """ This function is used internally by other functions from the sympy.concrete.guess module. While most users may want to rather use the function find_simple_recurrence when looking for recurrence relations among rational numbers, the current function may still be useful when some post-processing has to be done. The function returns a vector of length n when a recurrence relation of order n is detected in the sequence of rational numbers v. If the returned vector has a length 1, then the returned value is always the list [0], which means that no relation has been found. While the functions is intended to be used with rational numbers, it should work for other kinds of real numbers except for some cases involving quadratic numbers; for that reason it should be used with some caution when the argument is not a list of rational numbers. Examples ======== >>> from sympy.concrete.guess import find_simple_recurrence_vector >>> from sympy import fibonacci >>> find_simple_recurrence_vector([fibonacci(k) for k in range(12)]) [1, -1, -1] See Also ======== See the function sympy.concrete.guess.find_simple_recurrence which is more user-friendly. """ q1 = [0] q2 = [Integer(1)] b, z = 0, len(l) >> 1 while len(q2) <= z: while l[b]==0: b += 1 if b == len(l): c = 1 for x in q2: c = lcm(c, denom(x)) if q2[0]*c < 0: c = -c for k in range(len(q2)): q2[k] = int(q2[k]*c) return q2 a = Integer(1)/l[b] m = [a] for k in range(b+1, len(l)): m.append(-sum(l[j+1]*m[b-j-1] for j in range(b, k))*a) l, m = m, [0] * max(len(q2), b+len(q1)) for k in range(len(q2)): m[k] = a*q2[k] for k in range(b, b+len(q1)): m[k] += q1[k-b] while m[-1]==0: m.pop() # because trailing zeros can occur q1, q2, b = q2, m, 1 return [0] @public def find_simple_recurrence(v, A=Function('a'), N=Symbol('n')): """ Detects and returns a recurrence relation from a sequence of several integer (or rational) terms. The name of the function in the returned expression is 'a' by default; the main variable is 'n' by default. The smallest index in the returned expression is always n (and never n-1, n-2, etc.). Examples ======== >>> from sympy.concrete.guess import find_simple_recurrence >>> from sympy import fibonacci >>> find_simple_recurrence([fibonacci(k) for k in range(12)]) -a(n) - a(n + 1) + a(n + 2) >>> from sympy import Function, Symbol >>> a = [1, 1, 1] >>> for k in range(15): a.append(5*a[-1]-3*a[-2]+8*a[-3]) >>> find_simple_recurrence(a, A=Function('f'), N=Symbol('i')) -8*f(i) + 3*f(i + 1) - 5*f(i + 2) + f(i + 3) """ p = find_simple_recurrence_vector(v) n = len(p) if n <= 1: return Zero() rel = Zero() for k in range(n): rel += A(N+n-1-k)*p[k] return rel @public def rationalize(x, maxcoeff=10000): """ Helps identifying a rational number from a float (or mpmath.mpf) value by using a continued fraction. The algorithm stops as soon as a large partial quotient is detected (greater than 10000 by default). 
Examples ======== >>> from sympy.concrete.guess import rationalize >>> from mpmath import cos, pi >>> rationalize(cos(pi/3)) 1/2 >>> from mpmath import mpf >>> rationalize(mpf("0.333333333333333")) 1/3 While the function is rather intended to help 'identifying' rational values, it may be used in some cases for approximating real numbers. (Though other functions may be more relevant in that case.) >>> rationalize(pi, maxcoeff = 250) 355/113 See Also ======== Several other methods can approximate a real number as a rational, like: * fractions.Fraction.from_decimal * fractions.Fraction.from_float * mpmath.identify * mpmath.pslq by using the following syntax: mpmath.pslq([x, 1]) * mpmath.findpoly by using the following syntax: mpmath.findpoly(x, 1) * sympy.simplify.nsimplify (which is a more general function) The main difference between the current function and all these variants is that control focuses on magnitude of partial quotients here rather than on global precision of the approximation. If the real is "known to be" a rational number, the current function should be able to detect it correctly with the default settings even when denominator is great (unless its expansion contains unusually big partial quotients) which may occur when studying sequences of increasing numbers. If the user cares more on getting simple fractions, other methods may be more convenient. """ p0, p1 = 0, 1 q0, q1 = 1, 0 a = floor(x) while a < maxcoeff or q1==0: p = a*p1 + p0 q = a*q1 + q0 p0, p1 = p1, p q0, q1 = q1, q if x==a: break x = 1/(x-a) a = floor(x) return sympify(p) / q @public def guess_generating_function_rational(v, X=Symbol('x')): """ Tries to "guess" a rational generating function for a sequence of rational numbers v. Examples ======== >>> from sympy.concrete.guess import guess_generating_function_rational >>> from sympy import fibonacci >>> l = [fibonacci(k) for k in range(5,15)] >>> guess_generating_function_rational(l) (3*x + 5)/(-x**2 - x + 1) See Also ======== sympy.series.approximants mpmath.pade """ # a) compute the denominator as q q = find_simple_recurrence_vector(v) n = len(q) if n <= 1: return None # b) compute the numerator as p p = [sum(v[i-k]*q[k] for k in range(min(i+1, n))) for i in range(len(v)>>1)] return (sum(p[k]*X**k for k in range(len(p))) / sum(q[k]*X**k for k in range(n))) @public def guess_generating_function(v, X=Symbol('x'), types=['all'], maxsqrtn=2): """ Tries to "guess" a generating function for a sequence of rational numbers v. Only a few patterns are implemented yet. The function returns a dictionary where keys are the name of a given type of generating function. Six types are currently implemented: type | formal definition -------+---------------------------------------------------------------- ogf | f(x) = Sum( a_k * x^k , k: 0..infinity ) egf | f(x) = Sum( a_k * x^k / k! , k: 0..infinity ) lgf | f(x) = Sum( (-1)^(k+1) a_k * x^k / k , k: 1..infinity ) | (with initial index being hold as 1 rather than 0) hlgf | f(x) = Sum( a_k * x^k / k , k: 1..infinity ) | (with initial index being hold as 1 rather than 0) lgdogf | f(x) = derivate( log(Sum( a_k * x^k, k: 0..infinity )), x) lgdegf | f(x) = derivate( log(Sum( a_k * x^k / k!, k: 0..infinity )), x) In order to spare time, the user can select only some types of generating functions (default being ['all']). While forgetting to use a list in the case of a single type may seem to work most of the time as in: types='ogf' this (convenient) syntax may lead to unexpected extra results in some cases. 
Discarding a type when calling the function does not mean that the type will not be present in the returned dictionary; it only means that no extra computation will be performed for that type, but the function may still add it in the result when it can be easily converted from another type. Two generating functions (lgdogf and lgdegf) are not even computed if the initial term of the sequence is 0; it may be useful in that case to try again after having removed the leading zeros. Examples ======== >>> from sympy.concrete.guess import guess_generating_function as ggf >>> ggf([k+1 for k in range(12)], types=['ogf', 'lgf', 'hlgf']) {'hlgf': 1/(1 - x), 'lgf': 1/(x + 1), 'ogf': 1/(x**2 - 2*x + 1)} >>> from sympy import sympify >>> l = sympify("[3/2, 11/2, 0, -121/2, -363/2, 121]") >>> ggf(l) {'ogf': (x + 3/2)/(11*x**2 - 3*x + 1)} >>> from sympy import fibonacci >>> ggf([fibonacci(k) for k in range(5, 15)], types=['ogf']) {'ogf': (3*x + 5)/(-x**2 - x + 1)} >>> from sympy import simplify, factorial >>> ggf([factorial(k) for k in range(12)], types=['ogf', 'egf', 'lgf']) {'egf': 1/(1 - x)} >>> ggf([k+1 for k in range(12)], types=['egf']) {'egf': (x + 1)*exp(x), 'lgdegf': (x + 2)/(x + 1)} N-th root of a rational function can also be detected (below is an example coming from the sequence A108626 from http://oeis.org). The greatest n-th root to be tested is specified as maxsqrtn (default 2). >>> ggf([1, 2, 5, 14, 41, 124, 383, 1200, 3799, 12122, 38919])['ogf'] sqrt(1/(x**4 + 2*x**2 - 4*x + 1)) References ========== .. [1] "Concrete Mathematics", R.L. Graham, D.E. Knuth, O. Patashnik .. [2] https://oeis.org/wiki/Generating_functions """ # List of all types of all g.f. known by the algorithm if 'all' in types: types = ['ogf', 'egf', 'lgf', 'hlgf', 'lgdogf', 'lgdegf'] result = {} # Ordinary Generating Function (ogf) if 'ogf' in types: # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(v))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*v[i] for i in range(n+1)) for n in range(len(v))] g = guess_generating_function_rational(t, X=X) if g: result['ogf'] = g**Rational(1, d+1) break # Exponential Generating Function (egf) if 'egf' in types: # Transform sequence (division by factorial) w, f = [], Integer(1) for i, k in enumerate(v): f *= i if i else 1 w.append(k/f) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['egf'] = g**Rational(1, d+1) break # Logarithmic Generating Function (lgf) if 'lgf' in types: # Transform sequence (multiplication by (-1)^(n+1) / n) w, f = [], Integer(-1) for i, k in enumerate(v): f = -f w.append(f*k/Integer(i+1)) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['lgf'] = g**Rational(1, d+1) break # Hyperbolic logarithmic Generating Function (hlgf) if 'hlgf' in types: # Transform sequence (division by n+1) w = [] for i, k in enumerate(v): w.append(k/Integer(i+1)) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: 
result['hlgf'] = g**Rational(1, d+1) break # Logarithmic derivative of ordinary generating Function (lgdogf) if v[0] != 0 and ('lgdogf' in types or ('ogf' in types and 'ogf' not in result)): # Transform sequence by computing f'(x)/f(x) # because log(f(x)) = integrate( f'(x)/f(x) ) a, w = sympify(v[0]), [] for n in range(len(v)-1): w.append( (v[n+1]*(n+1) - sum(w[-i-1]*v[i+1] for i in range(n)))/a) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['lgdogf'] = g**Rational(1, d+1) if 'ogf' not in result: result['ogf'] = exp(integrate(result['lgdogf'], X)) break # Logarithmic derivative of exponential generating Function (lgdegf) if v[0] != 0 and ('lgdegf' in types or ('egf' in types and 'egf' not in result)): # Transform sequence / step 1 (division by factorial) z, f = [], Integer(1) for i, k in enumerate(v): f *= i if i else 1 z.append(k/f) # Transform sequence / step 2 by computing f'(x)/f(x) # because log(f(x)) = integrate( f'(x)/f(x) ) a, w = z[0], [] for n in range(len(z)-1): w.append( (z[n+1]*(n+1) - sum(w[-i-1]*z[i+1] for i in range(n)))/a) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['lgdegf'] = g**Rational(1, d+1) if 'egf' not in result: result['egf'] = exp(integrate(result['lgdegf'], X)) break return result @public def guess(l, all=False, evaluate=True, niter=2, variables=None): """ This function is adapted from the Rate.m package for Mathematica written by Christian Krattenthaler. It tries to guess a formula from a given sequence of rational numbers. In order to speed up the process, the 'all' variable is set to False by default, stopping the computation as some results are returned during an iteration; the variable can be set to True if more iterations are needed (other formulas may be found; however they may be equivalent to the first ones). Another option is the 'evaluate' variable (default is True); setting it to False will leave the involved products unevaluated. By default, the number of iterations is set to 2 but a greater value (up to len(l)-1) can be specified with the optional 'niter' variable. More and more convoluted results are found when the order of the iteration gets higher: * first iteration returns polynomial or rational functions; * second iteration returns products of rising factorials and their inverses; * third iteration returns products of products of rising factorials and their inverses; * etc. The returned formulas contain symbols i0, i1, i2, ... where the main variables is i0 (and auxiliary variables are i1, i2, ...). A list of other symbols can be provided in the 'variables' option; the length of the least should be the value of 'niter' (more is acceptable but only the first symbols will be used); in this case, the main variable will be the first symbol in the list. 
Examples ======== >>> from sympy.concrete.guess import guess >>> guess([1,2,6,24,120], evaluate=False) [Product(i1 + 1, (i1, 1, i0 - 1))] >>> from sympy import symbols >>> r = guess([1,2,7,42,429,7436,218348,10850216], niter=4) >>> i0 = symbols("i0") >>> [r[0].subs(i0,n).doit() for n in range(1,10)] [1, 2, 7, 42, 429, 7436, 218348, 10850216, 911835460] """ if any(a==0 for a in l[:-1]): return [] N = len(l) niter = min(N-1, niter) myprod = product if evaluate else Product g = [] res = [] if variables is None: symb = symbols('i:'+str(niter)) else: symb = variables for k, s in enumerate(symb): g.append(l) n, r = len(l), [] for i in range(n-2-1, -1, -1): ri = rinterp(enumerate(g[k][:-1], start=1), i, X=s) if ((denom(ri).subs({s:n}) != 0) and (ri.subs({s:n}) - g[k][-1] == 0) and ri not in r): r.append(ri) if r: for i in range(k-1, -1, -1): r = list(map(lambda v: g[i][0] * myprod(v, (symb[i+1], 1, symb[i]-1)), r)) if not all: return r res += r l = [Rational(l[i+1], l[i]) for i in range(N-k-1)] return res
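# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): the calls below mirror doctests from this
# module, so the expected outputs noted in the comments are the documented
# ones.
if __name__ == "__main__":
    from sympy import fibonacci

    # Recurrence detection: Fibonacci satisfies -a(n) - a(n + 1) + a(n + 2) = 0.
    print(find_simple_recurrence([fibonacci(k) for k in range(12)]))

    # Rational ordinary generating function for a shifted Fibonacci slice:
    # the docstring reports {'ogf': (3*x + 5)/(-x**2 - x + 1)}.
    print(guess_generating_function([fibonacci(k) for k in range(5, 15)],
                                    types=['ogf']))

    # Formula guessing for factorials, left unevaluated as a Product:
    # [Product(i1 + 1, (i1, 1, i0 - 1))].
    print(guess([1, 2, 6, 24, 120], evaluate=False))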
from __future__ import print_function, division from sympy.core.add import Add from sympy.core.compatibility import is_sequence from sympy.core.containers import Tuple from sympy.core.expr import Expr from sympy.core.mul import Mul from sympy.core.relational import Equality, Relational from sympy.core.singleton import S from sympy.core.symbol import Symbol, Dummy from sympy.core.sympify import sympify from sympy.functions.elementary.piecewise import (piecewise_fold, Piecewise) from sympy.logic.boolalg import BooleanFunction from sympy.tensor.indexed import Idx from sympy.sets.sets import Interval from sympy.sets.fancysets import Range from sympy.utilities import flatten from sympy.utilities.iterables import sift from sympy.utilities.exceptions import SymPyDeprecationWarning def _common_new(cls, function, *symbols, **assumptions): """Return either a special return value or the tuple, (function, limits, orientation). This code is common to both ExprWithLimits and AddWithLimits.""" function = sympify(function) if isinstance(function, Equality): # This transforms e.g. Integral(Eq(x, y)) to Eq(Integral(x), Integral(y)) # but that is only valid for definite integrals. limits, orientation = _process_limits(*symbols) if not (limits and all(len(limit) == 3 for limit in limits)): SymPyDeprecationWarning( feature='Integral(Eq(x, y))', useinstead='Eq(Integral(x, z), Integral(y, z))', issue=18053, deprecated_since_version=1.6, ).warn() lhs = function.lhs rhs = function.rhs return Equality(cls(lhs, *symbols, **assumptions), \ cls(rhs, *symbols, **assumptions)) if function is S.NaN: return S.NaN if symbols: limits, orientation = _process_limits(*symbols) for i, li in enumerate(limits): if len(li) == 4: function = function.subs(li[0], li[-1]) limits[i] = Tuple(*li[:-1]) else: # symbol not provided -- we can still try to compute a general form free = function.free_symbols if len(free) != 1: raise ValueError( "specify dummy variables for %s" % function) limits, orientation = [Tuple(s) for s in free], 1 # denest any nested calls while cls == type(function): limits = list(function.limits) + limits function = function.function # Any embedded piecewise functions need to be brought out to the # top level. We only fold Piecewise that contain the integration # variable. reps = {} symbols_of_integration = set([i[0] for i in limits]) for p in function.atoms(Piecewise): if not p.has(*symbols_of_integration): reps[p] = Dummy() # mask off those that don't function = function.xreplace(reps) # do the fold function = piecewise_fold(function) # remove the masking function = function.xreplace({v: k for k, v in reps.items()}) return function, limits, orientation def _process_limits(*symbols): """Process the list of symbols and convert them to canonical limits, storing them as Tuple(symbol, lower, upper). The orientation of the function is also returned when the upper limit is missing so (x, 1, None) becomes (x, None, 1) and the orientation is changed. 
""" limits = [] orientation = 1 for V in symbols: if isinstance(V, (Relational, BooleanFunction)): variable = V.atoms(Symbol).pop() V = (variable, V.as_set()) if isinstance(V, Symbol) or getattr(V, '_diff_wrt', False): if isinstance(V, Idx): if V.lower is None or V.upper is None: limits.append(Tuple(V)) else: limits.append(Tuple(V, V.lower, V.upper)) else: limits.append(Tuple(V)) continue elif is_sequence(V, Tuple): if len(V) == 2 and isinstance(V[1], Range): lo = V[1].inf hi = V[1].sup dx = abs(V[1].step) V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo] V = sympify(flatten(V)) # a list of sympified elements if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False): newsymbol = V[0] if len(V) == 2 and isinstance(V[1], Interval): # 2 -> 3 # Interval V[1:] = [V[1].start, V[1].end] elif len(V) == 3: # general case if V[2] is None and not V[1] is None: orientation *= -1 V = [newsymbol] + [i for i in V[1:] if i is not None] if not isinstance(newsymbol, Idx) or len(V) == 3: if len(V) == 4: limits.append(Tuple(*V)) continue if len(V) == 3: if isinstance(newsymbol, Idx): # Idx represents an integer which may have # specified values it can take on; if it is # given such a value, an error is raised here # if the summation would try to give it a larger # or smaller value than permitted. None and Symbolic # values will not raise an error. lo, hi = newsymbol.lower, newsymbol.upper try: if lo is not None and not bool(V[1] >= lo): raise ValueError("Summation will set Idx value too low.") except TypeError: pass try: if hi is not None and not bool(V[2] <= hi): raise ValueError("Summation will set Idx value too high.") except TypeError: pass limits.append(Tuple(*V)) continue if len(V) == 1 or (len(V) == 2 and V[1] is None): limits.append(Tuple(newsymbol)) continue elif len(V) == 2: limits.append(Tuple(newsymbol, V[1])) continue raise ValueError('Invalid limits given: %s' % str(symbols)) return limits, orientation class ExprWithLimits(Expr): __slots__ = ('is_commutative',) def __new__(cls, function, *symbols, **assumptions): pre = _common_new(cls, function, *symbols, **assumptions) if type(pre) is tuple: function, limits, _ = pre else: return pre # limits must have upper and lower bounds; the indefinite form # is not supported. This restriction does not apply to AddWithLimits if any(len(l) != 3 or None in l for l in limits): raise ValueError('ExprWithLimits requires values for lower and upper bounds.') obj = Expr.__new__(cls, **assumptions) arglist = [function] arglist.extend(limits) obj._args = tuple(arglist) obj.is_commutative = function.is_commutative # limits already checked return obj @property def function(self): """Return the function applied across limits. Examples ======== >>> from sympy import Integral >>> from sympy.abc import x >>> Integral(x**2, (x,)).function x**2 See Also ======== limits, variables, free_symbols """ return self._args[0] @property def limits(self): """Return the limits of expression. Examples ======== >>> from sympy import Integral >>> from sympy.abc import x, i >>> Integral(x**i, (i, 1, 3)).limits ((i, 1, 3),) See Also ======== function, variables, free_symbols """ return self._args[1:] @property def variables(self): """Return a list of the limit variables. 
>>> from sympy import Sum >>> from sympy.abc import x, i >>> Sum(x**i, (i, 1, 3)).variables [i] See Also ======== function, limits, free_symbols as_dummy : Rename dummy variables sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable """ return [l[0] for l in self.limits] @property def bound_symbols(self): """Return only variables that are dummy variables. Examples ======== >>> from sympy import Integral >>> from sympy.abc import x, i, j, k >>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols [i, j] See Also ======== function, limits, free_symbols as_dummy : Rename dummy variables sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable """ return [l[0] for l in self.limits if len(l) != 1] @property def free_symbols(self): """ This method returns the symbols in the object, excluding those that take on a specific value (i.e. the dummy symbols). Examples ======== >>> from sympy import Sum >>> from sympy.abc import x, y >>> Sum(x, (x, y, 1)).free_symbols {y} """ # don't test for any special values -- nominal free symbols # should be returned, e.g. don't return set() if the # function is zero -- treat it like an unevaluated expression. function, limits = self.function, self.limits isyms = function.free_symbols for xab in limits: if len(xab) == 1: isyms.add(xab[0]) continue # take out the target symbol if xab[0] in isyms: isyms.remove(xab[0]) # add in the new symbols for i in xab[1:]: isyms.update(i.free_symbols) return isyms @property def is_number(self): """Return True if the Sum has no free symbols, else False.""" return not self.free_symbols def _eval_interval(self, x, a, b): limits = [(i if i[0] != x else (x, a, b)) for i in self.limits] integrand = self.function return self.func(integrand, *limits) def _eval_subs(self, old, new): """ Perform substitutions over non-dummy variables of an expression with limits. Also, can be used to specify point-evaluation of an abstract antiderivative. Examples ======== >>> from sympy import Sum, oo >>> from sympy.abc import s, n >>> Sum(1/n**s, (n, 1, oo)).subs(s, 2) Sum(n**(-2), (n, 1, oo)) >>> from sympy import Integral >>> from sympy.abc import x, a >>> Integral(a*x**2, x).subs(x, 4) Integral(a*x**2, (x, 4)) See Also ======== variables : Lists the integration variables transform : Perform mapping on the dummy variable for integrals change_index : Perform mapping on the sum and product dummy variables """ from sympy.core.function import AppliedUndef, UndefinedFunction func, limits = self.function, list(self.limits) # If one of the expressions we are replacing is used as a func index # one of two things happens. # - the old variable first appears as a free variable # so we perform all free substitutions before it becomes # a func index. # - the old variable first appears as a func index, in # which case we ignore. See change_index. 
# Reorder limits to match standard mathematical practice for scoping limits.reverse() if not isinstance(old, Symbol) or \ old.free_symbols.intersection(self.free_symbols): sub_into_func = True for i, xab in enumerate(limits): if 1 == len(xab) and old == xab[0]: if new._diff_wrt: xab = (new,) else: xab = (old, old) limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]]) if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0: sub_into_func = False break if isinstance(old, AppliedUndef) or isinstance(old, UndefinedFunction): sy2 = set(self.variables).intersection(set(new.atoms(Symbol))) sy1 = set(self.variables).intersection(set(old.args)) if not sy2.issubset(sy1): raise ValueError( "substitution can not create dummy dependencies") sub_into_func = True if sub_into_func: func = func.subs(old, new) else: # old is a Symbol and a dummy variable of some limit for i, xab in enumerate(limits): if len(xab) == 3: limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]]) if old == xab[0]: break # simplify redundant limits (x, x) to (x, ) for i, xab in enumerate(limits): if len(xab) == 2 and (xab[0] - xab[1]).is_zero: limits[i] = Tuple(xab[0], ) # Reorder limits back to representation-form limits.reverse() return self.func(func, *limits) @property def has_finite_limits(self): """ Returns True if the limits are known to be finite, either by the explicit bounds, assumptions on the bounds, or assumptions on the variables. False if known to be infinite, based on the bounds. None if not enough information is available to determine. Examples ======== >>> from sympy import Sum, Integral, Product, oo, Symbol >>> x = Symbol('x') >>> Sum(x, (x, 1, 8)).has_finite_limits True >>> Integral(x, (x, 1, oo)).has_finite_limits False >>> M = Symbol('M') >>> Sum(x, (x, 1, M)).has_finite_limits >>> N = Symbol('N', integer=True) >>> Product(x, (x, 1, N)).has_finite_limits True See Also ======== has_reversed_limits """ ret_None = False for lim in self.limits: if len(lim) == 3: if any(l.is_infinite for l in lim[1:]): # Any of the bounds are +/-oo return False elif any(l.is_infinite is None for l in lim[1:]): # Maybe there are assumptions on the variable? if lim[0].is_infinite is None: ret_None = True else: if lim[0].is_infinite is None: ret_None = True if ret_None: return None return True @property def has_reversed_limits(self): """ Returns True if the limits are known to be in reversed order, either by the explicit bounds, assumptions on the bounds, or assumptions on the variables. False if known to be in normal order, based on the bounds. None if not enough information is available to determine. Examples ======== >>> from sympy import Sum, Integral, Product, oo, Symbol >>> x = Symbol('x') >>> Sum(x, (x, 8, 1)).has_reversed_limits True >>> Sum(x, (x, 1, oo)).has_reversed_limits False >>> M = Symbol('M') >>> Integral(x, (x, 1, M)).has_reversed_limits >>> N = Symbol('N', integer=True, positive=True) >>> Sum(x, (x, 1, N)).has_reversed_limits False >>> Product(x, (x, 2, N)).has_reversed_limits >>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits False See Also ======== sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence """ ret_None = False for lim in self.limits: if len(lim) == 3: var, a, b = lim dif = b - a if dif.is_extended_negative: return True elif dif.is_extended_nonnegative: continue else: ret_None = True else: return None if ret_None: return None return False class AddWithLimits(ExprWithLimits): r"""Represents unevaluated oriented additions. 
Parent class for Integral and Sum. """ def __new__(cls, function, *symbols, **assumptions): pre = _common_new(cls, function, *symbols, **assumptions) if type(pre) is tuple: function, limits, orientation = pre else: return pre obj = Expr.__new__(cls, **assumptions) arglist = [orientation*function] # orientation not used in ExprWithLimits arglist.extend(limits) obj._args = tuple(arglist) obj.is_commutative = function.is_commutative # limits already checked return obj def _eval_adjoint(self): if all([x.is_real for x in flatten(self.limits)]): return self.func(self.function.adjoint(), *self.limits) return None def _eval_conjugate(self): if all([x.is_real for x in flatten(self.limits)]): return self.func(self.function.conjugate(), *self.limits) return None def _eval_transpose(self): if all([x.is_real for x in flatten(self.limits)]): return self.func(self.function.transpose(), *self.limits) return None def _eval_factor(self, **hints): if 1 == len(self.limits): summand = self.function.factor(**hints) if summand.is_Mul: out = sift(summand.args, lambda w: w.is_commutative \ and not set(self.variables) & w.free_symbols) return Mul(*out[True])*self.func(Mul(*out[False]), \ *self.limits) else: summand = self.func(self.function, *self.limits[0:-1]).factor() if not summand.has(self.variables[-1]): return self.func(1, [self.limits[-1]]).doit()*summand elif isinstance(summand, Mul): return self.func(summand, self.limits[-1]).factor() return self def _eval_expand_basic(self, **hints): from sympy.matrices.matrices import MatrixBase summand = self.function.expand(**hints) if summand.is_Add and summand.is_commutative: return Add(*[self.func(i, *self.limits) for i in summand.args]) elif isinstance(summand, MatrixBase): return summand.applyfunc(lambda x: self.func(x, *self.limits)) elif summand != self.function: return self.func(summand, *self.limits) return self
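# A minimal, hedged sketch (not part of the module above) of how the limit
# machinery in _common_new/_process_limits surfaces through the public
# subclasses Sum and Integral: limits are normalised to (symbol, lower, upper)
# tuples, the bound symbol is excluded from free_symbols, and an Interval can
# be given as shorthand for the bounds.
from sympy import Sum, Integral, Interval
from sympy.abc import i, n, x

s = Sum(i, (i, 1, n))
print(s.limits)         # ((i, 1, n),)
print(s.variables)      # [i]
print(s.free_symbols)   # {n}

# (x, Interval(0, 1)) is expanded by _process_limits into the limit (x, 0, 1).
print(Integral(x**2, (x, Interval(0, 1))).limits)   # ((x, 0, 1),)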
from __future__ import print_function, division from sympy.calculus.singularities import is_decreasing from sympy.calculus.util import AccumulationBounds from sympy.concrete.expr_with_limits import AddWithLimits from sympy.concrete.expr_with_intlimits import ExprWithIntLimits from sympy.concrete.gosper import gosper_sum from sympy.core.add import Add from sympy.core.function import Derivative from sympy.core.mul import Mul from sympy.core.relational import Eq from sympy.core.singleton import S from sympy.core.symbol import Dummy, Wild, Symbol from sympy.functions.special.zeta_functions import zeta from sympy.functions.elementary.piecewise import Piecewise from sympy.logic.boolalg import And from sympy.polys import apart, PolynomialError, together from sympy.series.limitseq import limit_seq from sympy.series.order import O from sympy.sets.sets import FiniteSet from sympy.simplify import denom from sympy.simplify.combsimp import combsimp from sympy.simplify.powsimp import powsimp from sympy.solvers import solve from sympy.solvers.solveset import solveset import itertools class Sum(AddWithLimits, ExprWithIntLimits): r"""Represents unevaluated summation. ``Sum`` represents a finite or infinite series, with the first argument being the general form of terms in the series, and the second argument being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking all integer values from ``start`` through ``end``. In accordance with long-standing mathematical convention, the end term is included in the summation. Finite sums =========== For finite sums (and sums with symbolic limits assumed to be finite) we follow the summation convention described by Karr [1], especially definition 3 of section 1.4. The sum: .. math:: \sum_{m \leq i < n} f(i) has *the obvious meaning* for `m < n`, namely: .. math:: \sum_{m \leq i < n} f(i) = f(m) + f(m+1) + \ldots + f(n-2) + f(n-1) with the upper limit value `f(n)` excluded. The sum over an empty set is zero if and only if `m = n`: .. math:: \sum_{m \leq i < n} f(i) = 0 \quad \mathrm{for} \quad m = n Finally, for all other sums over empty sets we assume the following definition: .. math:: \sum_{m \leq i < n} f(i) = - \sum_{n \leq i < m} f(i) \quad \mathrm{for} \quad m > n It is important to note that Karr defines all sums with the upper limit being exclusive. This is in contrast to the usual mathematical notation, but does not affect the summation convention. Indeed we have: .. math:: \sum_{m \leq i < n} f(i) = \sum_{i = m}^{n - 1} f(i) where the difference in notation is intentional to emphasize the meaning, with limits typeset on the top being inclusive. Examples ======== >>> from sympy.abc import i, k, m, n, x >>> from sympy import Sum, factorial, oo, IndexedBase, Function >>> Sum(k, (k, 1, m)) Sum(k, (k, 1, m)) >>> Sum(k, (k, 1, m)).doit() m**2/2 + m/2 >>> Sum(k**2, (k, 1, m)) Sum(k**2, (k, 1, m)) >>> Sum(k**2, (k, 1, m)).doit() m**3/3 + m**2/2 + m/6 >>> Sum(x**k, (k, 0, oo)) Sum(x**k, (k, 0, oo)) >>> Sum(x**k, (k, 0, oo)).doit() Piecewise((1/(1 - x), Abs(x) < 1), (Sum(x**k, (k, 0, oo)), True)) >>> Sum(x**k/factorial(k), (k, 0, oo)).doit() exp(x) Here are examples to do summation with symbolic indices. 
You can use either Function of IndexedBase classes: >>> f = Function('f') >>> Sum(f(n), (n, 0, 3)).doit() f(0) + f(1) + f(2) + f(3) >>> Sum(f(n), (n, 0, oo)).doit() Sum(f(n), (n, 0, oo)) >>> f = IndexedBase('f') >>> Sum(f[n]**2, (n, 0, 3)).doit() f[0]**2 + f[1]**2 + f[2]**2 + f[3]**2 An example showing that the symbolic result of a summation is still valid for seemingly nonsensical values of the limits. Then the Karr convention allows us to give a perfectly valid interpretation to those sums by interchanging the limits according to the above rules: >>> S = Sum(i, (i, 1, n)).doit() >>> S n**2/2 + n/2 >>> S.subs(n, -4) 6 >>> Sum(i, (i, 1, -4)).doit() 6 >>> Sum(-i, (i, -3, 0)).doit() 6 An explicit example of the Karr summation convention: >>> S1 = Sum(i**2, (i, m, m+n-1)).doit() >>> S1 m**2*n + m*n**2 - m*n + n**3/3 - n**2/2 + n/6 >>> S2 = Sum(i**2, (i, m+n, m-1)).doit() >>> S2 -m**2*n - m*n**2 + m*n - n**3/3 + n**2/2 - n/6 >>> S1 + S2 0 >>> S3 = Sum(i, (i, m, m-1)).doit() >>> S3 0 See Also ======== summation Product, sympy.concrete.products.product References ========== .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM, Volume 28 Issue 2, April 1981, Pages 305-350 http://dl.acm.org/citation.cfm?doid=322248.322255 .. [2] https://en.wikipedia.org/wiki/Summation#Capital-sigma_notation .. [3] https://en.wikipedia.org/wiki/Empty_sum """ __slots__ = ('is_commutative',) def __new__(cls, function, *symbols, **assumptions): obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions) if not hasattr(obj, 'limits'): return obj if any(len(l) != 3 or None in l for l in obj.limits): raise ValueError('Sum requires values for lower and upper bounds.') return obj def _eval_is_zero(self): # a Sum is only zero if its function is zero or if all terms # cancel out. This only answers whether the summand is zero; if # not then None is returned since we don't analyze whether all # terms cancel out. 
if self.function.is_zero or self.has_empty_sequence: return True def _eval_is_extended_real(self): if self.has_empty_sequence: return True return self.function.is_extended_real def _eval_is_positive(self): if self.has_finite_limits and self.has_reversed_limits is False: return self.function.is_positive def _eval_is_negative(self): if self.has_finite_limits and self.has_reversed_limits is False: return self.function.is_negative def _eval_is_finite(self): if self.has_finite_limits and self.function.is_finite: return True def doit(self, **hints): if hints.get('deep', True): f = self.function.doit(**hints) else: f = self.function # first make sure any definite limits have summation # variables with matching assumptions reps = {} for xab in self.limits: d = _dummy_with_inherited_properties_concrete(xab) if d: reps[xab[0]] = d if reps: undo = dict([(v, k) for k, v in reps.items()]) did = self.xreplace(reps).doit(**hints) if type(did) is tuple: # when separate=True did = tuple([i.xreplace(undo) for i in did]) elif did is not None: did = did.xreplace(undo) else: did = self return did if self.function.is_Matrix: expanded = self.expand() if self != expanded: return expanded.doit() return _eval_matrix_sum(self) for n, limit in enumerate(self.limits): i, a, b = limit dif = b - a if dif == -1: # Any summation over an empty set is zero return S.Zero if dif.is_integer and dif.is_negative: a, b = b + 1, a - 1 f = -f newf = eval_sum(f, (i, a, b)) if newf is None: if f == self.function: zeta_function = self.eval_zeta_function(f, (i, a, b)) if zeta_function is not None: return zeta_function return self else: return self.func(f, *self.limits[n:]) f = newf if hints.get('deep', True): # eval_sum could return partially unevaluated # result with Piecewise. In this case we won't # doit() recursively. if not isinstance(f, Piecewise): return f.doit(**hints) return f def eval_zeta_function(self, f, limits): """ Check whether the function matches with the zeta function. If it matches, then return a `Piecewise` expression because zeta function does not converge unless `s > 1` and `q > 0` """ i, a, b = limits w, y, z = Wild('w', exclude=[i]), Wild('y', exclude=[i]), Wild('z', exclude=[i]) result = f.match((w * i + y) ** (-z)) if result is not None and b is S.Infinity: coeff = 1 / result[w] ** result[z] s = result[z] q = result[y] / result[w] + a return Piecewise((coeff * zeta(s, q), And(q > 0, s > 1)), (self, True)) def _eval_derivative(self, x): """ Differentiate wrt x as long as x is not in the free symbols of any of the upper or lower limits. Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a` since the value of the sum is discontinuous in `a`. In a case involving a limit variable, the unevaluated derivative is returned. 
""" # diff already confirmed that x is in the free symbols of self, but we # don't want to differentiate wrt any free symbol in the upper or lower # limits # XXX remove this test for free_symbols when the default _eval_derivative is in if isinstance(x, Symbol) and x not in self.free_symbols: return S.Zero # get limits and the function f, limits = self.function, list(self.limits) limit = limits.pop(-1) if limits: # f is the argument to a Sum f = self.func(f, *limits) _, a, b = limit if x in a.free_symbols or x in b.free_symbols: return None df = Derivative(f, x, evaluate=True) rv = self.func(df, limit) return rv def _eval_difference_delta(self, n, step): k, _, upper = self.args[-1] new_upper = upper.subs(n, n + step) if len(self.args) == 2: f = self.args[0] else: f = self.func(*self.args[:-1]) return Sum(f, (k, upper + 1, new_upper)).doit() def _eval_simplify(self, **kwargs): from sympy.simplify.simplify import factor_sum, sum_combine from sympy.core.function import expand from sympy.core.mul import Mul # split the function into adds terms = Add.make_args(expand(self.function)) s_t = [] # Sum Terms o_t = [] # Other Terms for term in terms: if term.has(Sum): # if there is an embedded sum here # it is of the form x * (Sum(whatever)) # hence we make a Mul out of it, and simplify all interior sum terms subterms = Mul.make_args(expand(term)) out_terms = [] for subterm in subterms: # go through each term if isinstance(subterm, Sum): # if it's a sum, simplify it out_terms.append(subterm._eval_simplify()) else: # otherwise, add it as is out_terms.append(subterm) # turn it back into a Mul s_t.append(Mul(*out_terms)) else: o_t.append(term) # next try to combine any interior sums for further simplification result = Add(sum_combine(s_t), *o_t) return factor_sum(result, limits=self.limits) def is_convergent(self): r"""Checks for the convergence of a Sum. We divide the study of convergence of infinite sums and products in two parts. First Part: One part is the question whether all the terms are well defined, i.e., they are finite in a sum and also non-zero in a product. Zero is the analogy of (minus) infinity in products as :math:`e^{-\infty} = 0`. Second Part: The second part is the question of convergence after infinities, and zeros in products, have been omitted assuming that their number is finite. This means that we only consider the tail of the sum or product, starting from some point after which all terms are well defined. For example, in a sum of the form: .. math:: \sum_{1 \leq i < \infty} \frac{1}{n^2 + an + b} where a and b are numbers. The routine will return true, even if there are infinities in the term sequence (at most two). An analogous product would be: .. math:: \prod_{1 \leq i < \infty} e^{\frac{1}{n^2 + an + b}} This is how convergence is interpreted. It is concerned with what happens at the limit. Finding the bad terms is another independent matter. Note: It is responsibility of user to see that the sum or product is well defined. There are various tests employed to check the convergence like divergence test, root test, integral test, alternating series test, comparison tests, Dirichlet tests. It returns true if Sum is convergent and false if divergent and NotImplementedError if it can not be checked. References ========== .. 
[1] https://en.wikipedia.org/wiki/Convergence_tests Examples ======== >>> from sympy import factorial, S, Sum, Symbol, oo >>> n = Symbol('n', integer=True) >>> Sum(n/(n - 1), (n, 4, 7)).is_convergent() True >>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent() False >>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent() False >>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent() True See Also ======== Sum.is_absolutely_convergent() sympy.concrete.products.Product.is_convergent() """ from sympy import Interval, Integral, log, symbols, simplify p, q, r = symbols('p q r', cls=Wild) sym = self.limits[0][0] lower_limit = self.limits[0][1] upper_limit = self.limits[0][2] sequence_term = self.function if len(sequence_term.free_symbols) > 1: raise NotImplementedError("convergence checking for more than one symbol " "containing series is not handled") if lower_limit.is_finite and upper_limit.is_finite: return S.true # transform sym -> -sym and swap the upper_limit = S.Infinity # and lower_limit = - upper_limit if lower_limit is S.NegativeInfinity: if upper_limit is S.Infinity: return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \ Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent() sequence_term = simplify(sequence_term.xreplace({sym: -sym})) lower_limit = -upper_limit upper_limit = S.Infinity sym_ = Dummy(sym.name, integer=True, positive=True) sequence_term = sequence_term.xreplace({sym: sym_}) sym = sym_ interval = Interval(lower_limit, upper_limit) # Piecewise function handle if sequence_term.is_Piecewise: for func, cond in sequence_term.args: # see if it represents something going to oo if cond == True or cond.as_set().sup is S.Infinity: s = Sum(func, (sym, lower_limit, upper_limit)) return s.is_convergent() return S.true ### -------- Divergence test ----------- ### try: lim_val = limit_seq(sequence_term, sym) if lim_val is not None and lim_val.is_zero is False: return S.false except NotImplementedError: pass try: lim_val_abs = limit_seq(abs(sequence_term), sym) if lim_val_abs is not None and lim_val_abs.is_zero is False: return S.false except NotImplementedError: pass order = O(sequence_term, (sym, S.Infinity)) ### --------- p-series test (1/n**p) ---------- ### p_series_test = order.expr.match(sym**p) if p_series_test is not None: if p_series_test[p] < -1: return S.true if p_series_test[p] >= -1: return S.false ### ------------- comparison test ------------- ### # 1/(n**p*log(n)**q*log(log(n))**r) comparison n_log_test = order.expr.match(1/(sym**p*log(sym)**q*log(log(sym))**r)) if n_log_test is not None: if (n_log_test[p] > 1 or (n_log_test[p] == 1 and n_log_test[q] > 1) or (n_log_test[p] == n_log_test[q] == 1 and n_log_test[r] > 1)): return S.true return S.false ### ------------- Limit comparison test -----------### # (1/n) comparison try: lim_comp = limit_seq(sym*sequence_term, sym) if lim_comp is not None and lim_comp.is_number and lim_comp > 0: return S.false except NotImplementedError: pass ### ----------- ratio test ---------------- ### next_sequence_term = sequence_term.xreplace({sym: sym + 1}) ratio = combsimp(powsimp(next_sequence_term/sequence_term)) try: lim_ratio = limit_seq(ratio, sym) if lim_ratio is not None and lim_ratio.is_number: if abs(lim_ratio) > 1: return S.false if abs(lim_ratio) < 1: return S.true except NotImplementedError: pass ### ----------- root test ---------------- ### # lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity) try: lim_evaluated = limit_seq(abs(sequence_term)**(1/sym), sym) if lim_evaluated is not None and 
lim_evaluated.is_number: if lim_evaluated < 1: return S.true if lim_evaluated > 1: return S.false except NotImplementedError: pass ### ------------- alternating series test ----------- ### dict_val = sequence_term.match((-1)**(sym + p)*q) if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval): return S.true ### ------------- integral test -------------- ### check_interval = None maxima = solveset(sequence_term.diff(sym), sym, interval) if not maxima: check_interval = interval elif isinstance(maxima, FiniteSet) and maxima.sup.is_number: check_interval = Interval(maxima.sup, interval.sup) if (check_interval is not None and (is_decreasing(sequence_term, check_interval) or is_decreasing(-sequence_term, check_interval))): integral_val = Integral( sequence_term, (sym, lower_limit, upper_limit)) try: integral_val_evaluated = integral_val.doit() if integral_val_evaluated.is_number: return S(integral_val_evaluated.is_finite) except NotImplementedError: pass ### ----- Dirichlet and bounded times convergent tests ----- ### # TODO # # Dirichlet_test # https://en.wikipedia.org/wiki/Dirichlet%27s_test # # Bounded times convergent test # It is based on comparison theorems for series. # In particular, if the general term of a series can # be written as a product of two terms a_n and b_n # and if a_n is bounded and if Sum(b_n) is absolutely # convergent, then the original series Sum(a_n * b_n) # is absolutely convergent and so convergent. # # The following code can grows like 2**n where n is the # number of args in order.expr # Possibly combined with the potentially slow checks # inside the loop, could make this test extremely slow # for larger summation expressions. if order.expr.is_Mul: args = order.expr.args argset = set(args) ### -------------- Dirichlet tests -------------- ### m = Dummy('m', integer=True) def _dirichlet_test(g_n): try: ing_val = limit_seq(Sum(g_n, (sym, interval.inf, m)).doit(), m) if ing_val is not None and ing_val.is_finite: return S.true except NotImplementedError: pass ### -------- bounded times convergent test ---------### def _bounded_convergent_test(g1_n, g2_n): try: lim_val = limit_seq(g1_n, sym) if lim_val is not None and (lim_val.is_finite or ( isinstance(lim_val, AccumulationBounds) and (lim_val.max - lim_val.min).is_finite)): if Sum(g2_n, (sym, lower_limit, upper_limit)).is_absolutely_convergent(): return S.true except NotImplementedError: pass for n in range(1, len(argset)): for a_tuple in itertools.combinations(args, n): b_set = argset - set(a_tuple) a_n = Mul(*a_tuple) b_n = Mul(*b_set) if is_decreasing(a_n, interval): dirich = _dirichlet_test(b_n) if dirich is not None: return dirich bc_test = _bounded_convergent_test(a_n, b_n) if bc_test is not None: return bc_test _sym = self.limits[0][0] sequence_term = sequence_term.xreplace({sym: _sym}) raise NotImplementedError("The algorithm to find the Sum convergence of %s " "is not yet implemented" % (sequence_term)) def is_absolutely_convergent(self): """ Checks for the absolute convergence of an infinite series. Same as checking convergence of absolute value of sequence_term of an infinite series. References ========== .. 
[1] https://en.wikipedia.org/wiki/Absolute_convergence Examples ======== >>> from sympy import Sum, Symbol, sin, oo >>> n = Symbol('n', integer=True) >>> Sum((-1)**n, (n, 1, oo)).is_absolutely_convergent() False >>> Sum((-1)**n/n**2, (n, 1, oo)).is_absolutely_convergent() True See Also ======== Sum.is_convergent() """ return Sum(abs(self.function), self.limits).is_convergent() def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True): """ Return an Euler-Maclaurin approximation of self, where m is the number of leading terms to sum directly and n is the number of terms in the tail. With m = n = 0, this is simply the corresponding integral plus a first-order endpoint correction. Returns (s, e) where s is the Euler-Maclaurin approximation and e is the estimated error (taken to be the magnitude of the first omitted term in the tail): >>> from sympy.abc import k, a, b >>> from sympy import Sum >>> Sum(1/k, (k, 2, 5)).doit().evalf() 1.28333333333333 >>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin() >>> s -log(2) + 7/20 + log(5) >>> from sympy import sstr >>> print(sstr((s.evalf(), e.evalf()), full_prec=True)) (1.26629073187415, 0.0175000000000000) The endpoints may be symbolic: >>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin() >>> s -log(a) + log(b) + 1/(2*b) + 1/(2*a) >>> e Abs(1/(12*b**2) - 1/(12*a**2)) If the function is a polynomial of degree at most 2n+1, the Euler-Maclaurin formula becomes exact (and e = 0 is returned): >>> Sum(k, (k, 2, b)).euler_maclaurin() (b**2/2 + b/2 - 1, 0) >>> Sum(k, (k, 2, b)).doit() b**2/2 + b/2 - 1 With a nonzero eps specified, the summation is ended as soon as the remainder term is less than the epsilon. """ from sympy.functions import bernoulli, factorial from sympy.integrals import Integral m = int(m) n = int(n) f = self.function if len(self.limits) != 1: raise ValueError("More than 1 limit") i, a, b = self.limits[0] if (a > b) == True: if a - b == 1: return S.Zero, S.Zero a, b = b + 1, a - 1 f = -f s = S.Zero if m: if b.is_Integer and a.is_Integer: m = min(m, b - a + 1) if not eps or f.is_polynomial(i): for k in range(m): s += f.subs(i, a + k) else: term = f.subs(i, a) if term: test = abs(term.evalf(3)) < eps if test == True: return s, abs(term) elif not (test == False): # a symbolic Relational class, can't go further return term, S.Zero s += term for k in range(1, m): term = f.subs(i, a + k) if abs(term.evalf(3)) < eps and term != 0: return s, abs(term) s += term if b - a + 1 == m: return s, S.Zero a += m x = Dummy('x') I = Integral(f.subs(i, x), (x, a, b)) if eval_integral: I = I.doit() s += I def fpoint(expr): if b is S.Infinity: return expr.subs(i, a), 0 return expr.subs(i, a), expr.subs(i, b) fa, fb = fpoint(f) iterm = (fa + fb)/2 g = f.diff(i) for k in range(1, n + 2): ga, gb = fpoint(g) term = bernoulli(2*k)/factorial(2*k)*(gb - ga) if (eps and term and abs(term.evalf(3)) < eps) or (k > n): break s += term g = g.diff(i, 2, simplify=False) return s + iterm, abs(term) def reverse_order(self, *indices): """ Reverse the order of a limit in a Sum. Usage ===== ``reverse_order(self, *indices)`` reverses some limits in the expression ``self`` which can be either a ``Sum`` or a ``Product``. The selectors in the argument ``indices`` specify some indices whose limits get reversed. These selectors are either variable names or numerical indices counted starting from the inner-most limit tuple. 
Examples ======== >>> from sympy import Sum >>> from sympy.abc import x, y, a, b, c, d >>> Sum(x, (x, 0, 3)).reverse_order(x) Sum(-x, (x, 4, -1)) >>> Sum(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(x, y) Sum(x*y, (x, 6, 0), (y, 7, -1)) >>> Sum(x, (x, a, b)).reverse_order(x) Sum(-x, (x, b + 1, a - 1)) >>> Sum(x, (x, a, b)).reverse_order(0) Sum(-x, (x, b + 1, a - 1)) While one should prefer variable names when specifying which limits to reverse, the index counting notation comes in handy in case there are several symbols with the same name. >>> S = Sum(x**2, (x, a, b), (x, c, d)) >>> S Sum(x**2, (x, a, b), (x, c, d)) >>> S0 = S.reverse_order(0) >>> S0 Sum(-x**2, (x, b + 1, a - 1), (x, c, d)) >>> S1 = S0.reverse_order(1) >>> S1 Sum(x**2, (x, b + 1, a - 1), (x, d + 1, c - 1)) Of course we can mix both notations: >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1) Sum(x*y, (x, b + 1, a - 1), (y, 6, 1)) >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x) Sum(x*y, (x, b + 1, a - 1), (y, 6, 1)) See Also ======== sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index, reorder_limit, sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder References ========== .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM, Volume 28 Issue 2, April 1981, Pages 305-350 http://dl.acm.org/citation.cfm?doid=322248.322255 """ l_indices = list(indices) for i, indx in enumerate(l_indices): if not isinstance(indx, int): l_indices[i] = self.index(indx) e = 1 limits = [] for i, limit in enumerate(self.limits): l = limit if i in l_indices: e = -e l = (limit[0], limit[2] + 1, limit[1] - 1) limits.append(l) return Sum(e * self.function, *limits) def summation(f, *symbols, **kwargs): r""" Compute the summation of f with respect to symbols. The notation for symbols is similar to the notation used in Integral. summation(f, (i, a, b)) computes the sum of f with respect to i from a to b, i.e., :: b ____ \ ` summation(f, (i, a, b)) = ) f /___, i = a If it cannot compute the sum, it returns an unevaluated Sum object. Repeated sums can be computed by introducing additional symbols tuples:: >>> from sympy import summation, oo, symbols, log >>> i, n, m = symbols('i n m', integer=True) >>> summation(2*i - 1, (i, 1, n)) n**2 >>> summation(1/2**i, (i, 0, oo)) 2 >>> summation(1/log(n)**n, (n, 2, oo)) Sum(log(n)**(-n), (n, 2, oo)) >>> summation(i, (i, 0, n), (n, 0, m)) m**3/6 + m**2/2 + m/3 >>> from sympy.abc import x >>> from sympy import factorial >>> summation(x**n/factorial(n), (n, 0, oo)) exp(x) See Also ======== Sum Product, sympy.concrete.products.product """ return Sum(f, *symbols, **kwargs).doit(deep=False) def telescopic_direct(L, R, n, limits): """Returns the direct summation of the terms of a telescopic sum L is the term with lower index R is the term with higher index n difference between the indexes of L and R For example: >>> from sympy.concrete.summations import telescopic_direct >>> from sympy.abc import k, a, b >>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b)) -1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a """ (i, a, b) = limits s = 0 for m in range(n): s += L.subs(i, a + m) + R.subs(i, b - m) return s def telescopic(L, R, limits): '''Tries to perform the summation using the telescopic property return None if not possible ''' (i, a, b) = limits if L.is_Add or R.is_Add: return None # We want to solve(L.subs(i, i + m) + R, m) # First we try a simple match since this does things that # solve doesn't do, e.g. 
solve(f(k+m)-f(k), m) fails k = Wild("k") sol = (-R).match(L.subs(i, i + k)) s = None if sol and k in sol: s = sol[k] if not (s.is_Integer and L.subs(i, i + s) == -R): # sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x})) s = None # But there are things that match doesn't do that solve # can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1 if s is None: m = Dummy('m') try: sol = solve(L.subs(i, i + m) + R, m) or [] except NotImplementedError: return None sol = [si for si in sol if si.is_Integer and (L.subs(i, i + si) + R).expand().is_zero] if len(sol) != 1: return None s = sol[0] if s < 0: return telescopic_direct(R, L, abs(s), (i, a, b)) elif s > 0: return telescopic_direct(L, R, s, (i, a, b)) def eval_sum(f, limits): from sympy.concrete.delta import deltasummation, _has_simple_delta from sympy.functions import KroneckerDelta (i, a, b) = limits if f.is_zero: return S.Zero if i not in f.free_symbols: return f*(b - a + 1) if a == b: return f.subs(i, a) if isinstance(f, Piecewise): if not any(i in arg.args[1].free_symbols for arg in f.args): # Piecewise conditions do not depend on the dummy summation variable, # therefore we can fold: Sum(Piecewise((e, c), ...), limits) # --> Piecewise((Sum(e, limits), c), ...) newargs = [] for arg in f.args: newexpr = eval_sum(arg.expr, limits) if newexpr is None: return None newargs.append((newexpr, arg.cond)) return f.func(*newargs) if f.has(KroneckerDelta): f = f.replace( lambda x: isinstance(x, Sum), lambda x: x.factor() ) if _has_simple_delta(f, limits[0]): return deltasummation(f, limits) dif = b - a definite = dif.is_Integer # Doing it directly may be faster if there are very few terms. if definite and (dif < 100): return eval_sum_direct(f, (i, a, b)) if isinstance(f, Piecewise): return None # Try to do it symbolically. Even when the number of terms is known, # this can save time when b-a is big. # We should try to transform to partial fractions value = eval_sum_symbolic(f.expand(), (i, a, b)) if value is not None: return value # Do it directly if definite: return eval_sum_direct(f, (i, a, b)) def eval_sum_direct(expr, limits): """ Evaluate expression directly, but perform some simple checks first to possibly result in a smaller expression and faster execution. 
""" from sympy.core import Add (i, a, b) = limits dif = b - a # Linearity if expr.is_Mul: # Try factor out everything not including i without_i, with_i = expr.as_independent(i) if without_i != 1: s = eval_sum_direct(with_i, (i, a, b)) if s: r = without_i*s if r is not S.NaN: return r else: # Try term by term L, R = expr.as_two_terms() if not L.has(i): sR = eval_sum_direct(R, (i, a, b)) if sR: return L*sR if not R.has(i): sL = eval_sum_direct(L, (i, a, b)) if sL: return sL*R try: expr = apart(expr, i) # see if it becomes an Add except PolynomialError: pass if expr.is_Add: # Try factor out everything not including i without_i, with_i = expr.as_independent(i) if without_i != 0: s = eval_sum_direct(with_i, (i, a, b)) if s: r = without_i*(dif + 1) + s if r is not S.NaN: return r else: # Try term by term L, R = expr.as_two_terms() lsum = eval_sum_direct(L, (i, a, b)) rsum = eval_sum_direct(R, (i, a, b)) if None not in (lsum, rsum): r = lsum + rsum if r is not S.NaN: return r return Add(*[expr.subs(i, a + j) for j in range(dif + 1)]) def eval_sum_symbolic(f, limits): from sympy.functions import harmonic, bernoulli f_orig = f (i, a, b) = limits if not f.has(i): return f*(b - a + 1) # Linearity if f.is_Mul: # Try factor out everything not including i without_i, with_i = f.as_independent(i) if without_i != 1: s = eval_sum_symbolic(with_i, (i, a, b)) if s: r = without_i*s if r is not S.NaN: return r else: # Try term by term L, R = f.as_two_terms() if not L.has(i): sR = eval_sum_symbolic(R, (i, a, b)) if sR: return L*sR if not R.has(i): sL = eval_sum_symbolic(L, (i, a, b)) if sL: return sL*R try: f = apart(f, i) # see if it becomes an Add except PolynomialError: pass if f.is_Add: L, R = f.as_two_terms() lrsum = telescopic(L, R, (i, a, b)) if lrsum: return lrsum # Try factor out everything not including i without_i, with_i = f.as_independent(i) if without_i != 0: s = eval_sum_symbolic(with_i, (i, a, b)) if s: r = without_i*(b - a + 1) + s if r is not S.NaN: return r else: # Try term by term lsum = eval_sum_symbolic(L, (i, a, b)) rsum = eval_sum_symbolic(R, (i, a, b)) if None not in (lsum, rsum): r = lsum + rsum if r is not S.NaN: return r # Polynomial terms with Faulhaber's formula n = Wild('n') result = f.match(i**n) if result is not None: n = result[n] if n.is_Integer: if n >= 0: if (b is S.Infinity and not a is S.NegativeInfinity) or \ (a is S.NegativeInfinity and not b is S.Infinity): return S.Infinity return ((bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)).expand() elif a.is_Integer and a >= 1: if n == -1: return harmonic(b) - harmonic(a - 1) else: return harmonic(b, abs(n)) - harmonic(a - 1, abs(n)) if not (a.has(S.Infinity, S.NegativeInfinity) or b.has(S.Infinity, S.NegativeInfinity)): # Geometric terms c1 = Wild('c1', exclude=[i]) c2 = Wild('c2', exclude=[i]) c3 = Wild('c3', exclude=[i]) wexp = Wild('wexp') # Here we first attempt powsimp on f for easier matching with the # exponential pattern, and attempt expansion on the exponent for easier # matching with the linear pattern. 
e = f.powsimp().match(c1 ** wexp) if e is not None: e_exp = e.pop(wexp).expand().match(c2*i + c3) if e_exp is not None: e.update(e_exp) if e is not None: p = (c1**c3).subs(e) q = (c1**c2).subs(e) r = p*(q**a - q**(b + 1))/(1 - q) l = p*(b - a + 1) return Piecewise((l, Eq(q, S.One)), (r, True)) r = gosper_sum(f, (i, a, b)) if isinstance(r, (Mul,Add)): from sympy import ordered, Tuple non_limit = r.free_symbols - Tuple(*limits[1:]).free_symbols den = denom(together(r)) den_sym = non_limit & den.free_symbols args = [] for v in ordered(den_sym): try: s = solve(den, v) m = Eq(v, s[0]) if s else S.false if m != False: args.append((Sum(f_orig.subs(*m.args), limits).doit(), m)) break except NotImplementedError: continue args.append((r, True)) return Piecewise(*args) if not r in (None, S.NaN): return r h = eval_sum_hyper(f_orig, (i, a, b)) if h is not None: return h factored = f_orig.factor() if factored != f_orig: return eval_sum_symbolic(factored, (i, a, b)) def _eval_sum_hyper(f, i, a): """ Returns (res, cond). Sums from a to oo. """ from sympy.functions import hyper from sympy.simplify import hyperexpand, hypersimp, fraction, simplify from sympy.polys.polytools import Poly, factor from sympy.core.numbers import Float if a != 0: return _eval_sum_hyper(f.subs(i, i + a), i, 0) if f.subs(i, 0) == 0: if simplify(f.subs(i, Dummy('i', integer=True, positive=True))) == 0: return S.Zero, True return _eval_sum_hyper(f.subs(i, i + 1), i, 0) hs = hypersimp(f, i) if hs is None: return None if isinstance(hs, Float): from sympy.simplify.simplify import nsimplify hs = nsimplify(hs) numer, denom = fraction(factor(hs)) top, topl = numer.as_coeff_mul(i) bot, botl = denom.as_coeff_mul(i) ab = [top, bot] factors = [topl, botl] params = [[], []] for k in range(2): for fac in factors[k]: mul = 1 if fac.is_Pow: mul = fac.exp fac = fac.base if not mul.is_Integer: return None p = Poly(fac, i) if p.degree() != 1: return None m, n = p.all_coeffs() ab[k] *= m**mul params[k] += [n/m]*mul # Add "1" to numerator parameters, to account for implicit n! in # hypergeometric series. 
ap = params[0] + [1] bq = params[1] x = ab[0]/ab[1] h = hyper(ap, bq, x) f = combsimp(f) return f.subs(i, 0)*hyperexpand(h), h.convergence_statement def eval_sum_hyper(f, i_a_b): from sympy.logic.boolalg import And i, a, b = i_a_b if (b - a).is_Integer: # We are never going to do better than doing the sum in the obvious way return None old_sum = Sum(f, (i, a, b)) if b != S.Infinity: if a is S.NegativeInfinity: res = _eval_sum_hyper(f.subs(i, -i), i, -b) if res is not None: return Piecewise(res, (old_sum, True)) else: res1 = _eval_sum_hyper(f, i, a) res2 = _eval_sum_hyper(f, i, b + 1) if res1 is None or res2 is None: return None (res1, cond1), (res2, cond2) = res1, res2 cond = And(cond1, cond2) if cond == False: return None return Piecewise((res1 - res2, cond), (old_sum, True)) if a is S.NegativeInfinity: res1 = _eval_sum_hyper(f.subs(i, -i), i, 1) res2 = _eval_sum_hyper(f, i, 0) if res1 is None or res2 is None: return None res1, cond1 = res1 res2, cond2 = res2 cond = And(cond1, cond2) if cond == False or cond.as_set() == S.EmptySet: return None return Piecewise((res1 + res2, cond), (old_sum, True)) # Now b == oo, a != -oo res = _eval_sum_hyper(f, i, a) if res is not None: r, c = res if c == False: if r.is_number: f = f.subs(i, Dummy('i', integer=True, positive=True) + a) if f.is_positive or f.is_zero: return S.Infinity elif f.is_negative: return S.NegativeInfinity return None return Piecewise(res, (old_sum, True)) def _eval_matrix_sum(expression): f = expression.function for n, limit in enumerate(expression.limits): i, a, b = limit dif = b - a if dif.is_Integer: if (dif < 0) == True: a, b = b + 1, a - 1 f = -f newf = eval_sum_direct(f, (i, a, b)) if newf is not None: return newf.doit() def _dummy_with_inherited_properties_concrete(limits): """ Return a Dummy symbol that inherits as much assumptions based on the provided symbol and limits as possible. If the symbol already has all possible assumptions, return None. """ x, a, b = limits l = [a, b] assumptions_to_consider = ['extended_nonnegative', 'nonnegative', 'extended_nonpositive', 'nonpositive', 'extended_positive', 'positive', 'extended_negative', 'negative', 'integer', 'rational', 'finite', 'zero', 'real', 'extended_real'] assumptions_to_keep = {} assumptions_to_add = {} for assum in assumptions_to_consider: assum_true = x._assumptions.get(assum, None) if assum_true: assumptions_to_keep[assum] = True elif all([getattr(i, 'is_' + assum) for i in l]): assumptions_to_add[assum] = True if assumptions_to_add: assumptions_to_keep.update(assumptions_to_add) return Dummy('d', **assumptions_to_keep) else: return None
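# Hedged usage sketch tying together a few of the evaluation paths defined
# above: the Faulhaber closed form produced by eval_sum_symbolic (reached via
# doit()), the Karr convention for reversed limits, the p-series branch of
# is_convergent(), and the Euler-Maclaurin approximation.
from sympy import Sum, oo, Symbol

k = Symbol('k', integer=True)
n = Symbol('n', integer=True)

print(Sum(k**2, (k, 1, n)).doit())              # n**3/3 + n**2/2 + n/6
print(Sum(k, (k, 1, -4)).doit())                # 6   (reversed limits, Karr convention)
print(Sum(1/k**2, (k, 1, oo)).is_convergent())  # True  (p-series with p = 2)
print(Sum(1/k, (k, 1, oo)).is_convergent())     # False (harmonic series)
print(Sum(k, (k, 2, n)).euler_maclaurin())      # (n**2/2 + n/2 - 1, 0); exact for polynomials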
"""Tools to assist importing optional external modules.""" from __future__ import print_function, division import sys from distutils.version import LooseVersion # Override these in the module to change the default warning behavior. # For example, you might set both to False before running the tests so that # warnings are not printed to the console, or set both to True for debugging. WARN_NOT_INSTALLED = None # Default is False WARN_OLD_VERSION = None # Default is True def __sympy_debug(): # helper function from sympy/__init__.py # We don't just import SYMPY_DEBUG from that file because we don't want to # import all of sympy just to use this module. import os debug_str = os.getenv('SYMPY_DEBUG', 'False') if debug_str in ('True', 'False'): return eval(debug_str) else: raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" % debug_str) if __sympy_debug(): WARN_OLD_VERSION = True WARN_NOT_INSTALLED = True def import_module(module, min_module_version=None, min_python_version=None, warn_not_installed=None, warn_old_version=None, module_version_attr='__version__', module_version_attr_call_args=None, import_kwargs={}, catch=()): """ Import and return a module if it is installed. If the module is not installed, it returns None. A minimum version for the module can be given as the keyword argument min_module_version. This should be comparable against the module version. By default, module.__version__ is used to get the module version. To override this, set the module_version_attr keyword argument. If the attribute of the module to get the version should be called (e.g., module.version()), then set module_version_attr_call_args to the args such that module.module_version_attr(*module_version_attr_call_args) returns the module's version. If the module version is less than min_module_version using the Python < comparison, None will be returned, even if the module is installed. You can use this to keep from importing an incompatible older version of a module. You can also specify a minimum Python version by using the min_python_version keyword argument. This should be comparable against sys.version_info. If the keyword argument warn_not_installed is set to True, the function will emit a UserWarning when the module is not installed. If the keyword argument warn_old_version is set to True, the function will emit a UserWarning when the library is installed, but cannot be imported because of the min_module_version or min_python_version options. Note that because of the way warnings are handled, a warning will be emitted for each module only once. You can change the default warning behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and WARN_OLD_VERSION is True. This function uses __import__() to import the module. To pass additional options to __import__(), use the import_kwargs keyword argument. For example, to import a submodule A.B, you must pass a nonempty fromlist option to __import__. See the docstring of __import__(). This catches ImportError to determine if the module is not installed. To catch additional errors, pass them as a tuple to the catch keyword argument. Examples ======== >>> from sympy.external import import_module >>> numpy = import_module('numpy') >>> numpy = import_module('numpy', min_python_version=(2, 7), ... warn_old_version=False) >>> numpy = import_module('numpy', min_module_version='1.5', ... 
warn_old_version=False) # numpy.__version__ is a string >>> # gmpy does not have __version__, but it does have gmpy.version() >>> gmpy = import_module('gmpy', min_module_version='1.14', ... module_version_attr='version', module_version_attr_call_args=(), ... warn_old_version=False) >>> # To import a submodule, you must pass a nonempty fromlist to >>> # __import__(). The values do not matter. >>> p3 = import_module('mpl_toolkits.mplot3d', ... import_kwargs={'fromlist':['something']}) >>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened >>> matplotlib = import_module('matplotlib', ... import_kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,)) """ # keyword argument overrides default, and global variable overrides # keyword argument. warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None else warn_old_version or True) warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None else warn_not_installed or False) import warnings # Check Python first so we don't waste time importing a module we can't use if min_python_version: if sys.version_info < min_python_version: if warn_old_version: warnings.warn("Python version is too old to use %s " "(%s or newer required)" % ( module, '.'.join(map(str, min_python_version))), UserWarning, stacklevel=2) return # PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it if module == 'numpy' and '__pypy__' in sys.builtin_module_names: return try: mod = __import__(module, **import_kwargs) ## there's something funny about imports with matplotlib and py3k. doing ## from matplotlib import collections ## gives python's stdlib collections module. explicitly re-importing ## the module fixes this. from_list = import_kwargs.get('fromlist', tuple()) for submod in from_list: if submod == 'collections' and mod.__name__ == 'matplotlib': __import__(module + '.' + submod) except ImportError: if warn_not_installed: warnings.warn("%s module is not installed" % module, UserWarning, stacklevel=2) return except catch as e: if warn_not_installed: warnings.warn( "%s module could not be used (%s)" % (module, repr(e)), stacklevel=2) return if min_module_version: modversion = getattr(mod, module_version_attr) if module_version_attr_call_args is not None: modversion = modversion(*module_version_attr_call_args) if LooseVersion(modversion) < LooseVersion(min_module_version): if warn_old_version: # Attempt to create a pretty string version of the version if isinstance(min_module_version, str): verstr = min_module_version elif isinstance(min_module_version, (tuple, list)): verstr = '.'.join(map(str, min_module_version)) else: # Either don't know what this is. Hopefully # it's something that has a nice str version, like an int. verstr = str(min_module_version) warnings.warn("%s version is too old to use " "(%s or newer required)" % (module, verstr), UserWarning, stacklevel=2) return return mod
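# Hedged sketch of the intended import_module() pattern: an optional
# dependency is imported lazily, and the caller falls back to pure Python
# when the module is missing or too old (import_module then returns None).
from sympy.external import import_module

numpy = import_module('numpy', min_module_version='1.5', warn_old_version=False)

def dot(a, b):
    # Prefer numpy when available, otherwise use a plain-Python fallback.
    if numpy is not None:
        return float(numpy.dot(a, b))
    return float(sum(x*y for x, y in zip(a, b)))

print(dot([1, 2, 3], [4, 5, 6]))   # 32.0 with or without numpy installed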
from __future__ import print_function, division from sympy import Integer from sympy.core import Symbol from sympy.utilities import public @public def approximants(l, X=Symbol('x'), simplify=False): """ Return a generator for consecutive Pade approximants for a series. It can also be used for computing the rational generating function of a series when possible, since the last approximant returned by the generator will be the generating function (if any). The input list can contain more complex expressions than integer or rational numbers; symbols may also be involved in the computation. An example below show how to compute the generating function of the whole Pascal triangle. The generator can be asked to apply the sympy.simplify function on each generated term, which will make the computation slower; however it may be useful when symbols are involved in the expressions. Examples ======== >>> from sympy.series import approximants >>> from sympy import lucas, fibonacci, symbols, binomial >>> g = [lucas(k) for k in range(16)] >>> [e for e in approximants(g)] [2, -4/(x - 2), (5*x - 2)/(3*x - 1), (x - 2)/(x**2 + x - 1)] >>> h = [fibonacci(k) for k in range(16)] >>> [e for e in approximants(h)] [x, -x/(x - 1), (x**2 - x)/(2*x - 1), -x/(x**2 + x - 1)] >>> x, t = symbols("x,t") >>> p=[sum(binomial(k,i)*x**i for i in range(k+1)) for k in range(16)] >>> y = approximants(p, t) >>> for k in range(3): print(next(y)) 1 (x + 1)/((-x - 1)*(t*(x + 1) + (x + 1)/(-x - 1))) nan >>> y = approximants(p, t, simplify=True) >>> for k in range(3): print(next(y)) 1 -1/(t*(x + 1) - 1) nan See Also ======== See function sympy.concrete.guess.guess_generating_function_rational and function mpmath.pade """ p1, q1 = [Integer(1)], [Integer(0)] p2, q2 = [Integer(0)], [Integer(1)] while len(l): b = 0 while l[b]==0: b += 1 if b == len(l): return m = [Integer(1)/l[b]] for k in range(b+1, len(l)): s = 0 for j in range(b, k): s -= l[j+1] * m[b-j-1] m.append(s/l[b]) l = m a, l[0] = l[0], 0 p = [0] * max(len(p2), b+len(p1)) q = [0] * max(len(q2), b+len(q1)) for k in range(len(p2)): p[k] = a*p2[k] for k in range(b, b+len(p1)): p[k] += p1[k-b] for k in range(len(q2)): q[k] = a*q2[k] for k in range(b, b+len(q1)): q[k] += q1[k-b] while p[-1]==0: p.pop() while q[-1]==0: q.pop() p1, p2 = p2, p q1, q2 = q2, q # yield result from sympy import denom, lcm, simplify as simp c = 1 for x in p: c = lcm(c, denom(x)) for x in q: c = lcm(c, denom(x)) out = ( sum(c*e*X**k for k, e in enumerate(p)) / sum(c*e*X**k for k, e in enumerate(q)) ) if simplify: yield(simp(out)) else: yield out return
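# Hedged sketch: using approximants() to recover a rational generating
# function.  As in the doctest above, the last Pade approximant produced for
# the Fibonacci numbers is their generating function.
from sympy import fibonacci
from sympy.series import approximants

last = None
for last in approximants([fibonacci(k) for k in range(16)]):
    pass
print(last)   # -x/(x**2 + x - 1)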
""" Contains the base class for series Made using sequences in mind """ from __future__ import print_function, division from sympy.core.expr import Expr from sympy.core.singleton import S from sympy.core.cache import cacheit class SeriesBase(Expr): """Base Class for series""" @property def interval(self): """The interval on which the series is defined""" raise NotImplementedError("(%s).interval" % self) @property def start(self): """The starting point of the series. This point is included""" raise NotImplementedError("(%s).start" % self) @property def stop(self): """The ending point of the series. This point is included""" raise NotImplementedError("(%s).stop" % self) @property def length(self): """Length of the series expansion""" raise NotImplementedError("(%s).length" % self) @property def variables(self): """Returns a tuple of variables that are bounded""" return () @property def free_symbols(self): """ This method returns the symbols in the object, excluding those that take on a specific value (i.e. the dummy symbols). """ return (set(j for i in self.args for j in i.free_symbols) .difference(self.variables)) @cacheit def term(self, pt): """Term at point pt of a series""" if pt < self.start or pt > self.stop: raise IndexError("Index %s out of bounds %s" % (pt, self.interval)) return self._eval_term(pt) def _eval_term(self, pt): raise NotImplementedError("The _eval_term method should be added to" "%s to return series term so it is available" "when 'term' calls it." % self.func) def _ith_point(self, i): """ Returns the i'th point of a series If start point is negative infinity, point is returned from the end. Assumes the first point to be indexed zero. Examples ======== TODO """ if self.start is S.NegativeInfinity: initial = self.stop step = -1 else: initial = self.start step = 1 return initial + i*step def __iter__(self): i = 0 while i < self.length: pt = self._ith_point(i) yield self.term(pt) i += 1 def __getitem__(self, index): if isinstance(index, int): index = self._ith_point(index) return self.term(index) elif isinstance(index, slice): start, stop = index.start, index.stop if start is None: start = 0 if stop is None: stop = self.length return [self.term(self._ith_point(i)) for i in range(start, stop, index.step or 1)]
""" Convergence acceleration / extrapolation methods for series and sequences. References: Carl M. Bender & Steven A. Orszag, "Advanced Mathematical Methods for Scientists and Engineers: Asymptotic Methods and Perturbation Theory", Springer 1999. (Shanks transformation: pp. 368-375, Richardson extrapolation: pp. 375-377.) """ from __future__ import print_function, division from sympy import factorial, Integer, S def richardson(A, k, n, N): """ Calculate an approximation for lim k->oo A(k) using Richardson extrapolation with the terms A(n), A(n+1), ..., A(n+N+1). Choosing N ~= 2*n often gives good results. A simple example is to calculate exp(1) using the limit definition. This limit converges slowly; n = 100 only produces two accurate digits: >>> from sympy.abc import n >>> e = (1 + 1/n)**n >>> print(round(e.subs(n, 100).evalf(), 10)) 2.7048138294 Richardson extrapolation with 11 appropriately chosen terms gives a value that is accurate to the indicated precision: >>> from sympy import E >>> from sympy.series.acceleration import richardson >>> print(round(richardson(e, n, 10, 20).evalf(), 10)) 2.7182818285 >>> print(round(E.evalf(), 10)) 2.7182818285 Another useful application is to speed up convergence of series. Computing 100 terms of the zeta(2) series 1/k**2 yields only two accurate digits: >>> from sympy.abc import k, n >>> from sympy import Sum >>> A = Sum(k**-2, (k, 1, n)) >>> print(round(A.subs(n, 100).evalf(), 10)) 1.6349839002 Richardson extrapolation performs much better: >>> from sympy import pi >>> print(round(richardson(A, n, 10, 20).evalf(), 10)) 1.6449340668 >>> print(round(((pi**2)/6).evalf(), 10)) # Exact value 1.6449340668 """ s = S.Zero for j in range(0, N + 1): s += A.subs(k, Integer(n + j)).doit() * (n + j)**N * (-1)**(j + N) / \ (factorial(j) * factorial(N - j)) return s def shanks(A, k, n, m=1): """ Calculate an approximation for lim k->oo A(k) using the n-term Shanks transformation S(A)(n). With m > 1, calculate the m-fold recursive Shanks transformation S(S(...S(A)...))(n). The Shanks transformation is useful for summing Taylor series that converge slowly near a pole or singularity, e.g. for log(2): >>> from sympy.abc import k, n >>> from sympy import Sum, Integer >>> from sympy.series.acceleration import shanks >>> A = Sum(Integer(-1)**(k+1) / k, (k, 1, n)) >>> print(round(A.subs(n, 100).doit().evalf(), 10)) 0.6881721793 >>> print(round(shanks(A, n, 25).evalf(), 10)) 0.6931396564 >>> print(round(shanks(A, n, 25, 5).evalf(), 10)) 0.6931471806 The correct value is 0.6931471805599453094172321215. """ table = [A.subs(k, Integer(j)).doit() for j in range(n + m + 2)] table2 = table[:] for i in range(1, m + 1): for j in range(i, n + m + 1): x, y, z = table[j - 1], table[j], table[j + 1] table2[j] = (z*x - y**2) / (z + x - 2*y) table = table2[:] return table[n]
from __future__ import print_function, division from sympy.core.basic import Basic from sympy.core.cache import cacheit from sympy.core.compatibility import is_sequence, iterable, ordered from sympy.core.containers import Tuple from sympy.core.decorators import call_highest_priority from sympy.core.parameters import global_parameters from sympy.core.function import AppliedUndef from sympy.core.mul import Mul from sympy.core.numbers import Integer from sympy.core.relational import Eq from sympy.core.singleton import S, Singleton from sympy.core.symbol import Dummy, Symbol, Wild from sympy.core.sympify import sympify from sympy.polys import lcm, factor from sympy.sets.sets import Interval, Intersection from sympy.simplify import simplify from sympy.tensor.indexed import Idx from sympy.utilities.iterables import flatten from sympy import expand ############################################################################### # SEQUENCES # ############################################################################### class SeqBase(Basic): """Base class for sequences""" is_commutative = True _op_priority = 15 @staticmethod def _start_key(expr): """Return start (if possible) else S.Infinity. adapted from Set._infimum_key """ try: start = expr.start except (NotImplementedError, AttributeError, ValueError): start = S.Infinity return start def _intersect_interval(self, other): """Returns start and stop. Takes intersection over the two intervals. """ interval = Intersection(self.interval, other.interval) return interval.inf, interval.sup @property def gen(self): """Returns the generator for the sequence""" raise NotImplementedError("(%s).gen" % self) @property def interval(self): """The interval on which the sequence is defined""" raise NotImplementedError("(%s).interval" % self) @property def start(self): """The starting point of the sequence. This point is included""" raise NotImplementedError("(%s).start" % self) @property def stop(self): """The ending point of the sequence. This point is included""" raise NotImplementedError("(%s).stop" % self) @property def length(self): """Length of the sequence""" raise NotImplementedError("(%s).length" % self) @property def variables(self): """Returns a tuple of variables that are bounded""" return () @property def free_symbols(self): """ This method returns the symbols in the object, excluding those that take on a specific value (i.e. the dummy symbols). Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n, m >>> SeqFormula(m*n**2, (n, 0, 5)).free_symbols {m} """ return (set(j for i in self.args for j in i.free_symbols .difference(self.variables))) @cacheit def coeff(self, pt): """Returns the coefficient at point pt""" if pt < self.start or pt > self.stop: raise IndexError("Index %s out of bounds %s" % (pt, self.interval)) return self._eval_coeff(pt) def _eval_coeff(self, pt): raise NotImplementedError("The _eval_coeff method should be added to" "%s to return coefficient so it is available" "when coeff calls it." % self.func) def _ith_point(self, i): """Returns the i'th point of a sequence. If start point is negative infinity, point is returned from the end. Assumes the first point to be indexed zero. 
Examples ========= >>> from sympy import oo >>> from sympy.series.sequences import SeqPer bounded >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(0) -10 >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(5) -5 End is at infinity >>> SeqPer((1, 2, 3), (0, oo))._ith_point(5) 5 Starts at negative infinity >>> SeqPer((1, 2, 3), (-oo, 0))._ith_point(5) -5 """ if self.start is S.NegativeInfinity: initial = self.stop else: initial = self.start if self.start is S.NegativeInfinity: step = -1 else: step = 1 return initial + i*step def _add(self, other): """ Should only be used internally. self._add(other) returns a new, term-wise added sequence if self knows how to add with other, otherwise it returns ``None``. ``other`` should only be a sequence object. Used within :class:`SeqAdd` class. """ return None def _mul(self, other): """ Should only be used internally. self._mul(other) returns a new, term-wise multiplied sequence if self knows how to multiply with other, otherwise it returns ``None``. ``other`` should only be a sequence object. Used within :class:`SeqMul` class. """ return None def coeff_mul(self, other): """ Should be used when ``other`` is not a sequence. Should be defined to define custom behaviour. Examples ======== >>> from sympy import S, oo, SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2).coeff_mul(2) SeqFormula(2*n**2, (n, 0, oo)) Notes ===== '*' defines multiplication of sequences with sequences only. """ return Mul(self, other) def __add__(self, other): """Returns the term-wise addition of 'self' and 'other'. ``other`` should be a sequence. Examples ======== >>> from sympy import S, oo, SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2) + SeqFormula(n**3) SeqFormula(n**3 + n**2, (n, 0, oo)) """ if not isinstance(other, SeqBase): raise TypeError('cannot add sequence and %s' % type(other)) return SeqAdd(self, other) @call_highest_priority('__add__') def __radd__(self, other): return self + other def __sub__(self, other): """Returns the term-wise subtraction of 'self' and 'other'. ``other`` should be a sequence. Examples ======== >>> from sympy import S, oo, SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2) - (SeqFormula(n)) SeqFormula(n**2 - n, (n, 0, oo)) """ if not isinstance(other, SeqBase): raise TypeError('cannot subtract sequence and %s' % type(other)) return SeqAdd(self, -other) @call_highest_priority('__sub__') def __rsub__(self, other): return (-self) + other def __neg__(self): """Negates the sequence. Examples ======== >>> from sympy import S, oo, SeqFormula >>> from sympy.abc import n >>> -SeqFormula(n**2) SeqFormula(-n**2, (n, 0, oo)) """ return self.coeff_mul(-1) def __mul__(self, other): """Returns the term-wise multiplication of 'self' and 'other'. ``other`` should be a sequence. For ``other`` not being a sequence see :func:`coeff_mul` method. 
Examples ======== >>> from sympy import S, oo, SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2) * (SeqFormula(n)) SeqFormula(n**3, (n, 0, oo)) """ if not isinstance(other, SeqBase): raise TypeError('cannot multiply sequence and %s' % type(other)) return SeqMul(self, other) @call_highest_priority('__mul__') def __rmul__(self, other): return self * other def __iter__(self): for i in range(self.length): pt = self._ith_point(i) yield self.coeff(pt) def __getitem__(self, index): if isinstance(index, int): index = self._ith_point(index) return self.coeff(index) elif isinstance(index, slice): start, stop = index.start, index.stop if start is None: start = 0 if stop is None: stop = self.length return [self.coeff(self._ith_point(i)) for i in range(start, stop, index.step or 1)] def find_linear_recurrence(self,n,d=None,gfvar=None): r""" Finds the shortest linear recurrence that satisfies the first n terms of sequence of order `\leq` n/2 if possible. If d is specified, find shortest linear recurrence of order `\leq` min(d, n/2) if possible. Returns list of coefficients ``[b(1), b(2), ...]`` corresponding to the recurrence relation ``x(n) = b(1)*x(n-1) + b(2)*x(n-2) + ...`` Returns ``[]`` if no recurrence is found. If gfvar is specified, also returns ordinary generating function as a function of gfvar. Examples ======== >>> from sympy import sequence, sqrt, oo, lucas >>> from sympy.abc import n, x, y >>> sequence(n**2).find_linear_recurrence(10, 2) [] >>> sequence(n**2).find_linear_recurrence(10) [3, -3, 1] >>> sequence(2**n).find_linear_recurrence(10) [2] >>> sequence(23*n**4+91*n**2).find_linear_recurrence(10) [5, -10, 10, -5, 1] >>> sequence(sqrt(5)*(((1 + sqrt(5))/2)**n - (-(1 + sqrt(5))/2)**(-n))/5).find_linear_recurrence(10) [1, 1] >>> sequence(x+y*(-2)**(-n), (n, 0, oo)).find_linear_recurrence(30) [1/2, 1/2] >>> sequence(3*5**n + 12).find_linear_recurrence(20,gfvar=x) ([6, -5], 3*(5 - 21*x)/((x - 1)*(5*x - 1))) >>> sequence(lucas(n)).find_linear_recurrence(15,gfvar=x) ([1, 1], (x - 2)/(x**2 + x - 1)) """ from sympy.matrices import Matrix x = [simplify(expand(t)) for t in self[:n]] lx = len(x) if d is None: r = lx//2 else: r = min(d,lx//2) coeffs = [] for l in range(1, r+1): l2 = 2*l mlist = [] for k in range(l): mlist.append(x[k:k+l]) m = Matrix(mlist) if m.det() != 0: y = simplify(m.LUsolve(Matrix(x[l:l2]))) if lx == l2: coeffs = flatten(y[::-1]) break mlist = [] for k in range(l,lx-l): mlist.append(x[k:k+l]) m = Matrix(mlist) if m*y == Matrix(x[l2:]): coeffs = flatten(y[::-1]) break if gfvar is None: return coeffs else: l = len(coeffs) if l == 0: return [], None else: n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l for i in range(l-1): n += x[i]*gfvar**i for j in range(l-i-1): n -= coeffs[i]*x[j]*gfvar**(i+j+1) d -= coeffs[i]*gfvar**(i+1) return coeffs, simplify(factor(n)/factor(d)) class EmptySequence(SeqBase, metaclass=Singleton): """Represents an empty sequence. The empty sequence is also available as a singleton as ``S.EmptySequence``. 
Examples ======== >>> from sympy import EmptySequence, SeqPer, oo >>> from sympy.abc import x >>> EmptySequence EmptySequence >>> SeqPer((1, 2), (x, 0, 10)) + EmptySequence SeqPer((1, 2), (x, 0, 10)) >>> SeqPer((1, 2)) * EmptySequence EmptySequence >>> EmptySequence.coeff_mul(-1) EmptySequence """ @property def interval(self): return S.EmptySet @property def length(self): return S.Zero def coeff_mul(self, coeff): """See docstring of SeqBase.coeff_mul""" return self def __iter__(self): return iter([]) class SeqExpr(SeqBase): """Sequence expression class. Various sequences should inherit from this class. Examples ======== >>> from sympy.series.sequences import SeqExpr >>> from sympy.abc import x >>> s = SeqExpr((1, 2, 3), (x, 0, 10)) >>> s.gen (1, 2, 3) >>> s.interval Interval(0, 10) >>> s.length 11 See Also ======== sympy.series.sequences.SeqPer sympy.series.sequences.SeqFormula """ @property def gen(self): return self.args[0] @property def interval(self): return Interval(self.args[1][1], self.args[1][2]) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def length(self): return self.stop - self.start + 1 @property def variables(self): return (self.args[1][0],) class SeqPer(SeqExpr): """Represents a periodic sequence. The elements are repeated after a given period. Examples ======== >>> from sympy import SeqPer, oo >>> from sympy.abc import k >>> s = SeqPer((1, 2, 3), (0, 5)) >>> s.periodical (1, 2, 3) >>> s.period 3 For value at a particular point >>> s.coeff(3) 1 supports slicing >>> s[:] [1, 2, 3, 1, 2, 3] iterable >>> list(s) [1, 2, 3, 1, 2, 3] sequence starts from negative infinity >>> SeqPer((1, 2, 3), (-oo, 0))[0:6] [1, 2, 3, 1, 2, 3] Periodic formulas >>> SeqPer((k, k**2, k**3), (k, 0, oo))[0:6] [0, 1, 8, 3, 16, 125] See Also ======== sympy.series.sequences.SeqFormula """ def __new__(cls, periodical, limits=None): periodical = sympify(periodical) def _find_x(periodical): free = periodical.free_symbols if len(periodical.free_symbols) == 1: return free.pop() else: return Dummy('k') x, start, stop = None, None, None if limits is None: x, start, stop = _find_x(periodical), 0, S.Infinity if is_sequence(limits, Tuple): if len(limits) == 3: x, start, stop = limits elif len(limits) == 2: x = _find_x(periodical) start, stop = limits if not isinstance(x, (Symbol, Idx)) or start is None or stop is None: raise ValueError('Invalid limits given: %s' % str(limits)) if start is S.NegativeInfinity and stop is S.Infinity: raise ValueError("Both the start and end value" "cannot be unbounded") limits = sympify((x, start, stop)) if is_sequence(periodical, Tuple): periodical = sympify(tuple(flatten(periodical))) else: raise ValueError("invalid period %s should be something " "like e.g (1, 2) " % periodical) if Interval(limits[1], limits[2]) is S.EmptySet: return S.EmptySequence return Basic.__new__(cls, periodical, limits) @property def period(self): return len(self.gen) @property def periodical(self): return self.gen def _eval_coeff(self, pt): if self.start is S.NegativeInfinity: idx = (self.stop - pt) % self.period else: idx = (pt - self.start) % self.period return self.periodical[idx].subs(self.variables[0], pt) def _add(self, other): """See docstring of SeqBase._add""" if isinstance(other, SeqPer): per1, lper1 = self.periodical, self.period per2, lper2 = other.periodical, other.period per_length = lcm(lper1, lper2) new_per = [] for x in range(per_length): ele1 = per1[x % lper1] ele2 = per2[x % lper2] new_per.append(ele1 + ele2) start, 
stop = self._intersect_interval(other) return SeqPer(new_per, (self.variables[0], start, stop)) def _mul(self, other): """See docstring of SeqBase._mul""" if isinstance(other, SeqPer): per1, lper1 = self.periodical, self.period per2, lper2 = other.periodical, other.period per_length = lcm(lper1, lper2) new_per = [] for x in range(per_length): ele1 = per1[x % lper1] ele2 = per2[x % lper2] new_per.append(ele1 * ele2) start, stop = self._intersect_interval(other) return SeqPer(new_per, (self.variables[0], start, stop)) def coeff_mul(self, coeff): """See docstring of SeqBase.coeff_mul""" coeff = sympify(coeff) per = [x * coeff for x in self.periodical] return SeqPer(per, self.args[1]) class SeqFormula(SeqExpr): """Represents sequence based on a formula. Elements are generated using a formula. Examples ======== >>> from sympy import SeqFormula, oo, Symbol >>> n = Symbol('n') >>> s = SeqFormula(n**2, (n, 0, 5)) >>> s.formula n**2 For value at a particular point >>> s.coeff(3) 9 supports slicing >>> s[:] [0, 1, 4, 9, 16, 25] iterable >>> list(s) [0, 1, 4, 9, 16, 25] sequence starts from negative infinity >>> SeqFormula(n**2, (-oo, 0))[0:6] [0, 1, 4, 9, 16, 25] See Also ======== sympy.series.sequences.SeqPer """ def __new__(cls, formula, limits=None): formula = sympify(formula) def _find_x(formula): free = formula.free_symbols if len(free) == 1: return free.pop() elif not free: return Dummy('k') else: raise ValueError( " specify dummy variables for %s. If the formula contains" " more than one free symbol, a dummy variable should be" " supplied explicitly e.g., SeqFormula(m*n**2, (n, 0, 5))" % formula) x, start, stop = None, None, None if limits is None: x, start, stop = _find_x(formula), 0, S.Infinity if is_sequence(limits, Tuple): if len(limits) == 3: x, start, stop = limits elif len(limits) == 2: x = _find_x(formula) start, stop = limits if not isinstance(x, (Symbol, Idx)) or start is None or stop is None: raise ValueError('Invalid limits given: %s' % str(limits)) if start is S.NegativeInfinity and stop is S.Infinity: raise ValueError("Both the start and end value " "cannot be unbounded") limits = sympify((x, start, stop)) if Interval(limits[1], limits[2]) is S.EmptySet: return S.EmptySequence return Basic.__new__(cls, formula, limits) @property def formula(self): return self.gen def _eval_coeff(self, pt): d = self.variables[0] return self.formula.subs(d, pt) def _add(self, other): """See docstring of SeqBase._add""" if isinstance(other, SeqFormula): form1, v1 = self.formula, self.variables[0] form2, v2 = other.formula, other.variables[0] formula = form1 + form2.subs(v2, v1) start, stop = self._intersect_interval(other) return SeqFormula(formula, (v1, start, stop)) def _mul(self, other): """See docstring of SeqBase._mul""" if isinstance(other, SeqFormula): form1, v1 = self.formula, self.variables[0] form2, v2 = other.formula, other.variables[0] formula = form1 * form2.subs(v2, v1) start, stop = self._intersect_interval(other) return SeqFormula(formula, (v1, start, stop)) def coeff_mul(self, coeff): """See docstring of SeqBase.coeff_mul""" coeff = sympify(coeff) formula = self.formula * coeff return SeqFormula(formula, self.args[1]) def expand(self, *args, **kwargs): return SeqFormula(expand(self.formula, *args, **kwargs), self.args[1]) class RecursiveSeq(SeqBase): """A finite degree recursive sequence. That is, a sequence a(n) that depends on a fixed, finite number of its previous values. 
The general form is a(n) = f(a(n - 1), a(n - 2), ..., a(n - d)) for some fixed, positive integer d, where f is some function defined by a SymPy expression. Parameters ========== recurrence : SymPy expression defining recurrence This is *not* an equality, only the expression that the nth term is equal to. For example, if :code:`a(n) = f(a(n - 1), ..., a(n - d))`, then the expression should be :code:`f(a(n - 1), ..., a(n - d))`. yn : applied undefined function Represents the nth term of the sequence as e.g. :code:`y(n)` where :code:`y` is an undefined function and `n` is the sequence index. n : symbolic argument The name of the variable that the recurrence is in, e.g., :code:`n` if the recurrence function is :code:`y(n)`. initial : iterable with length equal to the degree of the recurrence The initial values of the recurrence. start : start value of sequence (inclusive) Examples ======== >>> from sympy import Function, symbols >>> from sympy.series.sequences import RecursiveSeq >>> y = Function("y") >>> n = symbols("n") >>> fib = RecursiveSeq(y(n - 1) + y(n - 2), y(n), n, [0, 1]) >>> fib.coeff(3) # Value at a particular point 2 >>> fib[:6] # supports slicing [0, 1, 1, 2, 3, 5] >>> fib.recurrence # inspect recurrence Eq(y(n), y(n - 2) + y(n - 1)) >>> fib.degree # automatically determine degree 2 >>> for x in zip(range(10), fib): # supports iteration ... print(x) (0, 0) (1, 1) (2, 1) (3, 2) (4, 3) (5, 5) (6, 8) (7, 13) (8, 21) (9, 34) See Also ======== sympy.series.sequences.SeqFormula """ def __new__(cls, recurrence, yn, n, initial=None, start=0): if not isinstance(yn, AppliedUndef): raise TypeError("recurrence sequence must be an applied undefined function" ", found `{}`".format(yn)) if not isinstance(n, Basic) or not n.is_symbol: raise TypeError("recurrence variable must be a symbol" ", found `{}`".format(n)) if yn.args != (n,): raise TypeError("recurrence sequence does not match symbol") y = yn.func k = Wild("k", exclude=(n,)) degree = 0 # Find all applications of y in the recurrence and check that: # 1. The function y is only being used with a single argument; and # 2. All arguments are n + k for constant negative integers k. 
prev_ys = recurrence.find(y) for prev_y in prev_ys: if len(prev_y.args) != 1: raise TypeError("Recurrence should be in a single variable") shift = prev_y.args[0].match(n + k)[k] if not (shift.is_constant() and shift.is_integer and shift < 0): raise TypeError("Recurrence should have constant," " negative, integer shifts" " (found {})".format(prev_y)) if -shift > degree: degree = -shift if not initial: initial = [Dummy("c_{}".format(k)) for k in range(degree)] if len(initial) != degree: raise ValueError("Number of initial terms must equal degree") degree = Integer(degree) start = sympify(start) initial = Tuple(*(sympify(x) for x in initial)) seq = Basic.__new__(cls, recurrence, yn, n, initial, start) seq.cache = {y(start + k): init for k, init in enumerate(initial)} seq.degree = degree return seq @property def _recurrence(self): """Equation defining recurrence.""" return self.args[0] @property def recurrence(self): """Equation defining recurrence.""" return Eq(self.yn, self.args[0]) @property def yn(self): """Applied function representing the nth term""" return self.args[1] @property def y(self): """Undefined function for the nth term of the sequence""" return self.yn.func @property def n(self): """Sequence index symbol""" return self.args[2] @property def initial(self): """The initial values of the sequence""" return self.args[3] @property def start(self): """The starting point of the sequence. This point is included""" return self.args[4] @property def stop(self): """The ending point of the sequence. (oo)""" return S.Infinity @property def interval(self): """Interval on which sequence is defined.""" return (self.start, S.Infinity) def _eval_coeff(self, index): if index - self.start < len(self.cache): return self.cache[self.y(index)] for current in range(len(self.cache), index + 1): # Use xreplace over subs for performance. # See issue #10697. seq_index = self.start + current current_recurrence = self._recurrence.xreplace({self.n: seq_index}) new_term = current_recurrence.xreplace(self.cache) self.cache[self.y(seq_index)] = new_term return self.cache[self.y(self.start + current)] def __iter__(self): index = self.start while True: yield self._eval_coeff(index) index += 1 def sequence(seq, limits=None): """Returns appropriate sequence object. If ``seq`` is a sympy sequence, returns :class:`SeqPer` object otherwise returns :class:`SeqFormula` object. Examples ======== >>> from sympy import sequence, SeqPer, SeqFormula >>> from sympy.abc import n >>> sequence(n**2, (n, 0, 5)) SeqFormula(n**2, (n, 0, 5)) >>> sequence((1, 2, 3), (n, 0, 5)) SeqPer((1, 2, 3), (n, 0, 5)) See Also ======== sympy.series.sequences.SeqPer sympy.series.sequences.SeqFormula """ seq = sympify(seq) if is_sequence(seq, Tuple): return SeqPer(seq, limits) else: return SeqFormula(seq, limits) ############################################################################### # OPERATIONS # ############################################################################### class SeqExprOp(SeqBase): """Base class for operations on sequences. Examples ======== >>> from sympy.series.sequences import SeqExprOp, sequence >>> from sympy.abc import n >>> s1 = sequence(n**2, (n, 0, 10)) >>> s2 = sequence((1, 2, 3), (n, 5, 10)) >>> s = SeqExprOp(s1, s2) >>> s.gen (n**2, (1, 2, 3)) >>> s.interval Interval(5, 10) >>> s.length 6 See Also ======== sympy.series.sequences.SeqAdd sympy.series.sequences.SeqMul """ @property def gen(self): """Generator for the sequence. returns a tuple of generators of all the argument sequences. 
""" return tuple(a.gen for a in self.args) @property def interval(self): """Sequence is defined on the intersection of all the intervals of respective sequences """ return Intersection(*(a.interval for a in self.args)) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def variables(self): """Cumulative of all the bound variables""" return tuple(flatten([a.variables for a in self.args])) @property def length(self): return self.stop - self.start + 1 class SeqAdd(SeqExprOp): """Represents term-wise addition of sequences. Rules: * The interval on which sequence is defined is the intersection of respective intervals of sequences. * Anything + :class:`EmptySequence` remains unchanged. * Other rules are defined in ``_add`` methods of sequence classes. Examples ======== >>> from sympy import EmptySequence, oo, SeqAdd, SeqPer, SeqFormula >>> from sympy.abc import n >>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), EmptySequence) SeqPer((1, 2), (n, 0, oo)) >>> SeqAdd(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10))) EmptySequence >>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2, (n, 0, oo))) SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo))) >>> SeqAdd(SeqFormula(n**3), SeqFormula(n**2)) SeqFormula(n**3 + n**2, (n, 0, oo)) See Also ======== sympy.series.sequences.SeqMul """ def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) # flatten inputs args = list(args) # adapted from sympy.sets.sets.Union def _flatten(arg): if isinstance(arg, SeqBase): if isinstance(arg, SeqAdd): return sum(map(_flatten, arg.args), []) else: return [arg] if iterable(arg): return sum(map(_flatten, arg), []) raise TypeError("Input must be Sequences or " " iterables of Sequences") args = _flatten(args) args = [a for a in args if a is not S.EmptySequence] # Addition of no sequences is EmptySequence if not args: return S.EmptySequence if Intersection(*(a.interval for a in args)) is S.EmptySet: return S.EmptySequence # reduce using known rules if evaluate: return SeqAdd.reduce(args) args = list(ordered(args, SeqBase._start_key)) return Basic.__new__(cls, *args) @staticmethod def reduce(args): """Simplify :class:`SeqAdd` using known rules. Iterates through all pairs and ask the constituent sequences if they can simplify themselves with any other constituent. Notes ===== adapted from ``Union.reduce`` """ new_args = True while new_args: for id1, s in enumerate(args): new_args = False for id2, t in enumerate(args): if id1 == id2: continue new_seq = s._add(t) # This returns None if s does not know how to add # with t. Returns the newly added sequence otherwise if new_seq is not None: new_args = [a for a in args if a not in (s, t)] new_args.append(new_seq) break if new_args: args = new_args break if len(args) == 1: return args.pop() else: return SeqAdd(args, evaluate=False) def _eval_coeff(self, pt): """adds up the coefficients of all the sequences at point pt""" return sum(a.coeff(pt) for a in self.args) class SeqMul(SeqExprOp): r"""Represents term-wise multiplication of sequences. Handles multiplication of sequences only. For multiplication with other objects see :func:`SeqBase.coeff_mul`. Rules: * The interval on which sequence is defined is the intersection of respective intervals of sequences. * Anything \* :class:`EmptySequence` returns :class:`EmptySequence`. * Other rules are defined in ``_mul`` methods of sequence classes. 
Examples ======== >>> from sympy import EmptySequence, oo, SeqMul, SeqPer, SeqFormula >>> from sympy.abc import n >>> SeqMul(SeqPer((1, 2), (n, 0, oo)), EmptySequence) EmptySequence >>> SeqMul(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10))) EmptySequence >>> SeqMul(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2)) SeqMul(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo))) >>> SeqMul(SeqFormula(n**3), SeqFormula(n**2)) SeqFormula(n**5, (n, 0, oo)) See Also ======== sympy.series.sequences.SeqAdd """ def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) # flatten inputs args = list(args) # adapted from sympy.sets.sets.Union def _flatten(arg): if isinstance(arg, SeqBase): if isinstance(arg, SeqMul): return sum(map(_flatten, arg.args), []) else: return [arg] elif iterable(arg): return sum(map(_flatten, arg), []) raise TypeError("Input must be Sequences or " " iterables of Sequences") args = _flatten(args) # Multiplication of no sequences is EmptySequence if not args: return S.EmptySequence if Intersection(*(a.interval for a in args)) is S.EmptySet: return S.EmptySequence # reduce using known rules if evaluate: return SeqMul.reduce(args) args = list(ordered(args, SeqBase._start_key)) return Basic.__new__(cls, *args) @staticmethod def reduce(args): """Simplify a :class:`SeqMul` using known rules. Iterates through all pairs and ask the constituent sequences if they can simplify themselves with any other constituent. Notes ===== adapted from ``Union.reduce`` """ new_args = True while new_args: for id1, s in enumerate(args): new_args = False for id2, t in enumerate(args): if id1 == id2: continue new_seq = s._mul(t) # This returns None if s does not know how to multiply # with t. Returns the newly multiplied sequence otherwise if new_seq is not None: new_args = [a for a in args if a not in (s, t)] new_args.append(new_seq) break if new_args: args = new_args break if len(args) == 1: return args.pop() else: return SeqMul(args, evaluate=False) def _eval_coeff(self, pt): """multiplies the coefficients of all the sequences at point pt""" val = 1 for a in self.args: val *= a.coeff(pt) return val
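# A brief, hedged sketch of how the classes above combine (illustrative
# only): SeqFormula and SeqPer are built through ``sequence``, arithmetic
# dispatches through SeqAdd/SeqMul and the _add/_mul hooks of the operands,
# and find_linear_recurrence recovers a recurrence from the first terms.
from sympy import sequence, fibonacci
from sympy.abc import n

s1 = sequence(n**2, (n, 0, 10))      # SeqFormula
s2 = sequence((1, 0), (n, 0, 10))    # SeqPer with period 2
print((s1 + s2)[:4])                 # [1, 1, 5, 9]
print((s1 * s2)[:4])                 # [0, 0, 4, 0]
print(sequence(fibonacci(n)).find_linear_recurrence(10))   # [1, 1]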
from __future__ import print_function, division from sympy.core import S, Symbol, Add, sympify, Expr, PoleError, Mul from sympy.core.exprtools import factor_terms from sympy.core.numbers import GoldenRatio from sympy.core.symbol import Dummy from sympy.functions.combinatorial.factorials import factorial from sympy.functions.combinatorial.numbers import fibonacci from sympy.functions.special.gamma_functions import gamma from sympy.polys import PolynomialError, factor from sympy.series.order import Order from sympy.simplify.ratsimp import ratsimp from sympy.simplify.simplify import together from .gruntz import gruntz def limit(e, z, z0, dir="+"): """Computes the limit of ``e(z)`` at the point ``z0``. Parameters ========== e : expression, the limit of which is to be taken z : symbol representing the variable in the limit. Other symbols are treated as constants. Multivariate limits are not supported. z0 : the value toward which ``z`` tends. Can be any expression, including ``oo`` and ``-oo``. dir : string, optional (default: "+") The limit is bi-directional if ``dir="+-"``, from the right (z->z0+) if ``dir="+"``, and from the left (z->z0-) if ``dir="-"``. For infinite ``z0`` (``oo`` or ``-oo``), the ``dir`` argument is determined from the direction of the infinity (i.e., ``dir="-"`` for ``oo``). Examples ======== >>> from sympy import limit, sin, Symbol, oo >>> from sympy.abc import x >>> limit(sin(x)/x, x, 0) 1 >>> limit(1/x, x, 0) # default dir='+' oo >>> limit(1/x, x, 0, dir="-") -oo >>> limit(1/x, x, 0, dir='+-') Traceback (most recent call last): ... ValueError: The limit does not exist since left hand limit = -oo and right hand limit = oo >>> limit(1/x, x, oo) 0 Notes ===== First we try some heuristics for easy and frequent cases like "x", "1/x", "x**2" and similar, so that it's fast. For all other cases, we use the Gruntz algorithm (see the gruntz() function). See Also ======== limit_seq : returns the limit of a sequence. """ return Limit(e, z, z0, dir).doit(deep=False) def heuristics(e, z, z0, dir): """Computes the limit of an expression term-wise. Parameters are the same as for the ``limit`` function. Works with the arguments of expression ``e`` one by one, computing the limit of each and then combining the results. This approach works only for simple limits, but it is fast. """ from sympy.calculus.util import AccumBounds rv = None if abs(z0) is S.Infinity: rv = limit(e.subs(z, 1/z), z, S.Zero, "+" if z0 is S.Infinity else "-") if isinstance(rv, Limit): return elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function: r = [] for a in e.args: l = limit(a, z, z0, dir) if l.has(S.Infinity) and l.is_finite is None: if isinstance(e, Add): m = factor_terms(e) if not isinstance(m, Mul): # try together m = together(m) if not isinstance(m, Mul): # try factor if the previous methods failed m = factor(e) if isinstance(m, Mul): return heuristics(m, z, z0, dir) return return elif isinstance(l, Limit): return elif l is S.NaN: return else: r.append(l) if r: rv = e.func(*r) if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r): r2 = [] e2 = [] for ii in range(len(r)): if isinstance(r[ii], AccumBounds): r2.append(r[ii]) else: e2.append(e.args[ii]) if len(e2) > 0: e3 = Mul(*e2).simplify() l = limit(e3, z, z0, dir) rv = l * Mul(*r2) if rv is S.NaN: try: rat_e = ratsimp(e) except PolynomialError: return if rat_e is S.NaN or rat_e == e: return return limit(rat_e, z, z0, dir) return rv class Limit(Expr): """Represents an unevaluated limit. 
Examples ======== >>> from sympy import Limit, sin, Symbol >>> from sympy.abc import x >>> Limit(sin(x)/x, x, 0) Limit(sin(x)/x, x, 0) >>> Limit(1/x, x, 0, dir="-") Limit(1/x, x, 0, dir='-') """ def __new__(cls, e, z, z0, dir="+"): e = sympify(e) z = sympify(z) z0 = sympify(z0) if z0 is S.Infinity: dir = "-" elif z0 is S.NegativeInfinity: dir = "+" if isinstance(dir, str): dir = Symbol(dir) elif not isinstance(dir, Symbol): raise TypeError("direction must be of type basestring or " "Symbol, not %s" % type(dir)) if str(dir) not in ('+', '-', '+-'): raise ValueError("direction must be one of '+', '-' " "or '+-', not %s" % dir) obj = Expr.__new__(cls) obj._args = (e, z, z0, dir) return obj @property def free_symbols(self): e = self.args[0] isyms = e.free_symbols isyms.difference_update(self.args[1].free_symbols) isyms.update(self.args[2].free_symbols) return isyms def doit(self, **hints): """Evaluates the limit. Parameters ========== deep : bool, optional (default: True) Invoke the ``doit`` method of the expressions involved before taking the limit. hints : optional keyword arguments To be passed to ``doit`` methods; only used if deep is True. """ from sympy.functions import RisingFactorial e, z, z0, dir = self.args if z0 is S.ComplexInfinity: raise NotImplementedError("Limits at complex " "infinity are not implemented") if hints.get('deep', True): e = e.doit(**hints) z = z.doit(**hints) z0 = z0.doit(**hints) if e == z: return z0 if not e.has(z): return e # gruntz fails on factorials but works with the gamma function # If no factorial term is present, e should remain unchanged. # factorial is defined to be zero for negative inputs (which # differs from gamma) so only rewrite for positive z0. if z0.is_extended_positive: e = e.rewrite([factorial, RisingFactorial], gamma) if e.is_Mul: if abs(z0) is S.Infinity: e = factor_terms(e) e = e.rewrite(fibonacci, GoldenRatio) ok = lambda w: (z in w.free_symbols and any(a.is_polynomial(z) or any(z in m.free_symbols and m.is_polynomial(z) for m in Mul.make_args(a)) for a in Add.make_args(w))) if all(ok(w) for w in e.as_numer_denom()): u = Dummy(positive=True) if z0 is S.NegativeInfinity: inve = e.subs(z, -1/u) else: inve = e.subs(z, 1/u) try: r = limit(inve.as_leading_term(u), u, S.Zero, "+") if isinstance(r, Limit): return self else: return r except ValueError: pass if e.is_Order: return Order(limit(e.expr, z, z0), *e.args[1:]) l = None try: if str(dir) == '+-': r = gruntz(e, z, z0, '+') l = gruntz(e, z, z0, '-') if l != r: raise ValueError("The limit does not exist since " "left hand limit = %s and right hand limit = %s" % (l, r)) else: r = gruntz(e, z, z0, dir) if r is S.NaN or l is S.NaN: raise PoleError() except (PoleError, ValueError): if l is not None: raise r = heuristics(e, z, z0, dir) if r is None: return self return r
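# A short, hedged sketch (illustrative only) of how the pieces above fit
# together: ``limit`` wraps the expression in an unevaluated ``Limit`` and
# calls ``doit``, which tries the cheap heuristics first and falls back to
# the Gruntz algorithm when they do not apply.
from sympy import Limit, limit, sin, oo
from sympy.abc import x

expr = Limit(sin(x)/x, x, 0)         # unevaluated form
print(expr.doit())                   # 1
print(limit((1 + 1/x)**x, x, oo))    # E
print(limit(1/x, x, 0, dir='-'))     # -oo, one-sided limit from the left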
"""Fourier Series""" from __future__ import print_function, division from sympy import pi, oo, Wild from sympy.core.expr import Expr from sympy.core.add import Add from sympy.core.compatibility import is_sequence from sympy.core.containers import Tuple from sympy.core.singleton import S from sympy.core.symbol import Dummy, Symbol from sympy.core.sympify import sympify from sympy.functions.elementary.trigonometric import sin, cos, sinc from sympy.series.series_class import SeriesBase from sympy.series.sequences import SeqFormula from sympy.sets.sets import Interval from sympy.simplify.fu import TR2, TR1, TR10, sincos_to_sum def fourier_cos_seq(func, limits, n): """Returns the cos sequence in a Fourier series""" from sympy.integrals import integrate x, L = limits[0], limits[2] - limits[1] cos_term = cos(2*n*pi*x / L) formula = 2 * cos_term * integrate(func * cos_term, limits) / L a0 = formula.subs(n, S.Zero) / 2 return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits) / L, (n, 1, oo)) def fourier_sin_seq(func, limits, n): """Returns the sin sequence in a Fourier series""" from sympy.integrals import integrate x, L = limits[0], limits[2] - limits[1] sin_term = sin(2*n*pi*x / L) return SeqFormula(2 * sin_term * integrate(func * sin_term, limits) / L, (n, 1, oo)) def _process_limits(func, limits): """ Limits should be of the form (x, start, stop). x should be a symbol. Both start and stop should be bounded. * If x is not given, x is determined from func. * If limits is None. Limit of the form (x, -pi, pi) is returned. Examples ======== >>> from sympy import pi >>> from sympy.series.fourier import _process_limits as pari >>> from sympy.abc import x >>> pari(x**2, (x, -2, 2)) (x, -2, 2) >>> pari(x**2, (-2, 2)) (x, -2, 2) >>> pari(x**2, None) (x, -pi, pi) """ def _find_x(func): free = func.free_symbols if len(free) == 1: return free.pop() elif not free: return Dummy('k') else: raise ValueError( " specify dummy variables for %s. If the function contains" " more than one free symbol, a dummy variable should be" " supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))" % func) x, start, stop = None, None, None if limits is None: x, start, stop = _find_x(func), -pi, pi if is_sequence(limits, Tuple): if len(limits) == 3: x, start, stop = limits elif len(limits) == 2: x = _find_x(func) start, stop = limits if not isinstance(x, Symbol) or start is None or stop is None: raise ValueError('Invalid limits given: %s' % str(limits)) unbounded = [S.NegativeInfinity, S.Infinity] if start in unbounded or stop in unbounded: raise ValueError("Both the start and end value should be bounded") return sympify((x, start, stop)) def finite_check(f, x, L): def check_fx(exprs, x): return x not in exprs.free_symbols def check_sincos(_expr, x, L): if isinstance(_expr, (sin, cos)): sincos_args = _expr.args[0] if sincos_args.match(a*(pi/L)*x + b) is not None: return True else: return False _expr = sincos_to_sum(TR2(TR1(f))) add_coeff = _expr.as_coeff_add() a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ]) b = Wild('b', properties=[lambda k: x not in k.free_symbols, ]) for s in add_coeff[1]: mul_coeffs = s.as_coeff_mul()[1] for t in mul_coeffs: if not (check_fx(t, x) or check_sincos(t, x, L)): return False, f return True, _expr class FourierSeries(SeriesBase): r"""Represents Fourier sine/cosine series. This class only represents a fourier series. No computation is performed. For how to compute Fourier series, see the :func:`fourier_series` docstring. 
See Also ======== sympy.series.fourier.fourier_series """ def __new__(cls, *args): args = map(sympify, args) return Expr.__new__(cls, *args) @property def function(self): return self.args[0] @property def x(self): return self.args[1][0] @property def period(self): return (self.args[1][1], self.args[1][2]) @property def a0(self): return self.args[2][0] @property def an(self): return self.args[2][1] @property def bn(self): return self.args[2][2] @property def interval(self): return Interval(0, oo) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def length(self): return oo @property def L(self): return abs(self.period[1] - self.period[0]) / 2 def _eval_subs(self, old, new): x = self.x if old.has(x): return self def truncate(self, n=3): """ Return the first n nonzero terms of the series. If n is None return an iterator. Parameters ========== n : int or None Amount of non-zero terms in approximation or None. Returns ======= Expr or iterator Approximation of function expanded into Fourier series. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x, (x, -pi, pi)) >>> s.truncate(4) 2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2 See Also ======== sympy.series.fourier.FourierSeries.sigma_approximation """ if n is None: return iter(self) terms = [] for t in self: if len(terms) == n: break if t is not S.Zero: terms.append(t) return Add(*terms) def sigma_approximation(self, n=3): r""" Return :math:`\sigma`-approximation of Fourier series with respect to order n. Sigma approximation adjusts a Fourier summation to eliminate the Gibbs phenomenon which would otherwise occur at discontinuities. A sigma-approximated summation for a Fourier series of a T-periodical function can be written as .. math:: s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1} \operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot \left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr) + b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right], where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier series coefficients and :math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos :math:`\sigma` factor (expressed in terms of normalized :math:`\operatorname{sinc}` function). Parameters ========== n : int Highest order of the terms taken into account in approximation. Returns ======= Expr Sigma approximation of function expanded into Fourier series. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x, (x, -pi, pi)) >>> s.sigma_approximation(4) 2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3 See Also ======== sympy.series.fourier.FourierSeries.truncate Notes ===== The behaviour of :meth:`~sympy.series.fourier.FourierSeries.sigma_approximation` is different from :meth:`~sympy.series.fourier.FourierSeries.truncate` - it takes all nonzero terms of degree smaller than n, rather than first n nonzero ones. References ========== .. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon .. [2] https://en.wikipedia.org/wiki/Sigma_approximation """ terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n]) if t is not S.Zero] return Add(*terms) def shift(self, s): """Shift the function by a term independent of x. f(x) -> f(x) + s This is fast, if Fourier series of f(x) is already computed. 
Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.shift(1).truncate() -4*cos(x) + cos(2*x) + 1 + pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) a0 = self.a0 + s sfunc = self.function + s return self.func(sfunc, self.args[1], (a0, self.an, self.bn)) def shiftx(self, s): """Shift x by a term independent of x. f(x) -> f(x + s) This is fast, if Fourier series of f(x) is already computed. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.shiftx(1).truncate() -4*cos(x + 1) + cos(2*x + 2) + pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) an = self.an.subs(x, x + s) bn = self.bn.subs(x, x + s) sfunc = self.function.subs(x, x + s) return self.func(sfunc, self.args[1], (self.a0, an, bn)) def scale(self, s): """Scale the function by a term independent of x. f(x) -> s * f(x) This is fast, if Fourier series of f(x) is already computed. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.scale(2).truncate() -8*cos(x) + 2*cos(2*x) + 2*pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) an = self.an.coeff_mul(s) bn = self.bn.coeff_mul(s) a0 = self.a0 * s sfunc = self.args[0] * s return self.func(sfunc, self.args[1], (a0, an, bn)) def scalex(self, s): """Scale x by a term independent of x. f(x) -> f(s*x) This is fast, if Fourier series of f(x) is already computed. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.scalex(2).truncate() -4*cos(2*x) + cos(4*x) + pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) an = self.an.subs(x, x * s) bn = self.bn.subs(x, x * s) sfunc = self.function.subs(x, x * s) return self.func(sfunc, self.args[1], (self.a0, an, bn)) def _eval_as_leading_term(self, x): for t in self: if t is not S.Zero: return t def _eval_term(self, pt): if pt == 0: return self.a0 return self.an.coeff(pt) + self.bn.coeff(pt) def __neg__(self): return self.scale(-1) def __add__(self, other): if isinstance(other, FourierSeries): if self.period != other.period: raise ValueError("Both the series should have same periods") x, y = self.x, other.x function = self.function + other.function.subs(y, x) if self.x not in function.free_symbols: return function an = self.an + other.an bn = self.bn + other.bn a0 = self.a0 + other.a0 return self.func(function, self.args[1], (a0, an, bn)) return Add(self, other) def __sub__(self, other): return self.__add__(-other) class FiniteFourierSeries(FourierSeries): r"""Represents Finite Fourier sine/cosine series. For how to compute Fourier series, see the :func:`fourier_series` docstring. 
Parameters ========== f : Expr Expression for finding fourier_series limits : ( x, start, stop) x is the independent variable for the expression f (start, stop) is the period of the fourier series exprs: (a0, an, bn) or Expr a0 is the constant term a0 of the fourier series an is a dictionary of coefficients of cos terms an[k] = coefficient of cos(pi*(k/L)*x) bn is a dictionary of coefficients of sin terms bn[k] = coefficient of sin(pi*(k/L)*x) or exprs can be an expression to be converted to fourier form Methods ======= This class is an extension of FourierSeries class. Please refer to sympy.series.fourier.FourierSeries for further information. See Also ======== sympy.series.fourier.FourierSeries sympy.series.fourier.fourier_series """ def __new__(cls, f, limits, exprs): f = sympify(f) limits = sympify(limits) exprs = sympify(exprs) if not (type(exprs) == Tuple and len(exprs) == 3): # exprs is not of form (a0, an, bn) # Converts the expression to fourier form c, e = exprs.as_coeff_add() rexpr = c + Add(*[TR10(i) for i in e]) a0, exp_ls = rexpr.expand(trig=False, power_base=False, power_exp=False, log=False).as_coeff_add() x = limits[0] L = abs(limits[2] - limits[1]) / 2 a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k is not S.Zero, ]) b = Wild('b', properties=[lambda k: x not in k.free_symbols, ]) an = dict() bn = dict() # separates the coefficients of sin and cos terms in dictionaries an, and bn for p in exp_ls: t = p.match(b * cos(a * (pi / L) * x)) q = p.match(b * sin(a * (pi / L) * x)) if t: an[t[a]] = t[b] + an.get(t[a], S.Zero) elif q: bn[q[a]] = q[b] + bn.get(q[a], S.Zero) else: a0 += p exprs = Tuple(a0, an, bn) return Expr.__new__(cls, f, limits, exprs) @property def interval(self): _length = 1 if self.a0 else 0 _length += max(set(self.an.keys()).union(set(self.bn.keys()))) + 1 return Interval(0, _length) @property def length(self): return self.stop - self.start def shiftx(self, s): s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) _expr = self.truncate().subs(x, x + s) sfunc = self.function.subs(x, x + s) return self.func(sfunc, self.args[1], _expr) def scale(self, s): s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) _expr = self.truncate() * s sfunc = self.function * s return self.func(sfunc, self.args[1], _expr) def scalex(self, s): s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) _expr = self.truncate().subs(x, x * s) sfunc = self.function.subs(x, x * s) return self.func(sfunc, self.args[1], _expr) def _eval_term(self, pt): if pt == 0: return self.a0 _term = self.an.get(pt, S.Zero) * cos(pt * (pi / self.L) * self.x) \ + self.bn.get(pt, S.Zero) * sin(pt * (pi / self.L) * self.x) return _term def __add__(self, other): if isinstance(other, FourierSeries): return other.__add__(fourier_series(self.function, self.args[1],\ finite=False)) elif isinstance(other, FiniteFourierSeries): if self.period != other.period: raise ValueError("Both the series should have same periods") x, y = self.x, other.x function = self.function + other.function.subs(y, x) if self.x not in function.free_symbols: return function return fourier_series(function, limits=self.args[1]) def fourier_series(f, limits=None, finite=True): """Computes Fourier sine/cosine series expansion. Returns a :class:`FourierSeries` object. 
Examples ======== >>> from sympy import fourier_series, pi, cos >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.truncate(n=3) -4*cos(x) + cos(2*x) + pi**2/3 Shifting >>> s.shift(1).truncate() -4*cos(x) + cos(2*x) + 1 + pi**2/3 >>> s.shiftx(1).truncate() -4*cos(x + 1) + cos(2*x + 2) + pi**2/3 Scaling >>> s.scale(2).truncate() -8*cos(x) + 2*cos(2*x) + 2*pi**2/3 >>> s.scalex(2).truncate() -4*cos(2*x) + cos(4*x) + pi**2/3 Notes ===== Computing Fourier series can be slow due to the integration required in computing an, bn. It is faster to compute Fourier series of a function by using shifting and scaling on an already computed Fourier series rather than computing again. e.g. If the Fourier series of ``x**2`` is known the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``. See Also ======== sympy.series.fourier.FourierSeries References ========== .. [1] mathworld.wolfram.com/FourierSeries.html """ f = sympify(f) limits = _process_limits(f, limits) x = limits[0] if x not in f.free_symbols: return f if finite: L = abs(limits[2] - limits[1]) / 2 is_finite, res_f = finite_check(f, x, L) if is_finite: return FiniteFourierSeries(f, limits, res_f) n = Dummy('n') neg_f = f.subs(x, -x) if f == neg_f: a0, an = fourier_cos_seq(f, limits, n) bn = SeqFormula(0, (1, oo)) elif f == -neg_f: a0 = S.Zero an = SeqFormula(0, (1, oo)) bn = fourier_sin_seq(f, limits, n) else: a0, an = fourier_cos_seq(f, limits, n) bn = fourier_sin_seq(f, limits, n) return FourierSeries(f, limits, (a0, an, bn))
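# Minimal illustrative sketch (not part of the module): ``fourier_series``
# returns a FiniteFourierSeries when ``finite_check`` recognizes the input
# as an already finite sine/cosine combination, and a general FourierSeries
# (with symbolically integrated coefficients) otherwise.
from sympy import fourier_series, pi, cos
from sympy.abc import x

s = fourier_series(x**2, (x, -pi, pi))       # general FourierSeries
print(s.truncate(3))                         # -4*cos(x) + cos(2*x) + pi**2/3
f = fourier_series(cos(4*x), (x, -pi, pi))   # finite_check succeeds here
print(f.truncate())                          # cos(4*x)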
from __future__ import print_function, division from sympy.core import S, sympify, Expr, Rational, Dummy from sympy.core import Add, Mul, expand_power_base, expand_log from sympy.core.cache import cacheit from sympy.core.compatibility import default_sort_key, is_sequence from sympy.core.containers import Tuple from sympy.sets.sets import Complement from sympy.utilities.iterables import uniq class Order(Expr): r""" Represents the limiting behavior of some function The order of a function characterizes the function based on the limiting behavior of the function as it goes to some limit. Only taking the limit point to be a number is currently supported. This is expressed in big O notation [1]_. The formal definition for the order of a function `g(x)` about a point `a` is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if for any `\delta > 0` there exists a `M > 0` such that `|g(x)| \leq M|f(x)|` for `|x-a| < \delta`. This is equivalent to `\lim_{x \rightarrow a} \sup |g(x)/f(x)| < \infty`. Let's illustrate it on the following example by taking the expansion of `\sin(x)` about 0: .. math :: \sin(x) = x - x^3/3! + O(x^5) where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition of `O`, for any `\delta > 0` there is an `M` such that: .. math :: |x^5/5! - x^7/7! + ....| <= M|x^5| \text{ for } |x| < \delta or by the alternate definition: .. math :: \lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| < \infty which surely is true, because .. math :: \lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| = 1/5! As it is usually used, the order of a function can be intuitively thought of representing all terms of powers greater than the one specified. For example, `O(x^3)` corresponds to any terms proportional to `x^3, x^4,\ldots` and any higher power. For a polynomial, this leaves terms proportional to `x^2`, `x` and constants. Examples ======== >>> from sympy import O, oo, cos, pi >>> from sympy.abc import x, y >>> O(x + x**2) O(x) >>> O(x + x**2, (x, 0)) O(x) >>> O(x + x**2, (x, oo)) O(x**2, (x, oo)) >>> O(1 + x*y) O(1, x, y) >>> O(1 + x*y, (x, 0), (y, 0)) O(1, x, y) >>> O(1 + x*y, (x, oo), (y, oo)) O(x*y, (x, oo), (y, oo)) >>> O(1) in O(1, x) True >>> O(1, x) in O(1) False >>> O(x) in O(1, x) True >>> O(x**2) in O(x) True >>> O(x)*x O(x**2) >>> O(x) - O(x) O(x) >>> O(cos(x)) O(1) >>> O(cos(x), (x, pi/2)) O(x - pi/2, (x, pi/2)) References ========== .. [1] `Big O notation <https://en.wikipedia.org/wiki/Big_O_notation>`_ Notes ===== In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading term. ``O(f(x), x)`` is automatically transformed to ``O(f(x).as_leading_term(x),x)``. ``O(expr*f(x), x)`` is ``O(f(x), x)`` ``O(expr, x)`` is ``O(1)`` ``O(0, x)`` is 0. Multivariate O is also supported: ``O(f(x, y), x, y)`` is transformed to ``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)`` In the multivariate case, it is assumed the limits w.r.t. the various symbols commute. If no symbols are passed then all symbols in the expression are used and the limit point is assumed to be zero. 
""" is_Order = True __slots__ = () @cacheit def __new__(cls, expr, *args, **kwargs): expr = sympify(expr) if not args: if expr.is_Order: variables = expr.variables point = expr.point else: variables = list(expr.free_symbols) point = [S.Zero]*len(variables) else: args = list(args if is_sequence(args) else [args]) variables, point = [], [] if is_sequence(args[0]): for a in args: v, p = list(map(sympify, a)) variables.append(v) point.append(p) else: variables = list(map(sympify, args)) point = [S.Zero]*len(variables) if not all(v.is_symbol for v in variables): raise TypeError('Variables are not symbols, got %s' % variables) if len(list(uniq(variables))) != len(variables): raise ValueError('Variables are supposed to be unique symbols, got %s' % variables) if expr.is_Order: expr_vp = dict(expr.args[1:]) new_vp = dict(expr_vp) vp = dict(zip(variables, point)) for v, p in vp.items(): if v in new_vp.keys(): if p != new_vp[v]: raise NotImplementedError( "Mixing Order at different points is not supported.") else: new_vp[v] = p if set(expr_vp.keys()) == set(new_vp.keys()): return expr else: variables = list(new_vp.keys()) point = [new_vp[v] for v in variables] if expr is S.NaN: return S.NaN if any(x in p.free_symbols for x in variables for p in point): raise ValueError('Got %s as a point.' % point) if variables: if any(p != point[0] for p in point): raise NotImplementedError( "Multivariable orders at different points are not supported.") if point[0] is S.Infinity: s = {k: 1/Dummy() for k in variables} rs = {1/v: 1/k for k, v in s.items()} elif point[0] is S.NegativeInfinity: s = {k: -1/Dummy() for k in variables} rs = {-1/v: -1/k for k, v in s.items()} elif point[0] is not S.Zero: s = dict((k, Dummy() + point[0]) for k in variables) rs = dict((v - point[0], k - point[0]) for k, v in s.items()) else: s = () rs = () expr = expr.subs(s) if expr.is_Add: from sympy import expand_multinomial expr = expand_multinomial(expr) if s: args = tuple([r[0] for r in rs.items()]) else: args = tuple(variables) if len(variables) > 1: # XXX: better way? We need this expand() to # workaround e.g: expr = x*(x + y). # (x*(x + y)).as_leading_term(x, y) currently returns # x*y (wrong order term!). That's why we want to deal with # expand()'ed expr (handled in "if expr.is_Add" branch below). expr = expr.expand() old_expr = None while old_expr != expr: old_expr = expr if expr.is_Add: lst = expr.extract_leading_order(args) expr = Add(*[f.expr for (e, f) in lst]) elif expr: expr = expr.as_leading_term(*args) expr = expr.as_independent(*args, as_Add=False)[1] expr = expand_power_base(expr) expr = expand_log(expr) if len(args) == 1: # The definition of O(f(x)) symbol explicitly stated that # the argument of f(x) is irrelevant. That's why we can # combine some power exponents (only "on top" of the # expression tree for f(x)), e.g.: # x**p * (-x)**q -> x**(p+q) for real p, q. 
x = args[0] margs = list(Mul.make_args( expr.as_independent(x, as_Add=False)[1])) for i, t in enumerate(margs): if t.is_Pow: b, q = t.args if b in (x, -x) and q.is_real and not q.has(x): margs[i] = x**q elif b.is_Pow and not b.exp.has(x): b, r = b.args if b in (x, -x) and r.is_real: margs[i] = x**(r*q) elif b.is_Mul and b.args[0] is S.NegativeOne: b = -b if b.is_Pow and not b.exp.has(x): b, r = b.args if b in (x, -x) and r.is_real: margs[i] = x**(r*q) expr = Mul(*margs) expr = expr.subs(rs) if expr.is_Order: expr = expr.expr if not expr.has(*variables) and not expr.is_zero: expr = S.One # create Order instance: vp = dict(zip(variables, point)) variables.sort(key=default_sort_key) point = [vp[v] for v in variables] args = (expr,) + Tuple(*zip(variables, point)) obj = Expr.__new__(cls, *args) return obj def _eval_nseries(self, x, n, logx): return self @property def expr(self): return self.args[0] @property def variables(self): if self.args[1:]: return tuple(x[0] for x in self.args[1:]) else: return () @property def point(self): if self.args[1:]: return tuple(x[1] for x in self.args[1:]) else: return () @property def free_symbols(self): return self.expr.free_symbols | set(self.variables) def _eval_power(b, e): if e.is_Number and e.is_nonnegative: return b.func(b.expr ** e, *b.args[1:]) if e == O(1): return b return def as_expr_variables(self, order_symbols): if order_symbols is None: order_symbols = self.args[1:] else: if (not all(o[1] == order_symbols[0][1] for o in order_symbols) and not all(p == self.point[0] for p in self.point)): # pragma: no cover raise NotImplementedError('Order at points other than 0 ' 'or oo not supported, got %s as a point.' % self.point) if order_symbols and order_symbols[0][1] != self.point[0]: raise NotImplementedError( "Multiplying Order at different points is not supported.") order_symbols = dict(order_symbols) for s, p in dict(self.args[1:]).items(): if s not in order_symbols.keys(): order_symbols[s] = p order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0])) return self.expr, tuple(order_symbols) def removeO(self): return S.Zero def getO(self): return self @cacheit def contains(self, expr): r""" Return True if expr belongs to Order(self.expr, \*self.variables). Return False if self belongs to expr. Return None if the inclusion relation cannot be determined (e.g. when self and expr have different symbols). """ from sympy import powsimp if expr.is_zero: return True if expr is S.NaN: return False point = self.point[0] if self.point else S.Zero if expr.is_Order: if (any(p != point for p in expr.point) or any(p != point for p in self.point)): return None if expr.expr == self.expr: # O(1) + O(1), O(1) + O(1, x), etc. 
return all([x in self.args[1:] for x in expr.args[1:]]) if expr.expr.is_Add: return all([self.contains(x) for x in expr.expr.args]) if self.expr.is_Add and point.is_zero: return any([self.func(x, *self.args[1:]).contains(expr) for x in self.expr.args]) if self.variables and expr.variables: common_symbols = tuple( [s for s in self.variables if s in expr.variables]) elif self.variables: common_symbols = self.variables else: common_symbols = expr.variables if not common_symbols: return None if (self.expr.is_Pow and len(self.variables) == 1 and self.variables == expr.variables): symbol = self.variables[0] other = expr.expr.as_independent(symbol, as_Add=False)[1] if (other.is_Pow and other.base == symbol and self.expr.base == symbol): if point.is_zero: rv = (self.expr.exp - other.exp).is_nonpositive if point.is_infinite: rv = (self.expr.exp - other.exp).is_nonnegative if rv is not None: return rv r = None ratio = self.expr/expr.expr ratio = powsimp(ratio, deep=True, combine='exp') for s in common_symbols: from sympy.series.limits import Limit l = Limit(ratio, s, point).doit(heuristics=False) if not isinstance(l, Limit): l = l != 0 else: l = None if r is None: r = l else: if r != l: return return r if self.expr.is_Pow and len(self.variables) == 1: symbol = self.variables[0] other = expr.as_independent(symbol, as_Add=False)[1] if (other.is_Pow and other.base == symbol and self.expr.base == symbol): if point.is_zero: rv = (self.expr.exp - other.exp).is_nonpositive if point.is_infinite: rv = (self.expr.exp - other.exp).is_nonnegative if rv is not None: return rv obj = self.func(expr, *self.args[1:]) return self.contains(obj) def __contains__(self, other): result = self.contains(other) if result is None: raise TypeError('contains did not evaluate to a bool') return result def _eval_subs(self, old, new): if old in self.variables: newexpr = self.expr.subs(old, new) i = self.variables.index(old) newvars = list(self.variables) newpt = list(self.point) if new.is_symbol: newvars[i] = new else: syms = new.free_symbols if len(syms) == 1 or old in syms: if old in syms: var = self.variables[i] else: var = syms.pop() # First, try to substitute self.point in the "new" # expr to see if this is a fixed point. # E.g. O(y).subs(y, sin(x)) point = new.subs(var, self.point[i]) if point != self.point[i]: from sympy.solvers.solveset import solveset d = Dummy() sol = solveset(old - new.subs(var, d), d) if isinstance(sol, Complement): e1 = sol.args[0] e2 = sol.args[1] sol = set(e1) - set(e2) res = [dict(zip((d, ), sol))] point = d.subs(res[0]).limit(old, self.point[i]) newvars[i] = var newpt[i] = point elif old not in syms: del newvars[i], newpt[i] if not syms and new == self.point[i]: newvars.extend(syms) newpt.extend([S.Zero]*len(syms)) else: return return Order(newexpr, *zip(newvars, newpt)) def _eval_conjugate(self): expr = self.expr._eval_conjugate() if expr is not None: return self.func(expr, *self.args[1:]) def _eval_derivative(self, x): return self.func(self.expr.diff(x), *self.args[1:]) or self def _eval_transpose(self): expr = self.expr._eval_transpose() if expr is not None: return self.func(expr, *self.args[1:]) def _sage_(self): #XXX: SAGE doesn't have Order yet. Let's return 0 instead. return Rational(0)._sage_() def __neg__(self): return self O = Order
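# ---------------------------------------------------------------------------
# Illustrative usage sketch (added by the editor, not part of the original
# module): a minimal, self-contained demonstration of Order absorption and
# containment, assuming only the public ``O`` shorthand defined above.  It is
# guarded so that it only runs when this file is executed directly.
if __name__ == "__main__":
    from sympy import Symbol

    x = Symbol('x')
    # Higher powers are absorbed into the order term as x -> 0.
    assert O(x) + x**2 == O(x)
    # Containment: x**2 lies in O(x) as x -> 0, but x does not lie in O(x**2).
    assert x**2 in O(x)
    assert x not in O(x**2)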
2a451c6164bc8f61db18a248684ef39a53b1f826d0300444d02cde363c21e3f9
""" Expand Hypergeometric (and Meijer G) functions into named special functions. The algorithm for doing this uses a collection of lookup tables of hypergeometric functions, and various of their properties, to expand many hypergeometric functions in terms of special functions. It is based on the following paper: Kelly B. Roach. Meijer G Function Representations. In: Proceedings of the 1997 International Symposium on Symbolic and Algebraic Computation, pages 205-211, New York, 1997. ACM. It is described in great(er) detail in the Sphinx documentation. """ # SUMMARY OF EXTENSIONS FOR MEIJER G FUNCTIONS # # o z**rho G(ap, bq; z) = G(ap + rho, bq + rho; z) # # o denote z*d/dz by D # # o It is helpful to keep in mind that ap and bq play essentially symmetric # roles: G(1/z) has slightly altered parameters, with ap and bq interchanged. # # o There are four shift operators: # A_J = b_J - D, J = 1, ..., n # B_J = 1 - a_j + D, J = 1, ..., m # C_J = -b_J + D, J = m+1, ..., q # D_J = a_J - 1 - D, J = n+1, ..., p # # A_J, C_J increment b_J # B_J, D_J decrement a_J # # o The corresponding four inverse-shift operators are defined if there # is no cancellation. Thus e.g. an index a_J (upper or lower) can be # incremented if a_J != b_i for i = 1, ..., q. # # o Order reduction: if b_j - a_i is a non-negative integer, where # j <= m and i > n, the corresponding quotient of gamma functions reduces # to a polynomial. Hence the G function can be expressed using a G-function # of lower order. # Similarly if j > m and i <= n. # # Secondly, there are paired index theorems [Adamchik, The evaluation of # integrals of Bessel functions via G-function identities]. Suppose there # are three parameters a, b, c, where a is an a_i, i <= n, b is a b_j, # j <= m and c is a denominator parameter (i.e. a_i, i > n or b_j, j > m). # Suppose further all three differ by integers. # Then the order can be reduced. # TODO work this out in detail. # # o An index quadruple is called suitable if its order cannot be reduced. # If there exists a sequence of shift operators transforming one index # quadruple into another, we say one is reachable from the other. # # o Deciding if one index quadruple is reachable from another is tricky. For # this reason, we use hand-built routines to match and instantiate formulas. 
# from __future__ import print_function, division from collections import defaultdict from itertools import product from sympy import SYMPY_DEBUG from sympy.core import (S, Dummy, symbols, sympify, Tuple, expand, I, pi, Mul, EulerGamma, oo, zoo, expand_func, Add, nan, Expr, Rational) from sympy.core.compatibility import default_sort_key, reduce from sympy.core.mod import Mod from sympy.functions import (exp, sqrt, root, log, lowergamma, cos, besseli, gamma, uppergamma, expint, erf, sin, besselj, Ei, Ci, Si, Shi, sinh, cosh, Chi, fresnels, fresnelc, polar_lift, exp_polar, floor, ceiling, rf, factorial, lerchphi, Piecewise, re, elliptic_k, elliptic_e) from sympy.functions.elementary.complexes import polarify, unpolarify from sympy.functions.special.hyper import (hyper, HyperRep_atanh, HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1, HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2, HyperRep_cosasin, HyperRep_sinasin, meijerg) from sympy.polys import poly, Poly from sympy.series import residue from sympy.simplify import simplify # type: ignore from sympy.simplify.powsimp import powdenest from sympy.utilities.iterables import sift # function to define "buckets" def _mod1(x): # TODO see if this can work as Mod(x, 1); this will require # different handling of the "buckets" since these need to # be sorted and that fails when there is a mixture of # integers and expressions with parameters. With the current # Mod behavior, Mod(k, 1) == Mod(1, 1) == 0 if k is an integer. # Although the sorting can be done with Basic.compare, this may # still require different handling of the sorted buckets. if x.is_Number: return Mod(x, 1) c, x = x.as_coeff_Add() return Mod(c, 1) + x # leave add formulae at the top for easy reference def add_formulae(formulae): """ Create our knowledge base. """ from sympy.matrices import Matrix a, b, c, z = symbols('a b c, z', cls=Dummy) def add(ap, bq, res): func = Hyper_Function(ap, bq) formulae.append(Formula(func, z, res, (a, b, c))) def addb(ap, bq, B, C, M): func = Hyper_Function(ap, bq) formulae.append(Formula(func, z, None, (a, b, c), B, C, M)) # Luke, Y. L. (1969), The Special Functions and Their Approximations, # Volume 1, section 6.2 # 0F0 add((), (), exp(z)) # 1F0 add((a, ), (), HyperRep_power1(-a, z)) # 2F1 addb((a, a - S.Half), (2*a, ), Matrix([HyperRep_power2(a, z), HyperRep_power2(a + S.Half, z)/2]), Matrix([[1, 0]]), Matrix([[(a - S.Half)*z/(1 - z), (S.Half - a)*z/(1 - z)], [a/(1 - z), a*(z - 2)/(1 - z)]])) addb((1, 1), (2, ), Matrix([HyperRep_log1(z), 1]), Matrix([[-1/z, 0]]), Matrix([[0, z/(z - 1)], [0, 0]])) addb((S.Half, 1), (S('3/2'), ), Matrix([HyperRep_atanh(z), 1]), Matrix([[1, 0]]), Matrix([[Rational(-1, 2), 1/(1 - z)/2], [0, 0]])) addb((S.Half, S.Half), (S('3/2'), ), Matrix([HyperRep_asin1(z), HyperRep_power1(Rational(-1, 2), z)]), Matrix([[1, 0]]), Matrix([[Rational(-1, 2), S.Half], [0, z/(1 - z)/2]])) addb((a, S.Half + a), (S.Half, ), Matrix([HyperRep_sqrts1(-a, z), -HyperRep_sqrts2(-a - S.Half, z)]), Matrix([[1, 0]]), Matrix([[0, -a], [z*(-2*a - 1)/2/(1 - z), S.Half - z*(-2*a - 1)/(1 - z)]])) # A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990). # Integrals and Series: More Special Functions, Vol. 3,. 
# Gordon and Breach Science Publisher addb([a, -a], [S.Half], Matrix([HyperRep_cosasin(a, z), HyperRep_sinasin(a, z)]), Matrix([[1, 0]]), Matrix([[0, -a], [a*z/(1 - z), 1/(1 - z)/2]])) addb([1, 1], [3*S.Half], Matrix([HyperRep_asin2(z), 1]), Matrix([[1, 0]]), Matrix([[(z - S.Half)/(1 - z), 1/(1 - z)/2], [0, 0]])) # Complete elliptic integrals K(z) and E(z), both a 2F1 function addb([S.Half, S.Half], [S.One], Matrix([elliptic_k(z), elliptic_e(z)]), Matrix([[2/pi, 0]]), Matrix([[Rational(-1, 2), -1/(2*z-2)], [Rational(-1, 2), S.Half]])) addb([Rational(-1, 2), S.Half], [S.One], Matrix([elliptic_k(z), elliptic_e(z)]), Matrix([[0, 2/pi]]), Matrix([[Rational(-1, 2), -1/(2*z-2)], [Rational(-1, 2), S.Half]])) # 3F2 addb([Rational(-1, 2), 1, 1], [S.Half, 2], Matrix([z*HyperRep_atanh(z), HyperRep_log1(z), 1]), Matrix([[Rational(-2, 3), -S.One/(3*z), Rational(2, 3)]]), Matrix([[S.Half, 0, z/(1 - z)/2], [0, 0, z/(z - 1)], [0, 0, 0]])) # actually the formula for 3/2 is much nicer ... addb([Rational(-1, 2), 1, 1], [2, 2], Matrix([HyperRep_power1(S.Half, z), HyperRep_log2(z), 1]), Matrix([[Rational(4, 9) - 16/(9*z), 4/(3*z), 16/(9*z)]]), Matrix([[z/2/(z - 1), 0, 0], [1/(2*(z - 1)), 0, S.Half], [0, 0, 0]])) # 1F1 addb([1], [b], Matrix([z**(1 - b) * exp(z) * lowergamma(b - 1, z), 1]), Matrix([[b - 1, 0]]), Matrix([[1 - b + z, 1], [0, 0]])) addb([a], [2*a], Matrix([z**(S.Half - a)*exp(z/2)*besseli(a - S.Half, z/2) * gamma(a + S.Half)/4**(S.Half - a), z**(S.Half - a)*exp(z/2)*besseli(a + S.Half, z/2) * gamma(a + S.Half)/4**(S.Half - a)]), Matrix([[1, 0]]), Matrix([[z/2, z/2], [z/2, (z/2 - 2*a)]])) mz = polar_lift(-1)*z addb([a], [a + 1], Matrix([mz**(-a)*a*lowergamma(a, mz), a*exp(z)]), Matrix([[1, 0]]), Matrix([[-a, 1], [0, z]])) # This one is redundant. add([Rational(-1, 2)], [S.Half], exp(z) - sqrt(pi*z)*(-I)*erf(I*sqrt(z))) # Added to get nice results for Laplace transform of Fresnel functions # http://functions.wolfram.com/07.22.03.6437.01 # Basic rule #add([1], [Rational(3, 4), Rational(5, 4)], # sqrt(pi) * (cos(2*sqrt(polar_lift(-1)*z))*fresnelc(2*root(polar_lift(-1)*z,4)/sqrt(pi)) + # sin(2*sqrt(polar_lift(-1)*z))*fresnels(2*root(polar_lift(-1)*z,4)/sqrt(pi))) # / (2*root(polar_lift(-1)*z,4))) # Manually tuned rule addb([1], [Rational(3, 4), Rational(5, 4)], Matrix([ sqrt(pi)*(I*sinh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)) + cosh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))) * exp(-I*pi/4)/(2*root(z, 4)), sqrt(pi)*root(z, 4)*(sinh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)) + I*cosh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))) *exp(-I*pi/4)/2, 1 ]), Matrix([[1, 0, 0]]), Matrix([[Rational(-1, 4), 1, Rational(1, 4)], [ z, Rational(1, 4), 0], [ 0, 0, 0]])) # 2F2 addb([S.Half, a], [Rational(3, 2), a + 1], Matrix([a/(2*a - 1)*(-I)*sqrt(pi/z)*erf(I*sqrt(z)), a/(2*a - 1)*(polar_lift(-1)*z)**(-a)* lowergamma(a, polar_lift(-1)*z), a/(2*a - 1)*exp(z)]), Matrix([[1, -1, 0]]), Matrix([[Rational(-1, 2), 0, 1], [0, -a, 1], [0, 0, z]])) # We make a "basis" of four functions instead of three, and give EulerGamma # an extra slot (it could just be a coefficient to 1). The advantage is # that this way Polys will not see multivariate polynomials (it treats # EulerGamma as an indeterminate), which is *way* faster. 
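    # Added note (not in the original source): with the C and B matrices below,
    # the encoded closed form is C*B = (Ei(z) - log(z))/z - EulerGamma/z, i.e.
    #     2F2(1, 1; 2, 2; z) = (Ei(z) - log(z) - EulerGamma)/z,
    # which follows from the series sum(z**n/((n + 1)**2*factorial(n)), n >= 0).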
addb([1, 1], [2, 2], Matrix([Ei(z) - log(z), exp(z), 1, EulerGamma]), Matrix([[1/z, 0, 0, -1/z]]), Matrix([[0, 1, -1, 0], [0, z, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])) # 0F1 add((), (S.Half, ), cosh(2*sqrt(z))) addb([], [b], Matrix([gamma(b)*z**((1 - b)/2)*besseli(b - 1, 2*sqrt(z)), gamma(b)*z**(1 - b/2)*besseli(b, 2*sqrt(z))]), Matrix([[1, 0]]), Matrix([[0, 1], [z, (1 - b)]])) # 0F3 x = 4*z**Rational(1, 4) def fp(a, z): return besseli(a, x) + besselj(a, x) def fm(a, z): return besseli(a, x) - besselj(a, x) # TODO branching addb([], [S.Half, a, a + S.Half], Matrix([fp(2*a - 1, z), fm(2*a, z)*z**Rational(1, 4), fm(2*a - 1, z)*sqrt(z), fp(2*a, z)*z**Rational(3, 4)]) * 2**(-2*a)*gamma(2*a)*z**((1 - 2*a)/4), Matrix([[1, 0, 0, 0]]), Matrix([[0, 1, 0, 0], [0, S.Half - a, 1, 0], [0, 0, S.Half, 1], [z, 0, 0, 1 - a]])) x = 2*(4*z)**Rational(1, 4)*exp_polar(I*pi/4) addb([], [a, a + S.Half, 2*a], (2*sqrt(polar_lift(-1)*z))**(1 - 2*a)*gamma(2*a)**2 * Matrix([besselj(2*a - 1, x)*besseli(2*a - 1, x), x*(besseli(2*a, x)*besselj(2*a - 1, x) - besseli(2*a - 1, x)*besselj(2*a, x)), x**2*besseli(2*a, x)*besselj(2*a, x), x**3*(besseli(2*a, x)*besselj(2*a - 1, x) + besseli(2*a - 1, x)*besselj(2*a, x))]), Matrix([[1, 0, 0, 0]]), Matrix([[0, Rational(1, 4), 0, 0], [0, (1 - 2*a)/2, Rational(-1, 2), 0], [0, 0, 1 - 2*a, Rational(1, 4)], [-32*z, 0, 0, 1 - a]])) # 1F2 addb([a], [a - S.Half, 2*a], Matrix([z**(S.Half - a)*besseli(a - S.Half, sqrt(z))**2, z**(1 - a)*besseli(a - S.Half, sqrt(z)) *besseli(a - Rational(3, 2), sqrt(z)), z**(Rational(3, 2) - a)*besseli(a - Rational(3, 2), sqrt(z))**2]), Matrix([[-gamma(a + S.Half)**2/4**(S.Half - a), 2*gamma(a - S.Half)*gamma(a + S.Half)/4**(1 - a), 0]]), Matrix([[1 - 2*a, 1, 0], [z/2, S.Half - a, S.Half], [0, z, 0]])) addb([S.Half], [b, 2 - b], pi*(1 - b)/sin(pi*b)* Matrix([besseli(1 - b, sqrt(z))*besseli(b - 1, sqrt(z)), sqrt(z)*(besseli(-b, sqrt(z))*besseli(b - 1, sqrt(z)) + besseli(1 - b, sqrt(z))*besseli(b, sqrt(z))), besseli(-b, sqrt(z))*besseli(b, sqrt(z))]), Matrix([[1, 0, 0]]), Matrix([[b - 1, S.Half, 0], [z, 0, z], [0, S.Half, -b]])) addb([S.Half], [Rational(3, 2), Rational(3, 2)], Matrix([Shi(2*sqrt(z))/2/sqrt(z), sinh(2*sqrt(z))/2/sqrt(z), cosh(2*sqrt(z))]), Matrix([[1, 0, 0]]), Matrix([[Rational(-1, 2), S.Half, 0], [0, Rational(-1, 2), S.Half], [0, 2*z, 0]])) # FresnelS # Basic rule #add([Rational(3, 4)], [Rational(3, 2),Rational(7, 4)], 6*fresnels( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( pi * (exp(pi*I/4)*root(z,4)*2/sqrt(pi))**3 ) ) # Manually tuned rule addb([Rational(3, 4)], [Rational(3, 2), Rational(7, 4)], Matrix( [ fresnels( exp( pi*I/4)*root( z, 4)*2/sqrt( pi) ) / ( pi * (exp(pi*I/4)*root(z, 4)*2/sqrt(pi))**3 ), sinh(2*sqrt(z))/sqrt(z), cosh(2*sqrt(z)) ]), Matrix([[6, 0, 0]]), Matrix([[Rational(-3, 4), Rational(1, 16), 0], [ 0, Rational(-1, 2), 1], [ 0, z, 0]])) # FresnelC # Basic rule #add([Rational(1, 4)], [S.Half,Rational(5, 4)], fresnelc( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) ) # Manually tuned rule addb([Rational(1, 4)], [S.Half, Rational(5, 4)], Matrix( [ sqrt( pi)*exp( -I*pi/4)*fresnelc( 2*root(z, 4)*exp(I*pi/4)/sqrt(pi))/(2*root(z, 4)), cosh(2*sqrt(z)), sinh(2*sqrt(z))*sqrt(z) ]), Matrix([[1, 0, 0]]), Matrix([[Rational(-1, 4), Rational(1, 4), 0 ], [ 0, 0, 1 ], [ 0, z, S.Half]])) # 2F3 # XXX with this five-parameter formula is pretty slow with the current # Formula.find_instantiations (creates 2!*3!*3**(2+3) ~ 3000 # instantiations ... But it's not too bad. 
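    # (Added note: the count quoted above is simply 2!*3!*3**(2 + 3)
    # = 2*6*243 = 2916, i.e. roughly 3000 candidate substitutions that
    # find_instantiations may have to enumerate for this formula.)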
addb([a, a + S.Half], [2*a, b, 2*a - b + 1], gamma(b)*gamma(2*a - b + 1) * (sqrt(z)/2)**(1 - 2*a) * Matrix([besseli(b - 1, sqrt(z))*besseli(2*a - b, sqrt(z)), sqrt(z)*besseli(b, sqrt(z))*besseli(2*a - b, sqrt(z)), sqrt(z)*besseli(b - 1, sqrt(z))*besseli(2*a - b + 1, sqrt(z)), besseli(b, sqrt(z))*besseli(2*a - b + 1, sqrt(z))]), Matrix([[1, 0, 0, 0]]), Matrix([[0, S.Half, S.Half, 0], [z/2, 1 - b, 0, z/2], [z/2, 0, b - 2*a, z/2], [0, S.Half, S.Half, -2*a]])) # (C/f above comment about eulergamma in the basis). addb([1, 1], [2, 2, Rational(3, 2)], Matrix([Chi(2*sqrt(z)) - log(2*sqrt(z)), cosh(2*sqrt(z)), sqrt(z)*sinh(2*sqrt(z)), 1, EulerGamma]), Matrix([[1/z, 0, 0, 0, -1/z]]), Matrix([[0, S.Half, 0, Rational(-1, 2), 0], [0, 0, 1, 0, 0], [0, z, S.Half, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])) # 3F3 # This is rule: http://functions.wolfram.com/07.31.03.0134.01 # Initial reason to add it was a nice solution for # integrate(erf(a*z)/z**2, z) and same for erfc and erfi. # Basic rule # add([1, 1, a], [2, 2, a+1], (a/(z*(a-1)**2)) * # (1 - (-z)**(1-a) * (gamma(a) - uppergamma(a,-z)) # - (a-1) * (EulerGamma + uppergamma(0,-z) + log(-z)) # - exp(z))) # Manually tuned rule addb([1, 1, a], [2, 2, a+1], Matrix([a*(log(-z) + expint(1, -z) + EulerGamma)/(z*(a**2 - 2*a + 1)), a*(-z)**(-a)*(gamma(a) - uppergamma(a, -z))/(a - 1)**2, a*exp(z)/(a**2 - 2*a + 1), a/(z*(a**2 - 2*a + 1))]), Matrix([[1-a, 1, -1/z, 1]]), Matrix([[-1,0,-1/z,1], [0,-a,1,0], [0,0,z,0], [0,0,0,-1]])) def add_meijerg_formulae(formulae): from sympy.matrices import Matrix a, b, c, z = list(map(Dummy, 'abcz')) rho = Dummy('rho') def add(an, ap, bm, bq, B, C, M, matcher): formulae.append(MeijerFormula(an, ap, bm, bq, z, [a, b, c, rho], B, C, M, matcher)) def detect_uppergamma(func): x = func.an[0] y, z = func.bm swapped = False if not _mod1((x - y).simplify()): swapped = True (y, z) = (z, y) if _mod1((x - z).simplify()) or x - z > 0: return None l = [y, x] if swapped: l = [x, y] return {rho: y, a: x - y}, G_Function([x], [], l, []) add([a + rho], [], [rho, a + rho], [], Matrix([gamma(1 - a)*z**rho*exp(z)*uppergamma(a, z), gamma(1 - a)*z**(a + rho)]), Matrix([[1, 0]]), Matrix([[rho + z, -1], [0, a + rho]]), detect_uppergamma) def detect_3113(func): """http://functions.wolfram.com/07.34.03.0984.01""" x = func.an[0] u, v, w = func.bm if _mod1((u - v).simplify()) == 0: if _mod1((v - w).simplify()) == 0: return sig = (S.Half, S.Half, S.Zero) x1, x2, y = u, v, w else: if _mod1((x - u).simplify()) == 0: sig = (S.Half, S.Zero, S.Half) x1, y, x2 = u, v, w else: sig = (S.Zero, S.Half, S.Half) y, x1, x2 = u, v, w if (_mod1((x - x1).simplify()) != 0 or _mod1((x - x2).simplify()) != 0 or _mod1((x - y).simplify()) != S.Half or x - x1 > 0 or x - x2 > 0): return return {a: x}, G_Function([x], [], [x - S.Half + t for t in sig], []) s = sin(2*sqrt(z)) c_ = cos(2*sqrt(z)) S_ = Si(2*sqrt(z)) - pi/2 C = Ci(2*sqrt(z)) add([a], [], [a, a, a - S.Half], [], Matrix([sqrt(pi)*z**(a - S.Half)*(c_*S_ - s*C), sqrt(pi)*z**a*(s*S_ + c_*C), sqrt(pi)*z**a]), Matrix([[-2, 0, 0]]), Matrix([[a - S.Half, -1, 0], [z, a, S.Half], [0, 0, a]]), detect_3113) def make_simp(z): """ Create a function that simplifies rational functions in ``z``. """ def simp(expr): """ Efficiently simplify the rational function ``expr``. """ numer, denom = expr.as_numer_denom() numer = numer.expand() # denom = denom.expand() # is this needed? 
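        # Added descriptive comment (not in the original source): cancel the
        # common polynomial factors of numerator and denominator in ``z`` and
        # rebuild the simplified rational function from the coefficient and
        # the two reduced polynomials returned by Poly.cancel.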
c, numer, denom = poly(numer, z).cancel(poly(denom, z)) return c * numer.as_expr() / denom.as_expr() return simp def debug(*args): if SYMPY_DEBUG: for a in args: print(a, end="") print() class Hyper_Function(Expr): """ A generalized hypergeometric function. """ def __new__(cls, ap, bq): obj = super(Hyper_Function, cls).__new__(cls) obj.ap = Tuple(*list(map(expand, ap))) obj.bq = Tuple(*list(map(expand, bq))) return obj @property def args(self): return (self.ap, self.bq) @property def sizes(self): return (len(self.ap), len(self.bq)) @property def gamma(self): """ Number of upper parameters that are negative integers This is a transformation invariant. """ return sum(bool(x.is_integer and x.is_negative) for x in self.ap) def _hashable_content(self): return super(Hyper_Function, self)._hashable_content() + (self.ap, self.bq) def __call__(self, arg): return hyper(self.ap, self.bq, arg) def build_invariants(self): """ Compute the invariant vector. The invariant vector is: (gamma, ((s1, n1), ..., (sk, nk)), ((t1, m1), ..., (tr, mr))) where gamma is the number of integer a < 0, s1 < ... < sk nl is the number of parameters a_i congruent to sl mod 1 t1 < ... < tr ml is the number of parameters b_i congruent to tl mod 1 If the index pair contains parameters, then this is not truly an invariant, since the parameters cannot be sorted uniquely mod1. Examples ======== >>> from sympy.simplify.hyperexpand import Hyper_Function >>> from sympy import S >>> ap = (S.Half, S.One/3, S(-1)/2, -2) >>> bq = (1, 2) Here gamma = 1, k = 3, s1 = 0, s2 = 1/3, s3 = 1/2 n1 = 1, n2 = 1, n2 = 2 r = 1, t1 = 0 m1 = 2: >>> Hyper_Function(ap, bq).build_invariants() (1, ((0, 1), (1/3, 1), (1/2, 2)), ((0, 2),)) """ abuckets, bbuckets = sift(self.ap, _mod1), sift(self.bq, _mod1) def tr(bucket): bucket = list(bucket.items()) if not any(isinstance(x[0], Mod) for x in bucket): bucket.sort(key=lambda x: default_sort_key(x[0])) bucket = tuple([(mod, len(values)) for mod, values in bucket if values]) return bucket return (self.gamma, tr(abuckets), tr(bbuckets)) def difficulty(self, func): """ Estimate how many steps it takes to reach ``func`` from self. Return -1 if impossible. """ if self.gamma != func.gamma: return -1 oabuckets, obbuckets, abuckets, bbuckets = [sift(params, _mod1) for params in (self.ap, self.bq, func.ap, func.bq)] diff = 0 for bucket, obucket in [(abuckets, oabuckets), (bbuckets, obbuckets)]: for mod in set(list(bucket.keys()) + list(obucket.keys())): if (not mod in bucket) or (not mod in obucket) \ or len(bucket[mod]) != len(obucket[mod]): return -1 l1 = list(bucket[mod]) l2 = list(obucket[mod]) l1.sort() l2.sort() for i, j in zip(l1, l2): diff += abs(i - j) return diff def _is_suitable_origin(self): """ Decide if ``self`` is a suitable origin. A function is a suitable origin iff: * none of the ai equals bj + n, with n a non-negative integer * none of the ai is zero * none of the bj is a non-positive integer Note that this gives meaningful results only when none of the indices are symbolic. """ for a in self.ap: for b in self.bq: if (a - b).is_integer and (a - b).is_negative is False: return False for a in self.ap: if a == 0: return False for b in self.bq: if b.is_integer and b.is_nonpositive: return False return True class G_Function(Expr): """ A Meijer G-function. 
""" def __new__(cls, an, ap, bm, bq): obj = super(G_Function, cls).__new__(cls) obj.an = Tuple(*list(map(expand, an))) obj.ap = Tuple(*list(map(expand, ap))) obj.bm = Tuple(*list(map(expand, bm))) obj.bq = Tuple(*list(map(expand, bq))) return obj @property def args(self): return (self.an, self.ap, self.bm, self.bq) def _hashable_content(self): return super(G_Function, self)._hashable_content() + self.args def __call__(self, z): return meijerg(self.an, self.ap, self.bm, self.bq, z) def compute_buckets(self): """ Compute buckets for the fours sets of parameters. We guarantee that any two equal Mod objects returned are actually the same, and that the buckets are sorted by real part (an and bq descendending, bm and ap ascending). Examples ======== >>> from sympy.simplify.hyperexpand import G_Function >>> from sympy.abc import y >>> from sympy import S, symbols >>> a, b = [1, 3, 2, S(3)/2], [1 + y, y, 2, y + 3] >>> G_Function(a, b, [2], [y]).compute_buckets() ({0: [3, 2, 1], 1/2: [3/2]}, {0: [2], y: [y, y + 1, y + 3]}, {0: [2]}, {y: [y]}) """ dicts = pan, pap, pbm, pbq = [defaultdict(list) for i in range(4)] for dic, lis in zip(dicts, (self.an, self.ap, self.bm, self.bq)): for x in lis: dic[_mod1(x)].append(x) for dic, flip in zip(dicts, (True, False, False, True)): for m, items in dic.items(): x0 = items[0] items.sort(key=lambda x: x - x0, reverse=flip) dic[m] = items return tuple([dict(w) for w in dicts]) @property def signature(self): return (len(self.an), len(self.ap), len(self.bm), len(self.bq)) # Dummy variable. _x = Dummy('x') class Formula(object): """ This class represents hypergeometric formulae. Its data members are: - z, the argument - closed_form, the closed form expression - symbols, the free symbols (parameters) in the formula - func, the function - B, C, M (see _compute_basis) Examples ======== >>> from sympy.abc import a, b, z >>> from sympy.simplify.hyperexpand import Formula, Hyper_Function >>> func = Hyper_Function((a/2, a/3 + b, (1+a)/2), (a, b, (a+b)/7)) >>> f = Formula(func, z, None, [a, b]) """ def _compute_basis(self, closed_form): """ Compute a set of functions B=(f1, ..., fn), a nxn matrix M and a 1xn matrix C such that: closed_form = C B z d/dz B = M B. """ from sympy.matrices import Matrix, eye, zeros afactors = [_x + a for a in self.func.ap] bfactors = [_x + b - 1 for b in self.func.bq] expr = _x*Mul(*bfactors) - self.z*Mul(*afactors) poly = Poly(expr, _x) n = poly.degree() - 1 b = [closed_form] for _ in range(n): b.append(self.z*b[-1].diff(self.z)) self.B = Matrix(b) self.C = Matrix([[1] + [0]*n]) m = eye(n) m = m.col_insert(0, zeros(n, 1)) l = poly.all_coeffs()[1:] l.reverse() self.M = m.row_insert(n, -Matrix([l])/poly.all_coeffs()[0]) def __init__(self, func, z, res, symbols, B=None, C=None, M=None): z = sympify(z) res = sympify(res) symbols = [x for x in sympify(symbols) if func.has(x)] self.z = z self.symbols = symbols self.B = B self.C = C self.M = M self.func = func # TODO with symbolic parameters, it could be advantageous # (for prettier answers) to compute a basis only *after* # instantiation if res is not None: self._compute_basis(res) @property def closed_form(self): return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero) def find_instantiations(self, func): """ Find substitutions of the free symbols that match ``func``. Return the substitution dictionaries as a list. Note that the returned instantiations need not actually match, or be valid! 
""" from sympy.solvers import solve ap = func.ap bq = func.bq if len(ap) != len(self.func.ap) or len(bq) != len(self.func.bq): raise TypeError('Cannot instantiate other number of parameters') symbol_values = [] for a in self.symbols: if a in self.func.ap.args: symbol_values.append(ap) elif a in self.func.bq.args: symbol_values.append(bq) else: raise ValueError("At least one of the parameters of the " "formula must be equal to %s" % (a,)) base_repl = [dict(list(zip(self.symbols, values))) for values in product(*symbol_values)] abuckets, bbuckets = [sift(params, _mod1) for params in [ap, bq]] a_inv, b_inv = [dict((a, len(vals)) for a, vals in bucket.items()) for bucket in [abuckets, bbuckets]] critical_values = [[0] for _ in self.symbols] result = [] _n = Dummy() for repl in base_repl: symb_a, symb_b = [sift(params, lambda x: _mod1(x.xreplace(repl))) for params in [self.func.ap, self.func.bq]] for bucket, obucket in [(abuckets, symb_a), (bbuckets, symb_b)]: for mod in set(list(bucket.keys()) + list(obucket.keys())): if (not mod in bucket) or (not mod in obucket) \ or len(bucket[mod]) != len(obucket[mod]): break for a, vals in zip(self.symbols, critical_values): if repl[a].free_symbols: continue exprs = [expr for expr in obucket[mod] if expr.has(a)] repl0 = repl.copy() repl0[a] += _n for expr in exprs: for target in bucket[mod]: n0, = solve(expr.xreplace(repl0) - target, _n) if n0.free_symbols: raise ValueError("Value should not be true") vals.append(n0) else: values = [] for a, vals in zip(self.symbols, critical_values): a0 = repl[a] min_ = floor(min(vals)) max_ = ceiling(max(vals)) values.append([a0 + n for n in range(min_, max_ + 1)]) result.extend(dict(list(zip(self.symbols, l))) for l in product(*values)) return result class FormulaCollection(object): """ A collection of formulae to use as origins. """ def __init__(self): """ Doing this globally at module init time is a pain ... """ self.symbolic_formulae = {} self.concrete_formulae = {} self.formulae = [] add_formulae(self.formulae) # Now process the formulae into a helpful form. # These dicts are indexed by (p, q). for f in self.formulae: sizes = f.func.sizes if len(f.symbols) > 0: self.symbolic_formulae.setdefault(sizes, []).append(f) else: inv = f.func.build_invariants() self.concrete_formulae.setdefault(sizes, {})[inv] = f def lookup_origin(self, func): """ Given the suitable target ``func``, try to find an origin in our knowledge base. Examples ======== >>> from sympy.simplify.hyperexpand import (FormulaCollection, ... Hyper_Function) >>> f = FormulaCollection() >>> f.lookup_origin(Hyper_Function((), ())).closed_form exp(_z) >>> f.lookup_origin(Hyper_Function([1], ())).closed_form HyperRep_power1(-1, _z) >>> from sympy import S >>> i = Hyper_Function([S('1/4'), S('3/4 + 4')], [S.Half]) >>> f.lookup_origin(i).closed_form HyperRep_sqrts1(-1/4, _z) """ inv = func.build_invariants() sizes = func.sizes if sizes in self.concrete_formulae and \ inv in self.concrete_formulae[sizes]: return self.concrete_formulae[sizes][inv] # We don't have a concrete formula. Try to instantiate. if not sizes in self.symbolic_formulae: return None # Too bad... 
possible = [] for f in self.symbolic_formulae[sizes]: repls = f.find_instantiations(func) for repl in repls: func2 = f.func.xreplace(repl) if not func2._is_suitable_origin(): continue diff = func2.difficulty(func) if diff == -1: continue possible.append((diff, repl, f, func2)) # find the nearest origin possible.sort(key=lambda x: x[0]) for _, repl, f, func2 in possible: f2 = Formula(func2, f.z, None, [], f.B.subs(repl), f.C.subs(repl), f.M.subs(repl)) if not any(e.has(S.NaN, oo, -oo, zoo) for e in [f2.B, f2.M, f2.C]): return f2 return None class MeijerFormula(object): """ This class represents a Meijer G-function formula. Its data members are: - z, the argument - symbols, the free symbols (parameters) in the formula - func, the function - B, C, M (c/f ordinary Formula) """ def __init__(self, an, ap, bm, bq, z, symbols, B, C, M, matcher): an, ap, bm, bq = [Tuple(*list(map(expand, w))) for w in [an, ap, bm, bq]] self.func = G_Function(an, ap, bm, bq) self.z = z self.symbols = symbols self._matcher = matcher self.B = B self.C = C self.M = M @property def closed_form(self): return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero) def try_instantiate(self, func): """ Try to instantiate the current formula to (almost) match func. This uses the _matcher passed on init. """ if func.signature != self.func.signature: return None res = self._matcher(func) if res is not None: subs, newfunc = res return MeijerFormula(newfunc.an, newfunc.ap, newfunc.bm, newfunc.bq, self.z, [], self.B.subs(subs), self.C.subs(subs), self.M.subs(subs), None) class MeijerFormulaCollection(object): """ This class holds a collection of meijer g formulae. """ def __init__(self): formulae = [] add_meijerg_formulae(formulae) self.formulae = defaultdict(list) for formula in formulae: self.formulae[formula.func.signature].append(formula) self.formulae = dict(self.formulae) def lookup_origin(self, func): """ Try to find a formula that matches func. """ if not func.signature in self.formulae: return None for formula in self.formulae[func.signature]: res = formula.try_instantiate(func) if res is not None: return res class Operator(object): """ Base class for operators to be applied to our functions. These operators are differential operators. They are by convention expressed in the variable D = z*d/dz (although this base class does not actually care). Note that when the operator is applied to an object, we typically do *not* blindly differentiate but instead use a different representation of the z*d/dz operator (see make_derivative_operator). To subclass from this, define a __init__ method that initializes a self._poly variable. This variable stores a polynomial. By convention the generator is z*d/dz, and acts to the right of all coefficients. Thus this poly x**2 + 2*z*x + 1 represents the differential operator (z*d/dz)**2 + 2*z**2*d/dz. This class is used only in the implementation of the hypergeometric function expansion algorithm. """ def apply(self, obj, op): """ Apply ``self`` to the object ``obj``, where the generator is ``op``. 
Examples ======== >>> from sympy.simplify.hyperexpand import Operator >>> from sympy.polys.polytools import Poly >>> from sympy.abc import x, y, z >>> op = Operator() >>> op._poly = Poly(x**2 + z*x + y, x) >>> op.apply(z**7, lambda f: f.diff(z)) y*z**7 + 7*z**7 + 42*z**5 """ coeffs = self._poly.all_coeffs() coeffs.reverse() diffs = [obj] for c in coeffs[1:]: diffs.append(op(diffs[-1])) r = coeffs[0]*diffs[0] for c, d in zip(coeffs[1:], diffs[1:]): r += c*d return r class MultOperator(Operator): """ Simply multiply by a "constant" """ def __init__(self, p): self._poly = Poly(p, _x) class ShiftA(Operator): """ Increment an upper index. """ def __init__(self, ai): ai = sympify(ai) if ai == 0: raise ValueError('Cannot increment zero upper index.') self._poly = Poly(_x/ai + 1, _x) def __str__(self): return '<Increment upper %s.>' % (1/self._poly.all_coeffs()[0]) class ShiftB(Operator): """ Decrement a lower index. """ def __init__(self, bi): bi = sympify(bi) if bi == 1: raise ValueError('Cannot decrement unit lower index.') self._poly = Poly(_x/(bi - 1) + 1, _x) def __str__(self): return '<Decrement lower %s.>' % (1/self._poly.all_coeffs()[0] + 1) class UnShiftA(Operator): """ Decrement an upper index. """ def __init__(self, ap, bq, i, z): """ Note: i counts from zero! """ ap, bq, i = list(map(sympify, [ap, bq, i])) self._ap = ap self._bq = bq self._i = i ap = list(ap) bq = list(bq) ai = ap.pop(i) - 1 if ai == 0: raise ValueError('Cannot decrement unit upper index.') m = Poly(z*ai, _x) for a in ap: m *= Poly(_x + a, _x) A = Dummy('A') n = D = Poly(ai*A - ai, A) for b in bq: n *= D + (b - 1).as_poly(A) b0 = -n.nth(0) if b0 == 0: raise ValueError('Cannot decrement upper index: ' 'cancels with lower') n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, _x/ai + 1), _x) self._poly = Poly((n - m)/b0, _x) def __str__(self): return '<Decrement upper index #%s of %s, %s.>' % (self._i, self._ap, self._bq) class UnShiftB(Operator): """ Increment a lower index. """ def __init__(self, ap, bq, i, z): """ Note: i counts from zero! """ ap, bq, i = list(map(sympify, [ap, bq, i])) self._ap = ap self._bq = bq self._i = i ap = list(ap) bq = list(bq) bi = bq.pop(i) + 1 if bi == 0: raise ValueError('Cannot increment -1 lower index.') m = Poly(_x*(bi - 1), _x) for b in bq: m *= Poly(_x + b - 1, _x) B = Dummy('B') D = Poly((bi - 1)*B - bi + 1, B) n = Poly(z, B) for a in ap: n *= (D + a.as_poly(B)) b0 = n.nth(0) if b0 == 0: raise ValueError('Cannot increment index: cancels with upper') n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs( B, _x/(bi - 1) + 1), _x) self._poly = Poly((m - n)/b0, _x) def __str__(self): return '<Increment lower index #%s of %s, %s.>' % (self._i, self._ap, self._bq) class MeijerShiftA(Operator): """ Increment an upper b index. """ def __init__(self, bi): bi = sympify(bi) self._poly = Poly(bi - _x, _x) def __str__(self): return '<Increment upper b=%s.>' % (self._poly.all_coeffs()[1]) class MeijerShiftB(Operator): """ Decrement an upper a index. """ def __init__(self, bi): bi = sympify(bi) self._poly = Poly(1 - bi + _x, _x) def __str__(self): return '<Decrement upper a=%s.>' % (1 - self._poly.all_coeffs()[1]) class MeijerShiftC(Operator): """ Increment a lower b index. """ def __init__(self, bi): bi = sympify(bi) self._poly = Poly(-bi + _x, _x) def __str__(self): return '<Increment lower b=%s.>' % (-self._poly.all_coeffs()[1]) class MeijerShiftD(Operator): """ Decrement a lower a index. 
""" def __init__(self, bi): bi = sympify(bi) self._poly = Poly(bi - 1 - _x, _x) def __str__(self): return '<Decrement lower a=%s.>' % (self._poly.all_coeffs()[1] + 1) class MeijerUnShiftA(Operator): """ Decrement an upper b index. """ def __init__(self, an, ap, bm, bq, i, z): """ Note: i counts from zero! """ an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i])) self._an = an self._ap = ap self._bm = bm self._bq = bq self._i = i an = list(an) ap = list(ap) bm = list(bm) bq = list(bq) bi = bm.pop(i) - 1 m = Poly(1, _x) for b in bm: m *= Poly(b - _x, _x) for b in bq: m *= Poly(_x - b, _x) A = Dummy('A') D = Poly(bi - A, A) n = Poly(z, A) for a in an: n *= (D + 1 - a) for a in ap: n *= (-D + a - 1) b0 = n.nth(0) if b0 == 0: raise ValueError('Cannot decrement upper b index (cancels)') n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, bi - _x), _x) self._poly = Poly((m - n)/b0, _x) def __str__(self): return '<Decrement upper b index #%s of %s, %s, %s, %s.>' % (self._i, self._an, self._ap, self._bm, self._bq) class MeijerUnShiftB(Operator): """ Increment an upper a index. """ def __init__(self, an, ap, bm, bq, i, z): """ Note: i counts from zero! """ an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i])) self._an = an self._ap = ap self._bm = bm self._bq = bq self._i = i an = list(an) ap = list(ap) bm = list(bm) bq = list(bq) ai = an.pop(i) + 1 m = Poly(z, _x) for a in an: m *= Poly(1 - a + _x, _x) for a in ap: m *= Poly(a - 1 - _x, _x) B = Dummy('B') D = Poly(B + ai - 1, B) n = Poly(1, B) for b in bm: n *= (-D + b) for b in bq: n *= (D - b) b0 = n.nth(0) if b0 == 0: raise ValueError('Cannot increment upper a index (cancels)') n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs( B, 1 - ai + _x), _x) self._poly = Poly((m - n)/b0, _x) def __str__(self): return '<Increment upper a index #%s of %s, %s, %s, %s.>' % (self._i, self._an, self._ap, self._bm, self._bq) class MeijerUnShiftC(Operator): """ Decrement a lower b index. """ # XXX this is "essentially" the same as MeijerUnShiftA. This "essentially" # can be made rigorous using the functional equation G(1/z) = G'(z), # where G' denotes a G function of slightly altered parameters. # However, sorting out the details seems harder than just coding it # again. def __init__(self, an, ap, bm, bq, i, z): """ Note: i counts from zero! """ an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i])) self._an = an self._ap = ap self._bm = bm self._bq = bq self._i = i an = list(an) ap = list(ap) bm = list(bm) bq = list(bq) bi = bq.pop(i) - 1 m = Poly(1, _x) for b in bm: m *= Poly(b - _x, _x) for b in bq: m *= Poly(_x - b, _x) C = Dummy('C') D = Poly(bi + C, C) n = Poly(z, C) for a in an: n *= (D + 1 - a) for a in ap: n *= (-D + a - 1) b0 = n.nth(0) if b0 == 0: raise ValueError('Cannot decrement lower b index (cancels)') n = Poly(Poly(n.all_coeffs()[:-1], C).as_expr().subs(C, _x - bi), _x) self._poly = Poly((m - n)/b0, _x) def __str__(self): return '<Decrement lower b index #%s of %s, %s, %s, %s.>' % (self._i, self._an, self._ap, self._bm, self._bq) class MeijerUnShiftD(Operator): """ Increment a lower a index. """ # XXX This is essentially the same as MeijerUnShiftA. # See comment at MeijerUnShiftC. def __init__(self, an, ap, bm, bq, i, z): """ Note: i counts from zero! 
""" an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i])) self._an = an self._ap = ap self._bm = bm self._bq = bq self._i = i an = list(an) ap = list(ap) bm = list(bm) bq = list(bq) ai = ap.pop(i) + 1 m = Poly(z, _x) for a in an: m *= Poly(1 - a + _x, _x) for a in ap: m *= Poly(a - 1 - _x, _x) B = Dummy('B') # - this is the shift operator `D_I` D = Poly(ai - 1 - B, B) n = Poly(1, B) for b in bm: n *= (-D + b) for b in bq: n *= (D - b) b0 = n.nth(0) if b0 == 0: raise ValueError('Cannot increment lower a index (cancels)') n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs( B, ai - 1 - _x), _x) self._poly = Poly((m - n)/b0, _x) def __str__(self): return '<Increment lower a index #%s of %s, %s, %s, %s.>' % (self._i, self._an, self._ap, self._bm, self._bq) class ReduceOrder(Operator): """ Reduce Order by cancelling an upper and a lower index. """ def __new__(cls, ai, bj): """ For convenience if reduction is not possible, return None. """ ai = sympify(ai) bj = sympify(bj) n = ai - bj if not n.is_Integer or n < 0: return None if bj.is_integer and bj.is_nonpositive: return None expr = Operator.__new__(cls) p = S.One for k in range(n): p *= (_x + bj + k)/(bj + k) expr._poly = Poly(p, _x) expr._a = ai expr._b = bj return expr @classmethod def _meijer(cls, b, a, sign): """ Cancel b + sign*s and a + sign*s This is for meijer G functions. """ b = sympify(b) a = sympify(a) n = b - a if n.is_negative or not n.is_Integer: return None expr = Operator.__new__(cls) p = S.One for k in range(n): p *= (sign*_x + a + k) expr._poly = Poly(p, _x) if sign == -1: expr._a = b expr._b = a else: expr._b = Add(1, a - 1, evaluate=False) expr._a = Add(1, b - 1, evaluate=False) return expr @classmethod def meijer_minus(cls, b, a): return cls._meijer(b, a, -1) @classmethod def meijer_plus(cls, a, b): return cls._meijer(1 - a, 1 - b, 1) def __str__(self): return '<Reduce order by cancelling upper %s with lower %s.>' % \ (self._a, self._b) def _reduce_order(ap, bq, gen, key): """ Order reduction algorithm used in Hypergeometric and Meijer G """ ap = list(ap) bq = list(bq) ap.sort(key=key) bq.sort(key=key) nap = [] # we will edit bq in place operators = [] for a in ap: op = None for i in range(len(bq)): op = gen(a, bq[i]) if op is not None: bq.pop(i) break if op is None: nap.append(a) else: operators.append(op) return nap, bq, operators def reduce_order(func): """ Given the hypergeometric function ``func``, find a sequence of operators to reduces order as much as possible. Return (newfunc, [operators]), where applying the operators to the hypergeometric function newfunc yields func. Examples ======== >>> from sympy.simplify.hyperexpand import reduce_order, Hyper_Function >>> reduce_order(Hyper_Function((1, 2), (3, 4))) (Hyper_Function((1, 2), (3, 4)), []) >>> reduce_order(Hyper_Function((1,), (1,))) (Hyper_Function((), ()), [<Reduce order by cancelling upper 1 with lower 1.>]) >>> reduce_order(Hyper_Function((2, 4), (3, 3))) (Hyper_Function((2,), (3,)), [<Reduce order by cancelling upper 4 with lower 3.>]) """ nap, nbq, operators = _reduce_order(func.ap, func.bq, ReduceOrder, default_sort_key) return Hyper_Function(Tuple(*nap), Tuple(*nbq)), operators def reduce_order_meijer(func): """ Given the Meijer G function parameters, ``func``, find a sequence of operators that reduces order as much as possible. Return newfunc, [operators]. Examples ======== >>> from sympy.simplify.hyperexpand import (reduce_order_meijer, ... 
G_Function) >>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 2]))[0] G_Function((4, 3), (5, 6), (3, 4), (2, 1)) >>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 8]))[0] G_Function((3,), (5, 6), (3, 4), (1,)) >>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [1, 5]))[0] G_Function((3,), (), (), (1,)) >>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [5, 3]))[0] G_Function((), (), (), ()) """ nan, nbq, ops1 = _reduce_order(func.an, func.bq, ReduceOrder.meijer_plus, lambda x: default_sort_key(-x)) nbm, nap, ops2 = _reduce_order(func.bm, func.ap, ReduceOrder.meijer_minus, default_sort_key) return G_Function(nan, nap, nbm, nbq), ops1 + ops2 def make_derivative_operator(M, z): """ Create a derivative operator, to be passed to Operator.apply. """ def doit(C): r = z*C.diff(z) + C*M r = r.applyfunc(make_simp(z)) return r return doit def apply_operators(obj, ops, op): """ Apply the list of operators ``ops`` to object ``obj``, substituting ``op`` for the generator. """ res = obj for o in reversed(ops): res = o.apply(res, op) return res def devise_plan(target, origin, z): """ Devise a plan (consisting of shift and un-shift operators) to be applied to the hypergeometric function ``target`` to yield ``origin``. Returns a list of operators. Examples ======== >>> from sympy.simplify.hyperexpand import devise_plan, Hyper_Function >>> from sympy.abc import z Nothing to do: >>> devise_plan(Hyper_Function((1, 2), ()), Hyper_Function((1, 2), ()), z) [] >>> devise_plan(Hyper_Function((), (1, 2)), Hyper_Function((), (1, 2)), z) [] Very simple plans: >>> devise_plan(Hyper_Function((2,), ()), Hyper_Function((1,), ()), z) [<Increment upper 1.>] >>> devise_plan(Hyper_Function((), (2,)), Hyper_Function((), (1,)), z) [<Increment lower index #0 of [], [1].>] Several buckets: >>> from sympy import S >>> devise_plan(Hyper_Function((1, S.Half), ()), ... Hyper_Function((2, S('3/2')), ()), z) #doctest: +NORMALIZE_WHITESPACE [<Decrement upper index #0 of [3/2, 1], [].>, <Decrement upper index #0 of [2, 3/2], [].>] A slightly more complicated plan: >>> devise_plan(Hyper_Function((1, 3), ()), Hyper_Function((2, 2), ()), z) [<Increment upper 2.>, <Decrement upper index #0 of [2, 2], [].>] Another more complicated plan: (note that the ap have to be shifted first!) >>> devise_plan(Hyper_Function((1, -1), (2,)), Hyper_Function((3, -2), (4,)), z) [<Decrement lower 3.>, <Decrement lower 4.>, <Decrement upper index #1 of [-1, 2], [4].>, <Decrement upper index #1 of [-1, 3], [4].>, <Increment upper -2.>] """ abuckets, bbuckets, nabuckets, nbbuckets = [sift(params, _mod1) for params in (target.ap, target.bq, origin.ap, origin.bq)] if len(list(abuckets.keys())) != len(list(nabuckets.keys())) or \ len(list(bbuckets.keys())) != len(list(nbbuckets.keys())): raise ValueError('%s not reachable from %s' % (target, origin)) ops = [] def do_shifts(fro, to, inc, dec): ops = [] for i in range(len(fro)): if to[i] - fro[i] > 0: sh = inc ch = 1 else: sh = dec ch = -1 while to[i] != fro[i]: ops += [sh(fro, i)] fro[i] += ch return ops def do_shifts_a(nal, nbk, al, aother, bother): """ Shift us from (nal, nbk) to (al, nbk). """ return do_shifts(nal, al, lambda p, i: ShiftA(p[i]), lambda p, i: UnShiftA(p + aother, nbk + bother, i, z)) def do_shifts_b(nal, nbk, bk, aother, bother): """ Shift us from (nal, nbk) to (nal, bk). 
""" return do_shifts(nbk, bk, lambda p, i: UnShiftB(nal + aother, p + bother, i, z), lambda p, i: ShiftB(p[i])) for r in sorted(list(abuckets.keys()) + list(bbuckets.keys()), key=default_sort_key): al = () nal = () bk = () nbk = () if r in abuckets: al = abuckets[r] nal = nabuckets[r] if r in bbuckets: bk = bbuckets[r] nbk = nbbuckets[r] if len(al) != len(nal) or len(bk) != len(nbk): raise ValueError('%s not reachable from %s' % (target, origin)) al, nal, bk, nbk = [sorted(list(w), key=default_sort_key) for w in [al, nal, bk, nbk]] def others(dic, key): l = [] for k, value in dic.items(): if k != key: l += list(dic[k]) return l aother = others(nabuckets, r) bother = others(nbbuckets, r) if len(al) == 0: # there can be no complications, just shift the bs as we please ops += do_shifts_b([], nbk, bk, aother, bother) elif len(bk) == 0: # there can be no complications, just shift the as as we please ops += do_shifts_a(nal, [], al, aother, bother) else: namax = nal[-1] amax = al[-1] if nbk[0] - namax <= 0 or bk[0] - amax <= 0: raise ValueError('Non-suitable parameters.') if namax - amax > 0: # we are going to shift down - first do the as, then the bs ops += do_shifts_a(nal, nbk, al, aother, bother) ops += do_shifts_b(al, nbk, bk, aother, bother) else: # we are going to shift up - first do the bs, then the as ops += do_shifts_b(nal, nbk, bk, aother, bother) ops += do_shifts_a(nal, bk, al, aother, bother) nabuckets[r] = al nbbuckets[r] = bk ops.reverse() return ops def try_shifted_sum(func, z): """ Try to recognise a hypergeometric sum that starts from k > 0. """ abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1) if len(abuckets[S.Zero]) != 1: return None r = abuckets[S.Zero][0] if r <= 0: return None if not S.Zero in bbuckets: return None l = list(bbuckets[S.Zero]) l.sort() k = l[0] if k <= 0: return None nap = list(func.ap) nap.remove(r) nbq = list(func.bq) nbq.remove(k) k -= 1 nap = [x - k for x in nap] nbq = [x - k for x in nbq] ops = [] for n in range(r - 1): ops.append(ShiftA(n + 1)) ops.reverse() fac = factorial(k)/z**k for a in nap: fac /= rf(a, k) for b in nbq: fac *= rf(b, k) ops += [MultOperator(fac)] p = 0 for n in range(k): m = z**n/factorial(n) for a in nap: m *= rf(a, n) for b in nbq: m /= rf(b, n) p += m return Hyper_Function(nap, nbq), ops, -p def try_polynomial(func, z): """ Recognise polynomial cases. Returns None if not such a case. Requires order to be fully reduced. """ abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1) a0 = abuckets[S.Zero] b0 = bbuckets[S.Zero] a0.sort() b0.sort() al0 = [x for x in a0 if x <= 0] bl0 = [x for x in b0 if x <= 0] if bl0 and all(a < bl0[-1] for a in al0): return oo if not al0: return None a = al0[-1] fac = 1 res = S.One for n in Tuple(*list(range(-a))): fac *= z fac /= n + 1 for a in func.ap: fac *= a + n for b in func.bq: fac /= b + n res += fac return res def try_lerchphi(func): """ Try to find an expression for Hyper_Function ``func`` in terms of Lerch Transcendents. Return None if no such expression can be found. """ # This is actually quite simple, and is described in Roach's paper, # section 18. # We don't need to implement the reduction to polylog here, this # is handled by expand_func. from sympy.matrices import Matrix, zeros from sympy.polys import apart # First we need to figure out if the summation coefficient is a rational # function of the summation index, and construct that rational function. 
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1) paired = {} for key, value in abuckets.items(): if key != 0 and not key in bbuckets: return None bvalue = bbuckets[key] paired[key] = (list(value), list(bvalue)) bbuckets.pop(key, None) if bbuckets != {}: return None if not S.Zero in abuckets: return None aints, bints = paired[S.Zero] # Account for the additional n! in denominator paired[S.Zero] = (aints, bints + [1]) t = Dummy('t') numer = S.One denom = S.One for key, (avalue, bvalue) in paired.items(): if len(avalue) != len(bvalue): return None # Note that since order has been reduced fully, all the b are # bigger than all the a they differ from by an integer. In particular # if there are any negative b left, this function is not well-defined. for a, b in zip(avalue, bvalue): if (a - b).is_positive: k = a - b numer *= rf(b + t, k) denom *= rf(b, k) else: k = b - a numer *= rf(a, k) denom *= rf(a + t, k) # Now do a partial fraction decomposition. # We assemble two structures: a list monomials of pairs (a, b) representing # a*t**b (b a non-negative integer), and a dict terms, where # terms[a] = [(b, c)] means that there is a term b/(t-a)**c. part = apart(numer/denom, t) args = Add.make_args(part) monomials = [] terms = {} for arg in args: numer, denom = arg.as_numer_denom() if not denom.has(t): p = Poly(numer, t) if not p.is_monomial: raise TypeError("p should be monomial") ((b, ), a) = p.LT() monomials += [(a/denom, b)] continue if numer.has(t): raise NotImplementedError('Need partial fraction decomposition' ' with linear denominators') indep, [dep] = denom.as_coeff_mul(t) n = 1 if dep.is_Pow: n = dep.exp dep = dep.base if dep == t: a == 0 elif dep.is_Add: a, tmp = dep.as_independent(t) b = 1 if tmp != t: b, _ = tmp.as_independent(t) if dep != b*t + a: raise NotImplementedError('unrecognised form %s' % dep) a /= b indep *= b**n else: raise NotImplementedError('unrecognised form of partial fraction') terms.setdefault(a, []).append((numer/indep, n)) # Now that we have this information, assemble our formula. All the # monomials yield rational functions and go into one basis element. # The terms[a] are related by differentiation. If the largest exponent is # n, we need lerchphi(z, k, a) for k = 1, 2, ..., n. # deriv maps a basis to its derivative, expressed as a C(z)-linear # combination of other basis elements. deriv = {} coeffs = {} z = Dummy('z') monomials.sort(key=lambda x: x[1]) mon = {0: 1/(1 - z)} if monomials: for k in range(monomials[-1][1]): mon[k + 1] = z*mon[k].diff(z) for a, n in monomials: coeffs.setdefault(S.One, []).append(a*mon[n]) for a, l in terms.items(): for c, k in l: coeffs.setdefault(lerchphi(z, k, a), []).append(c) l.sort(key=lambda x: x[1]) for k in range(2, l[-1][1] + 1): deriv[lerchphi(z, k, a)] = [(-a, lerchphi(z, k, a)), (1, lerchphi(z, k - 1, a))] deriv[lerchphi(z, 1, a)] = [(-a, lerchphi(z, 1, a)), (1/(1 - z), S.One)] trans = {} for n, b in enumerate([S.One] + list(deriv.keys())): trans[b] = n basis = [expand_func(b) for (b, _) in sorted(list(trans.items()), key=lambda x:x[1])] B = Matrix(basis) C = Matrix([[0]*len(B)]) for b, c in coeffs.items(): C[trans[b]] = Add(*c) M = zeros(len(B)) for b, l in deriv.items(): for c, b2 in l: M[trans[b], trans[b2]] = c return Formula(func, z, None, [], B, C, M) def build_hypergeometric_formula(func): """ Create a formula object representing the hypergeometric function ``func``. """ # We know that no `ap` are negative integers, otherwise "detect poly" # would have kicked in. However, `ap` could be empty. 
In this case we can # use a different basis. # I'm not aware of a basis that works in all cases. from sympy import zeros, Matrix, eye z = Dummy('z') if func.ap: afactors = [_x + a for a in func.ap] bfactors = [_x + b - 1 for b in func.bq] expr = _x*Mul(*bfactors) - z*Mul(*afactors) poly = Poly(expr, _x) n = poly.degree() basis = [] M = zeros(n) for k in range(n): a = func.ap[0] + k basis += [hyper([a] + list(func.ap[1:]), func.bq, z)] if k < n - 1: M[k, k] = -a M[k, k + 1] = a B = Matrix(basis) C = Matrix([[1] + [0]*(n - 1)]) derivs = [eye(n)] for k in range(n): derivs.append(M*derivs[k]) l = poly.all_coeffs() l.reverse() res = [0]*n for k, c in enumerate(l): for r, d in enumerate(C*derivs[k]): res[r] += c*d for k, c in enumerate(res): M[n - 1, k] = -c/derivs[n - 1][0, n - 1]/poly.all_coeffs()[0] return Formula(func, z, None, [], B, C, M) else: # Since there are no `ap`, none of the `bq` can be non-positive # integers. basis = [] bq = list(func.bq[:]) for i in range(len(bq)): basis += [hyper([], bq, z)] bq[i] += 1 basis += [hyper([], bq, z)] B = Matrix(basis) n = len(B) C = Matrix([[1] + [0]*(n - 1)]) M = zeros(n) M[0, n - 1] = z/Mul(*func.bq) for k in range(1, n): M[k, k - 1] = func.bq[k - 1] M[k, k] = -func.bq[k - 1] return Formula(func, z, None, [], B, C, M) def hyperexpand_special(ap, bq, z): """ Try to find a closed-form expression for hyper(ap, bq, z), where ``z`` is supposed to be a "special" value, e.g. 1. This function tries various of the classical summation formulae (Gauss, Saalschuetz, etc). """ # This code is very ad-hoc. There are many clever algorithms # (notably Zeilberger's) related to this problem. # For now we just want a few simple cases to work. p, q = len(ap), len(bq) z_ = z z = unpolarify(z) if z == 0: return S.One if p == 2 and q == 1: # 2F1 a, b, c = ap + bq if z == 1: # Gauss return gamma(c - a - b)*gamma(c)/gamma(c - a)/gamma(c - b) if z == -1 and simplify(b - a + c) == 1: b, a = a, b if z == -1 and simplify(a - b + c) == 1: # Kummer if b.is_integer and b.is_negative: return 2*cos(pi*b/2)*gamma(-b)*gamma(b - a + 1) \ /gamma(-b/2)/gamma(b/2 - a + 1) else: return gamma(b/2 + 1)*gamma(b - a + 1) \ /gamma(b + 1)/gamma(b/2 - a + 1) # TODO tons of more formulae # investigate what algorithms exist return hyper(ap, bq, z_) _collection = None def _hyperexpand(func, z, ops0=[], z0=Dummy('z0'), premult=1, prem=0, rewrite='default'): """ Try to find an expression for the hypergeometric function ``func``. The result is expressed in terms of a dummy variable z0. Then it is multiplied by premult. Then ops0 is applied. premult must be a*z**prem for some a independent of z. """ if z.is_zero: return S.One z = polarify(z, subs=False) if rewrite == 'default': rewrite = 'nonrepsmall' def carryout_plan(f, ops): C = apply_operators(f.C.subs(f.z, z0), ops, make_derivative_operator(f.M.subs(f.z, z0), z0)) from sympy import eye C = apply_operators(C, ops0, make_derivative_operator(f.M.subs(f.z, z0) + prem*eye(f.M.shape[0]), z0)) if premult == 1: C = C.applyfunc(make_simp(z0)) r = reduce(lambda s,m: s+m[0]*m[1], zip(C, f.B.subs(f.z, z0)), S.Zero)*premult res = r.subs(z0, z) if rewrite: res = res.rewrite(rewrite) return res # TODO # The following would be possible: # *) PFD Duplication (see Kelly Roach's paper) # *) In a similar spirit, try_lerchphi() can be generalised considerably. global _collection if _collection is None: _collection = FormulaCollection() debug('Trying to expand hypergeometric function ', func) # First reduce order as much as possible. 
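    # Added note (not in the original source): order reduction cancels an upper
    # parameter against a lower one when they differ by a non-negative integer;
    # e.g. 3F2(1, 1, 2; 2, 3; z) contains the matching pair (2, 2) and reduces
    # exactly to 2F1(1, 1; 3; z).  See reduce_order above for the operator form.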
func, ops = reduce_order(func) if ops: debug(' Reduced order to ', func) else: debug(' Could not reduce order.') # Now try polynomial cases res = try_polynomial(func, z0) if res is not None: debug(' Recognised polynomial.') p = apply_operators(res, ops, lambda f: z0*f.diff(z0)) p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0)) return unpolarify(simplify(p).subs(z0, z)) # Try to recognise a shifted sum. p = S.Zero res = try_shifted_sum(func, z0) if res is not None: func, nops, p = res debug(' Recognised shifted sum, reduced order to ', func) ops += nops # apply the plan for poly p = apply_operators(p, ops, lambda f: z0*f.diff(z0)) p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0)) p = simplify(p).subs(z0, z) # Try special expansions early. if unpolarify(z) in [1, -1] and (len(func.ap), len(func.bq)) == (2, 1): f = build_hypergeometric_formula(func) r = carryout_plan(f, ops).replace(hyper, hyperexpand_special) if not r.has(hyper): return r + p # Try to find a formula in our collection formula = _collection.lookup_origin(func) # Now try a lerch phi formula if formula is None: formula = try_lerchphi(func) if formula is None: debug(' Could not find an origin. ', 'Will return answer in terms of ' 'simpler hypergeometric functions.') formula = build_hypergeometric_formula(func) debug(' Found an origin: ', formula.closed_form, ' ', formula.func) # We need to find the operators that convert formula into func. ops += devise_plan(func, formula.func, z0) # Now carry out the plan. r = carryout_plan(formula, ops) + p return powdenest(r, polar=True).replace(hyper, hyperexpand_special) def devise_plan_meijer(fro, to, z): """ Find operators to convert G-function ``fro`` into G-function ``to``. It is assumed that fro and to have the same signatures, and that in fact any corresponding pair of parameters differs by integers, and a direct path is possible. I.e. if there are parameters a1 b1 c1 and a2 b2 c2 it is assumed that a1 can be shifted to a2, etc. The only thing this routine determines is the order of shifts to apply, nothing clever will be tried. It is also assumed that fro is suitable. Examples ======== >>> from sympy.simplify.hyperexpand import (devise_plan_meijer, ... G_Function) >>> from sympy.abc import z Empty plan: >>> devise_plan_meijer(G_Function([1], [2], [3], [4]), ... G_Function([1], [2], [3], [4]), z) [] Very simple plans: >>> devise_plan_meijer(G_Function([0], [], [], []), ... G_Function([1], [], [], []), z) [<Increment upper a index #0 of [0], [], [], [].>] >>> devise_plan_meijer(G_Function([0], [], [], []), ... G_Function([-1], [], [], []), z) [<Decrement upper a=0.>] >>> devise_plan_meijer(G_Function([], [1], [], []), ... G_Function([], [2], [], []), z) [<Increment lower a index #0 of [], [1], [], [].>] Slightly more complicated plans: >>> devise_plan_meijer(G_Function([0], [], [], []), ... G_Function([2], [], [], []), z) [<Increment upper a index #0 of [1], [], [], [].>, <Increment upper a index #0 of [0], [], [], [].>] >>> devise_plan_meijer(G_Function([0], [], [0], []), ... G_Function([-1], [], [1], []), z) [<Increment upper b=0.>, <Decrement upper a=0.>] Order matters: >>> devise_plan_meijer(G_Function([0], [], [0], []), ... G_Function([1], [], [1], []), z) [<Increment upper a index #0 of [0], [], [1], [].>, <Increment upper b=0.>] """ # TODO for now, we use the following simple heuristic: inverse-shift # when possible, shift otherwise. Give up if we cannot make progress. 
def try_shift(f, t, shifter, diff, counter): """ Try to apply ``shifter`` in order to bring some element in ``f`` nearer to its counterpart in ``to``. ``diff`` is +/- 1 and determines the effect of ``shifter``. Counter is a list of elements blocking the shift. Return an operator if change was possible, else None. """ for idx, (a, b) in enumerate(zip(f, t)): if ( (a - b).is_integer and (b - a)/diff > 0 and all(a != x for x in counter)): sh = shifter(idx) f[idx] += diff return sh fan = list(fro.an) fap = list(fro.ap) fbm = list(fro.bm) fbq = list(fro.bq) ops = [] change = True while change: change = False op = try_shift(fan, to.an, lambda i: MeijerUnShiftB(fan, fap, fbm, fbq, i, z), 1, fbm + fbq) if op is not None: ops += [op] change = True continue op = try_shift(fap, to.ap, lambda i: MeijerUnShiftD(fan, fap, fbm, fbq, i, z), 1, fbm + fbq) if op is not None: ops += [op] change = True continue op = try_shift(fbm, to.bm, lambda i: MeijerUnShiftA(fan, fap, fbm, fbq, i, z), -1, fan + fap) if op is not None: ops += [op] change = True continue op = try_shift(fbq, to.bq, lambda i: MeijerUnShiftC(fan, fap, fbm, fbq, i, z), -1, fan + fap) if op is not None: ops += [op] change = True continue op = try_shift(fan, to.an, lambda i: MeijerShiftB(fan[i]), -1, []) if op is not None: ops += [op] change = True continue op = try_shift(fap, to.ap, lambda i: MeijerShiftD(fap[i]), -1, []) if op is not None: ops += [op] change = True continue op = try_shift(fbm, to.bm, lambda i: MeijerShiftA(fbm[i]), 1, []) if op is not None: ops += [op] change = True continue op = try_shift(fbq, to.bq, lambda i: MeijerShiftC(fbq[i]), 1, []) if op is not None: ops += [op] change = True continue if fan != list(to.an) or fap != list(to.ap) or fbm != list(to.bm) or \ fbq != list(to.bq): raise NotImplementedError('Could not devise plan.') ops.reverse() return ops _meijercollection = None def _meijergexpand(func, z0, allow_hyper=False, rewrite='default', place=None): """ Try to find an expression for the Meijer G function specified by the G_Function ``func``. If ``allow_hyper`` is True, then returning an expression in terms of hypergeometric functions is allowed. Currently this just does Slater's theorem. If expansions exist both at zero and at infinity, ``place`` can be set to ``0`` or ``zoo`` for the preferred choice. """ global _meijercollection if _meijercollection is None: _meijercollection = MeijerFormulaCollection() if rewrite == 'default': rewrite = None func0 = func debug('Try to expand Meijer G function corresponding to ', func) # We will play games with analytic continuation - rather use a fresh symbol z = Dummy('z') func, ops = reduce_order_meijer(func) if ops: debug(' Reduced order to ', func) else: debug(' Could not reduce order.') # Try to find a direct formula f = _meijercollection.lookup_origin(func) if f is not None: debug(' Found a Meijer G formula: ', f.func) ops += devise_plan_meijer(f.func, func, z) # Now carry out the plan. C = apply_operators(f.C.subs(f.z, z), ops, make_derivative_operator(f.M.subs(f.z, z), z)) C = C.applyfunc(make_simp(z)) r = C*f.B.subs(f.z, z) r = r[0].subs(z, z0) return powdenest(r, polar=True) debug(" Could not find a direct formula. Trying Slater's theorem.") # TODO the following would be possible: # *) Paired Index Theorems # *) PFD Duplication # (See Kelly Roach's paper for details on either.) # # TODO Also, we tend to create combinations of gamma functions that can be # simplified. def can_do(pbm, pap): """ Test if slater applies. 
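In rough terms (a paraphrase of the check below): Slater's expansion as implemented in do_slater is attempted only if, within every residue class of the b-parameters, the number of b's exceeds the number of a-parameters from ``ap`` in the same class by at most one.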
""" for i in pbm: if len(pbm[i]) > 1: l = 0 if i in pap: l = len(pap[i]) if l + 1 < len(pbm[i]): return False return True def do_slater(an, bm, ap, bq, z, zfinal): # zfinal is the value that will eventually be substituted for z. # We pass it to _hyperexpand to improve performance. func = G_Function(an, bm, ap, bq) _, pbm, pap, _ = func.compute_buckets() if not can_do(pbm, pap): return S.Zero, False cond = len(an) + len(ap) < len(bm) + len(bq) if len(an) + len(ap) == len(bm) + len(bq): cond = abs(z) < 1 if cond is False: return S.Zero, False res = S.Zero for m in pbm: if len(pbm[m]) == 1: bh = pbm[m][0] fac = 1 bo = list(bm) bo.remove(bh) for bj in bo: fac *= gamma(bj - bh) for aj in an: fac *= gamma(1 + bh - aj) for bj in bq: fac /= gamma(1 + bh - bj) for aj in ap: fac /= gamma(aj - bh) nap = [1 + bh - a for a in list(an) + list(ap)] nbq = [1 + bh - b for b in list(bo) + list(bq)] k = polar_lift(S.NegativeOne**(len(ap) - len(bm))) harg = k*zfinal # NOTE even though k "is" +-1, this has to be t/k instead of # t*k ... we are using polar numbers for consistency! premult = (t/k)**bh hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops, t, premult, bh, rewrite=None) res += fac * hyp else: b_ = pbm[m][0] ki = [bi - b_ for bi in pbm[m][1:]] u = len(ki) li = [ai - b_ for ai in pap[m][:u + 1]] bo = list(bm) for b in pbm[m]: bo.remove(b) ao = list(ap) for a in pap[m][:u]: ao.remove(a) lu = li[-1] di = [l - k for (l, k) in zip(li, ki)] # We first work out the integrand: s = Dummy('s') integrand = z**s for b in bm: if not Mod(b, 1) and b.is_Number: b = int(round(b)) integrand *= gamma(b - s) for a in an: integrand *= gamma(1 - a + s) for b in bq: integrand /= gamma(1 - b + s) for a in ap: integrand /= gamma(a - s) # Now sum the finitely many residues: # XXX This speeds up some cases - is it a good idea? integrand = expand_func(integrand) for r in range(int(round(lu))): resid = residue(integrand, s, b_ + r) resid = apply_operators(resid, ops, lambda f: z*f.diff(z)) res -= resid # Now the hypergeometric term. au = b_ + lu k = polar_lift(S.NegativeOne**(len(ao) + len(bo) + 1)) harg = k*zfinal premult = (t/k)**au nap = [1 + au - a for a in list(an) + list(ap)] + [1] nbq = [1 + au - b for b in list(bm) + list(bq)] hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops, t, premult, au, rewrite=None) C = S.NegativeOne**(lu)/factorial(lu) for i in range(u): C *= S.NegativeOne**di[i]/rf(lu - li[i] + 1, di[i]) for a in an: C *= gamma(1 - a + au) for b in bo: C *= gamma(b - au) for a in ao: C /= gamma(a - au) for b in bq: C /= gamma(1 - b + au) res += C*hyp return res, cond t = Dummy('t') slater1, cond1 = do_slater(func.an, func.bm, func.ap, func.bq, z, z0) def tr(l): return [1 - x for x in l] for op in ops: op._poly = Poly(op._poly.subs({z: 1/t, _x: -_x}), _x) slater2, cond2 = do_slater(tr(func.bm), tr(func.an), tr(func.bq), tr(func.ap), t, 1/z0) slater1 = powdenest(slater1.subs(z, z0), polar=True) slater2 = powdenest(slater2.subs(t, 1/z0), polar=True) if not isinstance(cond2, bool): cond2 = cond2.subs(t, 1/z) m = func(z) if m.delta > 0 or \ (m.delta == 0 and len(m.ap) == len(m.bq) and (re(m.nu) < -1) is not False and polar_lift(z0) == polar_lift(1)): # The condition delta > 0 means that the convergence region is # connected. Any expression we find can be continued analytically # to the entire convergence region. # The conditions delta==0, p==q, re(nu) < -1 imply that G is continuous # on the positive reals, so the values at z=1 agree. 
if cond1 is not False: cond1 = True if cond2 is not False: cond2 = True if cond1 is True: slater1 = slater1.rewrite(rewrite or 'nonrep') else: slater1 = slater1.rewrite(rewrite or 'nonrepsmall') if cond2 is True: slater2 = slater2.rewrite(rewrite or 'nonrep') else: slater2 = slater2.rewrite(rewrite or 'nonrepsmall') if cond1 is not False and cond2 is not False: # If one condition is False, there is no choice. if place == 0: cond2 = False if place == zoo: cond1 = False if not isinstance(cond1, bool): cond1 = cond1.subs(z, z0) if not isinstance(cond2, bool): cond2 = cond2.subs(z, z0) def weight(expr, cond): if cond is True: c0 = 0 elif cond is False: c0 = 1 else: c0 = 2 if expr.has(oo, zoo, -oo, nan): # XXX this actually should not happen, but consider # S('meijerg(((0, -1/2, 0, -1/2, 1/2), ()), ((0,), # (-1/2, -1/2, -1/2, -1)), exp_polar(I*pi))/4') c0 = 3 return (c0, expr.count(hyper), expr.count_ops()) w1 = weight(slater1, cond1) w2 = weight(slater2, cond2) if min(w1, w2) <= (0, 1, oo): if w1 < w2: return slater1 else: return slater2 if max(w1[0], w2[0]) <= 1 and max(w1[1], w2[1]) <= 1: return Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True)) # We couldn't find an expression without hypergeometric functions. # TODO it would be helpful to give conditions under which the integral # is known to diverge. r = Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True)) if r.has(hyper) and not allow_hyper: debug(' Could express using hypergeometric functions, ' 'but not allowed.') if not r.has(hyper) or allow_hyper: return r return func0(z0) def hyperexpand(f, allow_hyper=False, rewrite='default', place=None): """ Expand hypergeometric functions. If allow_hyper is True, allow partial simplification (that is a result different from input, but still containing hypergeometric functions). If a G-function has expansions both at zero and at infinity, ``place`` can be set to ``0`` or ``zoo`` to indicate the preferred choice. Examples ======== >>> from sympy.simplify.hyperexpand import hyperexpand >>> from sympy.functions import hyper >>> from sympy.abc import z >>> hyperexpand(hyper([], [], z)) exp(z) Non-hypergeometric parts of the expression and hypergeometric expressions that are not recognised are left unchanged: >>> hyperexpand(1 + hyper([1, 1, 1], [], z)) hyper((1, 1, 1), (), z) + 1 """ f = sympify(f) def do_replace(ap, bq, z): r = _hyperexpand(Hyper_Function(ap, bq), z, rewrite=rewrite) if r is None: return hyper(ap, bq, z) else: return r def do_meijer(ap, bq, z): r = _meijergexpand(G_Function(ap[0], ap[1], bq[0], bq[1]), z, allow_hyper, rewrite=rewrite, place=place) if not r.has(nan, zoo, oo, -oo): return r return f.replace(hyper, do_replace).replace(meijerg, do_meijer)
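# Illustrative usage (a sketch; hedged, since the printed form may differ slightly between SymPy versions): # >>> from sympy import hyper, hyperexpand, meijerg # >>> from sympy.abc import z # >>> hyperexpand(hyper([1, 1], [2], z)) # classical 2F1 closed form # -log(1 - z)/z # >>> hyperexpand(meijerg([[], []], [[0], []], z)) # exp(-z)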
6fbe20c0ed2db7488a6f8ade3858604cfb6bcd792e314961a3c089c90004b5e2
from __future__ import print_function, division from collections import defaultdict from sympy.core import (Basic, S, Add, Mul, Pow, Symbol, sympify, expand_func, Function, Dummy, Expr, factor_terms, expand_power_exp, Eq) from sympy.core.compatibility import iterable, ordered, as_int from sympy.core.parameters import global_parameters from sympy.core.function import (expand_log, count_ops, _mexpand, _coeff_isneg, nfloat, expand_mul) from sympy.core.numbers import Float, I, pi, Rational, Integer from sympy.core.relational import Relational from sympy.core.rules import Transform from sympy.core.sympify import _sympify from sympy.functions import gamma, exp, sqrt, log, exp_polar, re from sympy.functions.combinatorial.factorials import CombinatorialFunction from sympy.functions.elementary.complexes import unpolarify from sympy.functions.elementary.exponential import ExpBase from sympy.functions.elementary.hyperbolic import HyperbolicFunction from sympy.functions.elementary.integers import ceiling from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold from sympy.functions.elementary.trigonometric import TrigonometricFunction from sympy.functions.special.bessel import besselj, besseli, besselk, jn, bessely from sympy.functions.special.tensor_functions import KroneckerDelta from sympy.polys import together, cancel, factor from sympy.simplify.combsimp import combsimp from sympy.simplify.cse_opts import sub_pre, sub_post from sympy.simplify.powsimp import powsimp from sympy.simplify.radsimp import radsimp, fraction, collect_abs from sympy.simplify.sqrtdenest import sqrtdenest from sympy.simplify.trigsimp import trigsimp, exptrigsimp from sympy.utilities.iterables import has_variety, sift import mpmath def separatevars(expr, symbols=[], dict=False, force=False): """ Separates variables in an expression, if possible. By default, it separates with respect to all symbols in an expression and collects constant coefficients that are independent of symbols. If dict=True then the separated terms will be returned in a dictionary keyed to their corresponding symbols. By default, all symbols in the expression will appear as keys; if symbols are provided, then all those symbols will be used as keys, and any terms in the expression containing other symbols or non-symbols will be returned keyed to the string 'coeff'. (Passing None for symbols will return the expression in a dictionary keyed to 'coeff'.) If force=True, then bases of powers will be separated regardless of assumptions on the symbols involved. Notes ===== The order of the factors is determined by Mul, so that the separated expressions may not necessarily be grouped together. Although factoring is necessary to separate variables in some expressions, it is not necessary in all cases, so one should not count on the returned factors being factored. Examples ======== >>> from sympy.abc import x, y, z, alpha >>> from sympy import separatevars, sin >>> separatevars((x*y)**y) (x*y)**y >>> separatevars((x*y)**y, force=True) x**y*y**y >>> e = 2*x**2*z*sin(y)+2*z*x**2 >>> separatevars(e) 2*x**2*z*(sin(y) + 1) >>> separatevars(e, symbols=(x, y), dict=True) {'coeff': 2*z, x: x**2, y: sin(y) + 1} >>> separatevars(e, [x, y, alpha], dict=True) {'coeff': 2*z, alpha: 1, x: x**2, y: sin(y) + 1} If the expression is not really separable, or is only partially separable, separatevars will do the best it can to separate it by using factoring. 
>>> separatevars(x + x*y - 3*x**2) -x*(3*x - y - 1) If the expression is not separable then expr is returned unchanged or (if dict=True) then None is returned. >>> eq = 2*x + y*sin(x) >>> separatevars(eq) == eq True >>> separatevars(2*x + y*sin(x), symbols=(x, y), dict=True) is None True """ expr = sympify(expr) if dict: return _separatevars_dict(_separatevars(expr, force), symbols) else: return _separatevars(expr, force) def _separatevars(expr, force): from sympy.functions.elementary.complexes import Abs if isinstance(expr, Abs): arg = expr.args[0] if arg.is_Mul and not arg.is_number: s = separatevars(arg, dict=True, force=force) if s is not None: return Mul(*map(expr.func, s.values())) else: return expr if len(expr.free_symbols) < 2: return expr # don't destroy a Mul since much of the work may already be done if expr.is_Mul: args = list(expr.args) changed = False for i, a in enumerate(args): args[i] = separatevars(a, force) changed = changed or args[i] != a if changed: expr = expr.func(*args) return expr # get a Pow ready for expansion if expr.is_Pow: expr = Pow(separatevars(expr.base, force=force), expr.exp) # First try other expansion methods expr = expr.expand(mul=False, multinomial=False, force=force) _expr, reps = posify(expr) if force else (expr, {}) expr = factor(_expr).subs(reps) if not expr.is_Add: return expr # Find any common coefficients to pull out args = list(expr.args) commonc = args[0].args_cnc(cset=True, warn=False)[0] for i in args[1:]: commonc &= i.args_cnc(cset=True, warn=False)[0] commonc = Mul(*commonc) commonc = commonc.as_coeff_Mul()[1] # ignore constants commonc_set = commonc.args_cnc(cset=True, warn=False)[0] # remove them for i, a in enumerate(args): c, nc = a.args_cnc(cset=True, warn=False) c = c - commonc_set args[i] = Mul(*c)*Mul(*nc) nonsepar = Add(*args) if len(nonsepar.free_symbols) > 1: _expr = nonsepar _expr, reps = posify(_expr) if force else (_expr, {}) _expr = (factor(_expr)).subs(reps) if not _expr.is_Add: nonsepar = _expr return commonc*nonsepar def _separatevars_dict(expr, symbols): if symbols: if not all((t.is_Atom for t in symbols)): raise ValueError("symbols must be Atoms.") symbols = list(symbols) elif symbols is None: return {'coeff': expr} else: symbols = list(expr.free_symbols) if not symbols: return None ret = dict(((i, []) for i in symbols + ['coeff'])) for i in Mul.make_args(expr): expsym = i.free_symbols intersection = set(symbols).intersection(expsym) if len(intersection) > 1: return None if len(intersection) == 0: # There are no symbols, so it is part of the coefficient ret['coeff'].append(i) else: ret[intersection.pop()].append(i) # rebuild for k, v in ret.items(): ret[k] = Mul(*v) return ret def _is_sum_surds(p): args = p.args if p.is_Add else [p] for y in args: if not ((y**2).is_Rational and y.is_extended_real): return False return True def posify(eq): """Return eq (with generic symbols made positive) and a dictionary containing the mapping between the old and new symbols. Any symbol that has positive=None will be replaced with a positive dummy symbol having the same name. This replacement will allow more symbolic processing of expressions, especially those involving powers and logarithms. A dictionary that can be sent to subs to restore eq to its original symbols is also returned. 
>>> from sympy import posify, Symbol, log, solve >>> from sympy.abc import x >>> posify(x + Symbol('p', positive=True) + Symbol('n', negative=True)) (_x + n + p, {_x: x}) >>> eq = 1/x >>> log(eq).expand() log(1/x) >>> log(posify(eq)[0]).expand() -log(_x) >>> p, rep = posify(eq) >>> log(p).expand().subs(rep) -log(x) It is possible to apply the same transformations to an iterable of expressions: >>> eq = x**2 - 4 >>> solve(eq, x) [-2, 2] >>> eq_x, reps = posify([eq, x]); eq_x [_x**2 - 4, _x] >>> solve(*eq_x) [2] """ eq = sympify(eq) if iterable(eq): f = type(eq) eq = list(eq) syms = set() for e in eq: syms = syms.union(e.atoms(Symbol)) reps = {} for s in syms: reps.update(dict((v, k) for k, v in posify(s)[1].items())) for i, e in enumerate(eq): eq[i] = e.subs(reps) return f(eq), {r: s for s, r in reps.items()} reps = {s: Dummy(s.name, positive=True, **s.assumptions0) for s in eq.free_symbols if s.is_positive is None} eq = eq.subs(reps) return eq, {r: s for s, r in reps.items()} def hypersimp(f, k): """Given combinatorial term f(k) simplify its consecutive term ratio i.e. f(k+1)/f(k). The input term can be composed of functions and integer sequences which have equivalent representation in terms of gamma special function. The algorithm performs three basic steps: 1. Rewrite all functions in terms of gamma, if possible. 2. Rewrite all occurrences of gamma in terms of products of gamma and rising factorial with integer, absolute constant exponent. 3. Perform simplification of nested fractions, powers and if the resulting expression is a quotient of polynomials, reduce their total degree. If f(k) is hypergeometric then as result we arrive with a quotient of polynomials of minimal degree. Otherwise None is returned. For more information on the implemented algorithm refer to: 1. W. Koepf, Algorithms for m-fold Hypergeometric Summation, Journal of Symbolic Computation (1995) 20, 399-417 """ f = sympify(f) g = f.subs(k, k + 1) / f g = g.rewrite(gamma) g = expand_func(g) g = powsimp(g, deep=True, combine='exp') if g.is_rational_function(k): return simplify(g, ratio=S.Infinity) else: return None def hypersimilar(f, g, k): """Returns True if 'f' and 'g' are hyper-similar. Similarity in hypergeometric sense means that a quotient of f(k) and g(k) is a rational function in k. This procedure is useful in solving recurrence relations. For more information see hypersimp(). """ f, g = list(map(sympify, (f, g))) h = (f/g).rewrite(gamma) h = h.expand(func=True, basic=False) return h.is_rational_function(k) def signsimp(expr, evaluate=None): """Make all Add sub-expressions canonical wrt sign. If an Add subexpression, ``a``, can have a sign extracted, as determined by could_extract_minus_sign, it is replaced with Mul(-1, a, evaluate=False). This allows signs to be extracted from powers and products. Examples ======== >>> from sympy import signsimp, exp, symbols >>> from sympy.abc import x, y >>> i = symbols('i', odd=True) >>> n = -1 + 1/x >>> n/x/(-n)**2 - 1/n/x (-1 + 1/x)/(x*(1 - 1/x)**2) - 1/(x*(-1 + 1/x)) >>> signsimp(_) 0 >>> x*n + x*-n x*(-1 + 1/x) + x*(1 - 1/x) >>> signsimp(_) 0 Since powers automatically handle leading signs >>> (-2)**i -2**i signsimp can be used to put the base of a power with an integer exponent into canonical form: >>> n**i (-1 + 1/x)**i By default, signsimp doesn't leave behind any hollow simplification: if making an Add canonical wrt sign didn't change the expression, the original Add is restored. 
If this is not desired then the keyword ``evaluate`` can be set to False: >>> e = exp(y - x) >>> signsimp(e) == e True >>> signsimp(e, evaluate=False) exp(-(x - y)) """ if evaluate is None: evaluate = global_parameters.evaluate expr = sympify(expr) if not isinstance(expr, (Expr, Relational)) or expr.is_Atom: return expr e = sub_post(sub_pre(expr)) if not isinstance(e, (Expr, Relational)) or e.is_Atom: return e if e.is_Add: return e.func(*[signsimp(a, evaluate) for a in e.args]) if evaluate: e = e.xreplace({m: -(-m) for m in e.atoms(Mul) if -(-m) != m}) return e def simplify(expr, ratio=1.7, measure=count_ops, rational=False, inverse=False, doit=True, **kwargs): """Simplifies the given expression. Simplification is not a well defined term and the exact strategies this function tries can change in the future versions of SymPy. If your algorithm relies on "simplification" (whatever it is), try to determine what you need exactly - is it powsimp()?, radsimp()?, together()?, logcombine()?, or something else? And use this particular function directly, because those are well defined and thus your algorithm will be robust. Nonetheless, especially for interactive use, or when you don't know anything about the structure of the expression, simplify() tries to apply intelligent heuristics to make the input expression "simpler". For example: >>> from sympy import simplify, cos, sin >>> from sympy.abc import x, y >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2) >>> a (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2) >>> simplify(a) x + 1 Note that we could have obtained the same result by using specific simplification functions: >>> from sympy import trigsimp, cancel >>> trigsimp(a) (x**2 + x)/x >>> cancel(_) x + 1 In some cases, applying :func:`simplify` may actually result in some more complicated expression. The default ``ratio=1.7`` prevents more extreme cases: if (result length)/(input length) > ratio, then input is returned unmodified. The ``measure`` parameter lets you specify the function used to determine how complex an expression is. The function should take a single argument as an expression and return a number such that if expression ``a`` is more complex than expression ``b``, then ``measure(a) > measure(b)``. The default measure function is :func:`~.count_ops`, which returns the total number of operations in the expression. For example, if ``ratio=1``, ``simplify`` output can't be longer than input. :: >>> from sympy import sqrt, simplify, count_ops, oo >>> root = 1/(sqrt(2)+3) Since ``simplify(root)`` would result in a slightly longer expression, root is returned unchanged instead:: >>> simplify(root, ratio=1) == root True If ``ratio=oo``, simplify will be applied anyway:: >>> count_ops(simplify(root, ratio=oo)) > count_ops(root) True Note that the shortest expression is not necessary the simplest, so setting ``ratio`` to 1 may not be a good idea. Heuristically, the default value ``ratio=1.7`` seems like a reasonable choice. You can easily define your own measure function based on what you feel should represent the "size" or "complexity" of the input expression. Note that some choices, such as ``lambda expr: len(str(expr))`` may appear to be good metrics, but have other problems (in this case, the measure function may slow down simplify too much for very large expressions). If you don't know what a good metric would be, the default, ``count_ops``, is a good one. 
For example: >>> from sympy import symbols, log >>> a, b = symbols('a b', positive=True) >>> g = log(a) + log(b) + log(a)*log(1/b) >>> h = simplify(g) >>> h log(a*b**(1 - log(a))) >>> count_ops(g) 8 >>> count_ops(h) 5 So you can see that ``h`` is simpler than ``g`` using the count_ops metric. However, we may not like how ``simplify`` (in this case, using ``logcombine``) has created the ``b**(log(1/a) + 1)`` term. A simple way to reduce this would be to give more weight to powers as operations in ``count_ops``. We can do this by using the ``visual=True`` option: >>> print(count_ops(g, visual=True)) 2*ADD + DIV + 4*LOG + MUL >>> print(count_ops(h, visual=True)) 2*LOG + MUL + POW + SUB >>> from sympy import Symbol, S >>> def my_measure(expr): ... POW = Symbol('POW') ... # Discourage powers by giving POW a weight of 10 ... count = count_ops(expr, visual=True).subs(POW, 10) ... # Every other operation gets a weight of 1 (the default) ... count = count.replace(Symbol, type(S.One)) ... return count >>> my_measure(g) 8 >>> my_measure(h) 14 >>> 15./8 > 1.7 # 1.7 is the default ratio True >>> simplify(g, measure=my_measure) -log(a)*log(b) + log(a) + log(b) Note that because ``simplify()`` internally tries many different simplification strategies and then compares them using the measure function, we get a completely different result that is still different from the input expression by doing this. If rational=True, Floats will be recast as Rationals before simplification. If rational=None, Floats will be recast as Rationals but the result will be recast as Floats. If rational=False(default) then nothing will be done to the Floats. If inverse=True, it will be assumed that a composition of inverse functions, such as sin and asin, can be cancelled in any order. For example, ``asin(sin(x))`` will yield ``x`` without checking whether x belongs to the set where this relation is true. The default is False. Note that ``simplify()`` automatically calls ``doit()`` on the final expression. You can avoid this behavior by passing ``doit=False`` as an argument. """ def shorter(*choices): """ Return the choice that has the fewest ops. In case of a tie, the expression listed first is selected. """ if not has_variety(choices): return choices[0] return min(choices, key=measure) def done(e): rv = e.doit() if doit else e return shorter(rv, collect_abs(rv)) expr = sympify(expr) kwargs = dict( ratio=kwargs.get('ratio', ratio), measure=kwargs.get('measure', measure), rational=kwargs.get('rational', rational), inverse=kwargs.get('inverse', inverse), doit=kwargs.get('doit', doit)) # no routine for Expr needs to check for is_zero if isinstance(expr, Expr) and expr.is_zero and expr*0 == S.Zero: return S.Zero _eval_simplify = getattr(expr, '_eval_simplify', None) if _eval_simplify is not None: return _eval_simplify(**kwargs) original_expr = expr = collect_abs(signsimp(expr)) if not isinstance(expr, Basic) or not expr.args: # XXX: temporary hack return expr if inverse and expr.has(Function): expr = inversecombine(expr) if not expr.args: # simplified to atomic return expr # do deep simplification handled = Add, Mul, Pow, ExpBase expr = expr.replace( # here, checking for x.args is not enough because Basic has # args but Basic does not always play well with replace, e.g. 
# when simultaneous is True found expressions will be masked # off with a Dummy but not all Basic objects in an expression # can be replaced with a Dummy lambda x: isinstance(x, Expr) and x.args and not isinstance( x, handled), lambda x: x.func(*[simplify(i, **kwargs) for i in x.args]), simultaneous=False) if not isinstance(expr, handled): return done(expr) if not expr.is_commutative: expr = nc_simplify(expr) # TODO: Apply different strategies, considering expression pattern: # is it a purely rational function? Is there any trigonometric function?... # See also https://github.com/sympy/sympy/pull/185. # rationalize Floats floats = False if rational is not False and expr.has(Float): floats = True expr = nsimplify(expr, rational=True) expr = bottom_up(expr, lambda w: getattr(w, 'normal', lambda: w)()) expr = Mul(*powsimp(expr).as_content_primitive()) _e = cancel(expr) expr1 = shorter(_e, _mexpand(_e).cancel()) # issue 6829 expr2 = shorter(together(expr, deep=True), together(expr1, deep=True)) if ratio is S.Infinity: expr = expr2 else: expr = shorter(expr2, expr1, expr) if not isinstance(expr, Basic): # XXX: temporary hack return expr expr = factor_terms(expr, sign=False) from sympy.simplify.hyperexpand import hyperexpand from sympy.functions.special.bessel import BesselBase from sympy import Sum, Product, Integral # Deal with Piecewise separately to avoid recursive growth of expressions if expr.has(Piecewise): # Fold into a single Piecewise expr = piecewise_fold(expr) # Apply doit, if doit=True expr = done(expr) # Still a Piecewise? if expr.has(Piecewise): # Fold into a single Piecewise, in case doit lead to some # expressions being Piecewise expr = piecewise_fold(expr) # kroneckersimp also affects Piecewise if expr.has(KroneckerDelta): expr = kroneckersimp(expr) # Still a Piecewise? if expr.has(Piecewise): from sympy.functions.elementary.piecewise import piecewise_simplify # Do not apply doit on the segments as it has already # been done above, but simplify expr = piecewise_simplify(expr, deep=True, doit=False) # Still a Piecewise? 
if expr.has(Piecewise): # Try factor common terms expr = shorter(expr, factor_terms(expr)) # As all expressions have been simplified above with the # complete simplify, nothing more needs to be done here return expr # hyperexpand automatically only works on hypergeometric terms # Do this after the Piecewise part to avoid recursive expansion expr = hyperexpand(expr) if expr.has(KroneckerDelta): expr = kroneckersimp(expr) if expr.has(BesselBase): expr = besselsimp(expr) if expr.has(TrigonometricFunction, HyperbolicFunction): expr = trigsimp(expr, deep=True) if expr.has(log): expr = shorter(expand_log(expr, deep=True), logcombine(expr)) if expr.has(CombinatorialFunction, gamma): # expression with gamma functions or non-integer arguments is # automatically passed to gammasimp expr = combsimp(expr) if expr.has(Sum): expr = sum_simplify(expr, **kwargs) if expr.has(Integral): expr = expr.xreplace(dict([ (i, factor_terms(i)) for i in expr.atoms(Integral)])) if expr.has(Product): expr = product_simplify(expr) from sympy.physics.units import Quantity from sympy.physics.units.util import quantity_simplify if expr.has(Quantity): expr = quantity_simplify(expr) short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr) short = shorter(short, cancel(short)) short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short))) if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase): short = exptrigsimp(short) # get rid of hollow 2-arg Mul factorization hollow_mul = Transform( lambda x: Mul(*x.args), lambda x: x.is_Mul and len(x.args) == 2 and x.args[0].is_Number and x.args[1].is_Add and x.is_commutative) expr = short.xreplace(hollow_mul) numer, denom = expr.as_numer_denom() if denom.is_Add: n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1)) if n is not S.One: expr = (numer*n).expand()/d if expr.could_extract_minus_sign(): n, d = fraction(expr) if d != 0: expr = signsimp(-n/(-d)) if measure(expr) > ratio*measure(original_expr): expr = original_expr # restore floats if floats and rational is None: expr = nfloat(expr, exponent=False) return done(expr) def sum_simplify(s, **kwargs): """Main function for Sum simplification""" from sympy.concrete.summations import Sum from sympy.core.function import expand if not isinstance(s, Add): s = s.xreplace(dict([(a, sum_simplify(a, **kwargs)) for a in s.atoms(Add) if a.has(Sum)])) s = expand(s) if not isinstance(s, Add): return s terms = s.args s_t = [] # Sum Terms o_t = [] # Other Terms for term in terms: sum_terms, other = sift(Mul.make_args(term), lambda i: isinstance(i, Sum), binary=True) if not sum_terms: o_t.append(term) continue other = [Mul(*other)] s_t.append(Mul(*(other + [s._eval_simplify(**kwargs) for s in sum_terms]))) result = Add(sum_combine(s_t), *o_t) return result def sum_combine(s_t): """Helper function for Sum simplification Attempts to simplify a list of sums, by combining limits / sum function's returns the simplified sum """ from sympy.concrete.summations import Sum used = [False] * len(s_t) for method in range(2): for i, s_term1 in enumerate(s_t): if not used[i]: for j, s_term2 in enumerate(s_t): if not used[j] and i != j: temp = sum_add(s_term1, s_term2, method) if isinstance(temp, Sum) or isinstance(temp, Mul): s_t[i] = temp s_term1 = s_t[i] used[j] = True result = S.Zero for i, s_term in enumerate(s_t): if not used[i]: result = Add(result, s_term) return result def factor_sum(self, limits=None, radical=False, clear=False, fraction=False, sign=True): """Return Sum with constant factors 
extracted. If ``limits`` is specified then ``self`` is the summand; the other keywords are passed to ``factor_terms``. Examples ======== >>> from sympy import Sum, Integral >>> from sympy.abc import x, y >>> from sympy.simplify.simplify import factor_sum >>> s = Sum(x*y, (x, 1, 3)) >>> factor_sum(s) y*Sum(x, (x, 1, 3)) >>> factor_sum(s.function, s.limits) y*Sum(x, (x, 1, 3)) """ # XXX deprecate in favor of direct call to factor_terms from sympy.concrete.summations import Sum kwargs = dict(radical=radical, clear=clear, fraction=fraction, sign=sign) expr = Sum(self, *limits) if limits else self return factor_terms(expr, **kwargs) def sum_add(self, other, method=0): """Helper function for Sum simplification""" from sympy.concrete.summations import Sum from sympy import Mul #we know this is something in terms of a constant * a sum #so we temporarily put the constants inside for simplification #then simplify the result def __refactor(val): args = Mul.make_args(val) sumv = next(x for x in args if isinstance(x, Sum)) constant = Mul(*[x for x in args if x != sumv]) return Sum(constant * sumv.function, *sumv.limits) if isinstance(self, Mul): rself = __refactor(self) else: rself = self if isinstance(other, Mul): rother = __refactor(other) else: rother = other if type(rself) == type(rother): if method == 0: if rself.limits == rother.limits: return factor_sum(Sum(rself.function + rother.function, *rself.limits)) elif method == 1: if simplify(rself.function - rother.function) == 0: if len(rself.limits) == len(rother.limits) == 1: i = rself.limits[0][0] x1 = rself.limits[0][1] y1 = rself.limits[0][2] j = rother.limits[0][0] x2 = rother.limits[0][1] y2 = rother.limits[0][2] if i == j: if x2 == y1 + 1: return factor_sum(Sum(rself.function, (i, x1, y2))) elif x1 == y2 + 1: return factor_sum(Sum(rself.function, (i, x2, y1))) return Add(self, other) def product_simplify(s): """Main function for Product simplification""" from sympy.concrete.products import Product terms = Mul.make_args(s) p_t = [] # Product Terms o_t = [] # Other Terms for term in terms: if isinstance(term, Product): p_t.append(term) else: o_t.append(term) used = [False] * len(p_t) for method in range(2): for i, p_term1 in enumerate(p_t): if not used[i]: for j, p_term2 in enumerate(p_t): if not used[j] and i != j: if isinstance(product_mul(p_term1, p_term2, method), Product): p_t[i] = product_mul(p_term1, p_term2, method) used[j] = True result = Mul(*o_t) for i, p_term in enumerate(p_t): if not used[i]: result = Mul(result, p_term) return result def product_mul(self, other, method=0): """Helper function for Product simplification""" from sympy.concrete.products import Product if type(self) == type(other): if method == 0: if self.limits == other.limits: return Product(self.function * other.function, *self.limits) elif method == 1: if simplify(self.function - other.function) == 0: if len(self.limits) == len(other.limits) == 1: i = self.limits[0][0] x1 = self.limits[0][1] y1 = self.limits[0][2] j = other.limits[0][0] x2 = other.limits[0][1] y2 = other.limits[0][2] if i == j: if x2 == y1 + 1: return Product(self.function, (i, x1, y2)) elif x1 == y2 + 1: return Product(self.function, (i, x2, y1)) return Mul(self, other) def _nthroot_solve(p, n, prec): """ helper function for ``nthroot`` It denests ``p**Rational(1, n)`` using its minimal polynomial """ from sympy.polys.numberfields import _minimal_polynomial_sq from sympy.solvers import solve while n % 2 == 0: p = sqrtdenest(sqrt(p)) n = n // 2 if n == 1: return p pn = p**Rational(1, n) x = 
Symbol('x') f = _minimal_polynomial_sq(p, n, x) if f is None: return None sols = solve(f, x) for sol in sols: if abs(sol - pn).n() < 1./10**prec: sol = sqrtdenest(sol) if _mexpand(sol**n) == p: return sol def logcombine(expr, force=False): """ Takes logarithms and combines them using the following rules: - log(x) + log(y) == log(x*y) if both are positive - a*log(x) == log(x**a) if x is positive and a is real If ``force`` is True then the assumptions above will be assumed to hold if there is no assumption already in place on a quantity. For example, if ``a`` is imaginary or the argument negative, force will not perform a combination but if ``a`` is a symbol with no assumptions the change will take place. Examples ======== >>> from sympy import Symbol, symbols, log, logcombine, I >>> from sympy.abc import a, x, y, z >>> logcombine(a*log(x) + log(y) - log(z)) a*log(x) + log(y) - log(z) >>> logcombine(a*log(x) + log(y) - log(z), force=True) log(x**a*y/z) >>> x,y,z = symbols('x,y,z', positive=True) >>> a = Symbol('a', real=True) >>> logcombine(a*log(x) + log(y) - log(z)) log(x**a*y/z) The transformation is limited to factors and/or terms that contain logs, so the result depends on the initial state of expansion: >>> eq = (2 + 3*I)*log(x) >>> logcombine(eq, force=True) == eq True >>> logcombine(eq.expand(), force=True) log(x**2) + I*log(x**3) See Also ======== posify: replace all symbols with symbols having positive assumptions sympy.core.function.expand_log: expand the logarithms of products and powers; the opposite of logcombine """ def f(rv): if not (rv.is_Add or rv.is_Mul): return rv def gooda(a): # bool to tell whether the leading ``a`` in ``a*log(x)`` # could appear as log(x**a) return (a is not S.NegativeOne and # -1 *could* go, but we disallow (a.is_extended_real or force and a.is_extended_real is not False)) def goodlog(l): # bool to tell whether log ``l``'s argument can combine with others a = l.args[0] return a.is_positive or force and a.is_nonpositive is not False other = [] logs = [] log1 = defaultdict(list) for a in Add.make_args(rv): if isinstance(a, log) and goodlog(a): log1[()].append(([], a)) elif not a.is_Mul: other.append(a) else: ot = [] co = [] lo = [] for ai in a.args: if ai.is_Rational and ai < 0: ot.append(S.NegativeOne) co.append(-ai) elif isinstance(ai, log) and goodlog(ai): lo.append(ai) elif gooda(ai): co.append(ai) else: ot.append(ai) if len(lo) > 1: logs.append((ot, co, lo)) elif lo: log1[tuple(ot)].append((co, lo[0])) else: other.append(a) # if there is only one log in other, put it with the # good logs if len(other) == 1 and isinstance(other[0], log): log1[()].append(([], other.pop())) # if there is only one log at each coefficient and none have # an exponent to place inside the log then there is nothing to do if not logs and all(len(log1[k]) == 1 and log1[k][0] == [] for k in log1): return rv # collapse multi-logs as far as possible in a canonical way # TODO: see if x*log(a)+x*log(a)*log(b) -> x*log(a)*(1+log(b))? # -- in this case, it's unambiguous, but if it were were a log(c) in # each term then it's arbitrary whether they are grouped by log(a) or # by log(c). 
So for now, just leave this alone; it's probably better to # let the user decide for o, e, l in logs: l = list(ordered(l)) e = log(l.pop(0).args[0]**Mul(*e)) while l: li = l.pop(0) e = log(li.args[0]**e) c, l = Mul(*o), e if isinstance(l, log): # it should be, but check to be sure log1[(c,)].append(([], l)) else: other.append(c*l) # logs that have the same coefficient can multiply for k in list(log1.keys()): log1[Mul(*k)] = log(logcombine(Mul(*[ l.args[0]**Mul(*c) for c, l in log1.pop(k)]), force=force), evaluate=False) # logs that have oppositely signed coefficients can divide for k in ordered(list(log1.keys())): if not k in log1: # already popped as -k continue if -k in log1: # figure out which has the minus sign; the one with # more op counts should be the one num, den = k, -k if num.count_ops() > den.count_ops(): num, den = den, num other.append( num*log(log1.pop(num).args[0]/log1.pop(den).args[0], evaluate=False)) else: other.append(k*log1.pop(k)) return Add(*other) return bottom_up(expr, f) def inversecombine(expr): """Simplify the composition of a function and its inverse. No attention is paid to whether the inverse is a left inverse or a right inverse; thus, the result will in general not be equivalent to the original expression. Examples ======== >>> from sympy.simplify.simplify import inversecombine >>> from sympy import asin, sin, log, exp >>> from sympy.abc import x >>> inversecombine(asin(sin(x))) x >>> inversecombine(2*log(exp(3*x))) 6*x """ def f(rv): if rv.is_Function and hasattr(rv, "inverse"): if (len(rv.args) == 1 and len(rv.args[0].args) == 1 and isinstance(rv.args[0], rv.inverse(argindex=1))): rv = rv.args[0].args[0] return rv return bottom_up(expr, f) def walk(e, *target): """iterate through the args that are the given types (target) and return a list of the args that were traversed; arguments that are not of the specified types are not traversed. Examples ======== >>> from sympy.simplify.simplify import walk >>> from sympy import Min, Max >>> from sympy.abc import x, y, z >>> list(walk(Min(x, Max(y, Min(1, z))), Min)) [Min(x, Max(y, Min(1, z)))] >>> list(walk(Min(x, Max(y, Min(1, z))), Min, Max)) [Min(x, Max(y, Min(1, z))), Max(y, Min(1, z)), Min(1, z)] See Also ======== bottom_up """ if isinstance(e, target): yield e for i in e.args: for w in walk(i, *target): yield w def bottom_up(rv, F, atoms=False, nonbasic=False): """Apply ``F`` to all expressions in an expression tree from the bottom up. If ``atoms`` is True, apply ``F`` even if there are no args; if ``nonbasic`` is True, try to apply ``F`` to non-Basic objects. """ args = getattr(rv, 'args', None) if args is not None: if args: args = tuple([bottom_up(a, F, atoms, nonbasic) for a in args]) if args != rv.args: rv = rv.func(*args) rv = F(rv) elif atoms: rv = F(rv) else: if nonbasic: try: rv = F(rv) except TypeError: pass return rv def kroneckersimp(expr): """ Simplify expressions with KroneckerDelta. 
The only simplification currently attempted is to identify multiplicative cancellation: >>> from sympy import KroneckerDelta, kroneckersimp >>> from sympy.abc import i, j >>> kroneckersimp(1 + KroneckerDelta(0, j) * KroneckerDelta(1, j)) 1 """ def args_cancel(args1, args2): for i1 in range(2): for i2 in range(2): a1 = args1[i1] a2 = args2[i2] a3 = args1[(i1 + 1) % 2] a4 = args2[(i2 + 1) % 2] if Eq(a1, a2) is S.true and Eq(a3, a4) is S.false: return True return False def cancel_kronecker_mul(m): from sympy.utilities.iterables import subsets args = m.args deltas = [a for a in args if isinstance(a, KroneckerDelta)] for delta1, delta2 in subsets(deltas, 2): args1 = delta1.args args2 = delta2.args if args_cancel(args1, args2): return 0*m return m if not expr.has(KroneckerDelta): return expr if expr.has(Piecewise): expr = expr.rewrite(KroneckerDelta) newexpr = expr expr = None while newexpr != expr: expr = newexpr newexpr = expr.replace(lambda e: isinstance(e, Mul), cancel_kronecker_mul) return expr def besselsimp(expr): """ Simplify bessel-type functions. This routine tries to simplify bessel-type functions. Currently it only works on the Bessel J and I functions, however. It works by looking at all such functions in turn, and eliminating factors of "I" and "-1" (actually their polar equivalents) in front of the argument. Then, functions of half-integer order are rewritten using trigonometric functions and functions of integer order (> 1) are rewritten using functions of lower order. Finally, if the expression was changed, compute factorization of the result with factor(). >>> from sympy import besselj, besseli, besselsimp, polar_lift, I, S >>> from sympy.abc import z, nu >>> besselsimp(besselj(nu, z*polar_lift(-1))) exp(I*pi*nu)*besselj(nu, z) >>> besselsimp(besseli(nu, z*polar_lift(-I))) exp(-I*pi*nu/2)*besselj(nu, z) >>> besselsimp(besseli(S(-1)/2, z)) sqrt(2)*cosh(z)/(sqrt(pi)*sqrt(z)) >>> besselsimp(z*besseli(0, z) + z*(besseli(2, z))/2 + besseli(1, z)) 3*z*besseli(0, z)/2 """ # TODO # - better algorithm? # - simplify (cos(pi*b)*besselj(b,z) - besselj(-b,z))/sin(pi*b) ... # - use contiguity relations?
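# Descriptive note on the helpers below: ``replacer`` swaps a Bessel function for its counterpart when the argument carries one of the given factors (e.g. besselj <-> besseli for factors of I or their polar equivalents), ``tominus`` extracts -1 / exp_polar(I*pi) factors from the argument, and ``expander`` rewrites half-integer orders through the spherical Bessel functions (jn) and lowers integer orders > 1 via the recurrence applied by expand(func=True). For instance (illustrative; exact printed form may vary), besselj(S(3)/2, z) becomes sqrt(2)*(sin(z)/z - cos(z))/(sqrt(pi)*sqrt(z)).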
def replacer(fro, to, factors): factors = set(factors) def repl(nu, z): if factors.intersection(Mul.make_args(z)): return to(nu, z) return fro(nu, z) return repl def torewrite(fro, to): def tofunc(nu, z): return fro(nu, z).rewrite(to) return tofunc def tominus(fro): def tofunc(nu, z): return exp(I*pi*nu)*fro(nu, exp_polar(-I*pi)*z) return tofunc orig_expr = expr ifactors = [I, exp_polar(I*pi/2), exp_polar(-I*pi/2)] expr = expr.replace( besselj, replacer(besselj, torewrite(besselj, besseli), ifactors)) expr = expr.replace( besseli, replacer(besseli, torewrite(besseli, besselj), ifactors)) minusfactors = [-1, exp_polar(I*pi)] expr = expr.replace( besselj, replacer(besselj, tominus(besselj), minusfactors)) expr = expr.replace( besseli, replacer(besseli, tominus(besseli), minusfactors)) z0 = Dummy('z') def expander(fro): def repl(nu, z): if (nu % 1) == S.Half: return simplify(trigsimp(unpolarify( fro(nu, z0).rewrite(besselj).rewrite(jn).expand( func=True)).subs(z0, z))) elif nu.is_Integer and nu > 1: return fro(nu, z).expand(func=True) return fro(nu, z) return repl expr = expr.replace(besselj, expander(besselj)) expr = expr.replace(bessely, expander(bessely)) expr = expr.replace(besseli, expander(besseli)) expr = expr.replace(besselk, expander(besselk)) def _bessel_simp_recursion(expr): def _use_recursion(bessel, expr): while True: bessels = expr.find(lambda x: isinstance(x, bessel)) try: for ba in sorted(bessels, key=lambda x: re(x.args[0])): a, x = ba.args bap1 = bessel(a+1, x) bap2 = bessel(a+2, x) if expr.has(bap1) and expr.has(bap2): expr = expr.subs(ba, 2*(a+1)/x*bap1 - bap2) break else: return expr except (ValueError, TypeError): return expr if expr.has(besselj): expr = _use_recursion(besselj, expr) if expr.has(bessely): expr = _use_recursion(bessely, expr) return expr expr = _bessel_simp_recursion(expr) if expr != orig_expr: expr = expr.factor() return expr def nthroot(expr, n, max_len=4, prec=15): """ compute a real nth-root of a sum of surds Parameters ========== expr : sum of surds n : integer max_len : maximum number of surds passed as constants to ``nsimplify`` Algorithm ========= First ``nsimplify`` is used to get a candidate root; if it is not a root the minimal polynomial is computed; the answer is one of its roots. Examples ======== >>> from sympy.simplify.simplify import nthroot >>> from sympy import Rational, sqrt >>> nthroot(90 + 34*sqrt(7), 3) sqrt(7) + 3 """ expr = sympify(expr) n = sympify(n) p = expr**Rational(1, n) if not n.is_integer: return p if not _is_sum_surds(expr): return p surds = [] coeff_muls = [x.as_coeff_Mul() for x in expr.args] for x, y in coeff_muls: if not x.is_rational: return p if y is S.One: continue if not (y.is_Pow and y.exp == S.Half and y.base.is_integer): return p surds.append(y) surds.sort() surds = surds[:max_len] if expr < 0 and n % 2 == 1: p = (-expr)**Rational(1, n) a = nsimplify(p, constants=surds) res = a if _mexpand(a**n) == _mexpand(-expr) else p return -res a = nsimplify(p, constants=surds) if _mexpand(a) is not _mexpand(p) and _mexpand(a**n) == _mexpand(expr): return _mexpand(a) expr = _nthroot_solve(expr, n, prec) if expr is None: return p return expr def nsimplify(expr, constants=(), tolerance=None, full=False, rational=None, rational_conversion='base10'): """ Find a simple representation for a number or, if there are free symbols or if rational=True, then replace Floats with their Rational equivalents. If no change is made and rational is not False then Floats will at least be converted to Rationals. 
For numerical expressions, a simple formula that numerically matches the given numerical expression is sought (and the input should be possible to evalf to a precision of at least 30 digits). Optionally, a list of (rationally independent) constants to include in the formula may be given. A lower tolerance may be set to find less exact matches. If no tolerance is given then the least precise value will set the tolerance (e.g. Floats default to 15 digits of precision, so would be tolerance=10**-15). With full=True, a more extensive search is performed (this is useful to find simpler numbers when the tolerance is set low). When converting to rational, if rational_conversion='base10' (the default), then convert floats to rationals using their base-10 (string) representation. When rational_conversion='exact' it uses the exact, base-2 representation. Examples ======== >>> from sympy import nsimplify, sqrt, GoldenRatio, exp, I, exp, pi >>> nsimplify(4/(1+sqrt(5)), [GoldenRatio]) -2 + 2*GoldenRatio >>> nsimplify((1/(exp(3*pi*I/5)+1))) 1/2 - I*sqrt(sqrt(5)/10 + 1/4) >>> nsimplify(I**I, [pi]) exp(-pi/2) >>> nsimplify(pi, tolerance=0.01) 22/7 >>> nsimplify(0.333333333333333, rational=True, rational_conversion='exact') 6004799503160655/18014398509481984 >>> nsimplify(0.333333333333333, rational=True) 1/3 See Also ======== sympy.core.function.nfloat """ try: return sympify(as_int(expr)) except (TypeError, ValueError): pass expr = sympify(expr).xreplace({ Float('inf'): S.Infinity, Float('-inf'): S.NegativeInfinity, }) if expr is S.Infinity or expr is S.NegativeInfinity: return expr if rational or expr.free_symbols: return _real_to_rational(expr, tolerance, rational_conversion) # SymPy's default tolerance for Rationals is 15; other numbers may have # lower tolerances set, so use them to pick the largest tolerance if None # was given if tolerance is None: tolerance = 10**-min([15] + [mpmath.libmp.libmpf.prec_to_dps(n._prec) for n in expr.atoms(Float)]) # XXX should prec be set independent of tolerance or should it be computed # from tolerance? 
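# Descriptive note: prec below is the working decimal precision for the PSLQ / mpmath.identify search, and bprec is the corresponding binary precision (roughly 3.33 bits per decimal digit).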
prec = 30 bprec = int(prec*3.33) constants_dict = {} for constant in constants: constant = sympify(constant) v = constant.evalf(prec) if not v.is_Float: raise ValueError("constants must be real-valued") constants_dict[str(constant)] = v._to_mpmath(bprec) exprval = expr.evalf(prec, chop=True) re, im = exprval.as_real_imag() # safety check to make sure that this evaluated to a number if not (re.is_Number and im.is_Number): return expr def nsimplify_real(x): orig = mpmath.mp.dps xv = x._to_mpmath(bprec) try: # We'll be happy with low precision if a simple fraction if not (tolerance or full): mpmath.mp.dps = 15 rat = mpmath.pslq([xv, 1]) if rat is not None: return Rational(-int(rat[1]), int(rat[0])) mpmath.mp.dps = prec newexpr = mpmath.identify(xv, constants=constants_dict, tol=tolerance, full=full) if not newexpr: raise ValueError if full: newexpr = newexpr[0] expr = sympify(newexpr) if x and not expr: # don't let x become 0 raise ValueError if expr.is_finite is False and not xv in [mpmath.inf, mpmath.ninf]: raise ValueError return expr finally: # even though there are returns above, this is executed # before leaving mpmath.mp.dps = orig try: if re: re = nsimplify_real(re) if im: im = nsimplify_real(im) except ValueError: if rational is None: return _real_to_rational(expr, rational_conversion=rational_conversion) return expr rv = re + im*S.ImaginaryUnit # if there was a change or rational is explicitly not wanted # return the value, else return the Rational representation if rv != expr or rational is False: return rv return _real_to_rational(expr, rational_conversion=rational_conversion) def _real_to_rational(expr, tolerance=None, rational_conversion='base10'): """ Replace all reals in expr with rationals. Examples ======== >>> from sympy import Rational >>> from sympy.simplify.simplify import _real_to_rational >>> from sympy.abc import x >>> _real_to_rational(.76 + .1*x**.5) sqrt(x)/10 + 19/25 If rational_conversion='base10', this uses the base-10 string. If rational_conversion='exact', the exact, base-2 representation is used. >>> _real_to_rational(0.333333333333333, rational_conversion='exact') 6004799503160655/18014398509481984 >>> _real_to_rational(0.333333333333333) 1/3 """ expr = _sympify(expr) inf = Float('inf') p = expr reps = {} reduce_num = None if tolerance is not None and tolerance < 1: reduce_num = ceiling(1/tolerance) for fl in p.atoms(Float): key = fl if reduce_num is not None: r = Rational(fl).limit_denominator(reduce_num) elif (tolerance is not None and tolerance >= 1 and fl.is_Integer is False): r = Rational(tolerance*round(fl/tolerance) ).limit_denominator(int(tolerance)) else: if rational_conversion == 'exact': r = Rational(fl) reps[key] = r continue elif rational_conversion != 'base10': raise ValueError("rational_conversion must be 'base10' or 'exact'") r = nsimplify(fl, rational=False) # e.g. log(3).n() -> log(3) instead of a Rational if fl and not r: r = Rational(fl) elif not r.is_Rational: if fl == inf or fl == -inf: r = S.ComplexInfinity elif fl < 0: fl = -fl d = Pow(10, int((mpmath.log(fl)/mpmath.log(10)))) r = -Rational(str(fl/d))*d elif fl > 0: d = Pow(10, int((mpmath.log(fl)/mpmath.log(10)))) r = Rational(str(fl/d))*d else: r = Integer(0) reps[key] = r return p.subs(reps, simultaneous=True) def clear_coefficients(expr, rhs=S.Zero): """Return `p, r` where `p` is the expression obtained when Rational additive and multiplicative coefficients of `expr` have been stripped away in a naive fashion (i.e. without simplification). 
The operations needed to remove the coefficients will be applied to `rhs` and returned as `r`. Examples ======== >>> from sympy.simplify.simplify import clear_coefficients >>> from sympy.abc import x, y >>> from sympy import Dummy >>> expr = 4*y*(6*x + 3) >>> clear_coefficients(expr - 2) (y*(2*x + 1), 1/6) When solving 2 or more expressions like `expr = a`, `expr = b`, etc..., it is advantageous to provide a Dummy symbol for `rhs` and simply replace it with `a`, `b`, etc... in `r`. >>> rhs = Dummy('rhs') >>> clear_coefficients(expr, rhs) (y*(2*x + 1), _rhs/12) >>> _[1].subs(rhs, 2) 1/6 """ was = None free = expr.free_symbols if expr.is_Rational: return (S.Zero, rhs - expr) while expr and was != expr: was = expr m, expr = ( expr.as_content_primitive() if free else factor_terms(expr).as_coeff_Mul(rational=True)) rhs /= m c, expr = expr.as_coeff_Add(rational=True) rhs -= c expr = signsimp(expr, evaluate = False) if _coeff_isneg(expr): expr = -expr rhs = -rhs return expr, rhs def nc_simplify(expr, deep=True): ''' Simplify a non-commutative expression composed of multiplication and raising to a power by grouping repeated subterms into one power. Priority is given to simplifications that give the fewest number of arguments in the end (for example, in a*b*a*b*c*a*b*c simplifying to (a*b)**2*c*a*b*c gives 5 arguments while a*b*(a*b*c)**2 has 3). If `expr` is a sum of such terms, the sum of the simplified terms is returned. Keyword argument `deep` controls whether or not subexpressions nested deeper inside the main expression are simplified. See examples below. Setting `deep` to `False` can save time on nested expressions that don't need simplifying on all levels. Examples ======== >>> from sympy import symbols >>> from sympy.simplify.simplify import nc_simplify >>> a, b, c = symbols("a b c", commutative=False) >>> nc_simplify(a*b*a*b*c*a*b*c) a*b*(a*b*c)**2 >>> expr = a**2*b*a**4*b*a**4 >>> nc_simplify(expr) a**2*(b*a**4)**2 >>> nc_simplify(a*b*a*b*c**2*(a*b)**2*c**2) ((a*b)**2*c**2)**2 >>> nc_simplify(a*b*a*b + 2*a*c*a**2*c*a**2*c*a) (a*b)**2 + 2*(a*c*a)**3 >>> nc_simplify(b**-1*a**-1*(a*b)**2) a*b >>> nc_simplify(a**-1*b**-1*c*a) (b*a)**(-1)*c*a >>> expr = (a*b*a*b)**2*a*c*a*c >>> nc_simplify(expr) (a*b)**4*(a*c)**2 >>> nc_simplify(expr, deep=False) (a*b*a*b)**2*(a*c)**2 ''' from sympy.matrices.expressions import (MatrixExpr, MatAdd, MatMul, MatPow, MatrixSymbol) from sympy.core.exprtools import factor_nc if isinstance(expr, MatrixExpr): expr = expr.doit(inv_expand=False) _Add, _Mul, _Pow, _Symbol = MatAdd, MatMul, MatPow, MatrixSymbol else: _Add, _Mul, _Pow, _Symbol = Add, Mul, Pow, Symbol # =========== Auxiliary functions ======================== def _overlaps(args): # Calculate a list of lists m such that m[i][j] contains the lengths # of all possible overlaps between args[:i+1] and args[i+1+j:]. # An overlap is a suffix of the prefix that matches a prefix # of the suffix. # For example, let expr=c*a*b*a*b*a*b*a*b. Then m[3][0] contains # the lengths of overlaps of c*a*b*a*b with a*b*a*b. The overlaps # are a*b*a*b, a*b and the empty word so that m[3][0]=[4,2,0]. # All overlaps rather than only the longest one are recorded # because this information helps calculate other overlap lengths. 
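# Descriptive note: the table is filled row by row -- an overlap of length v recorded for the previous prefix extends to v + 1 here exactly when the next argument matches, which is what the nested loops below check (the empty overlap, 0, is always recorded).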
m = [[([1, 0] if a == args[0] else [0]) for a in args[1:]]] for i in range(1, len(args)): overlaps = [] j = 0 for j in range(len(args) - i - 1): overlap = [] for v in m[i-1][j+1]: if j + i + 1 + v < len(args) and args[i] == args[j+i+1+v]: overlap.append(v + 1) overlap += [0] overlaps.append(overlap) m.append(overlaps) return m def _reduce_inverses(_args): # replace consecutive negative powers by an inverse # of a product of positive powers, e.g. a**-1*b**-1*c # will simplify to (a*b)**-1*c; # return that new args list and the number of negative # powers in it (inv_tot) inv_tot = 0 # total number of inverses inverses = [] args = [] for arg in _args: if isinstance(arg, _Pow) and arg.args[1] < 0: inverses = [arg**-1] + inverses inv_tot += 1 else: if len(inverses) == 1: args.append(inverses[0]**-1) elif len(inverses) > 1: args.append(_Pow(_Mul(*inverses), -1)) inv_tot -= len(inverses) - 1 inverses = [] args.append(arg) if inverses: args.append(_Pow(_Mul(*inverses), -1)) inv_tot -= len(inverses) - 1 return inv_tot, tuple(args) def get_score(s): # compute the number of arguments of s # (including in nested expressions) overall # but ignore exponents if isinstance(s, _Pow): return get_score(s.args[0]) elif isinstance(s, (_Add, _Mul)): return sum([get_score(a) for a in s.args]) return 1 def compare(s, alt_s): # compare two possible simplifications and return a # "better" one if s != alt_s and get_score(alt_s) < get_score(s): return alt_s return s # ======================================================== if not isinstance(expr, (_Add, _Mul, _Pow)) or expr.is_commutative: return expr args = expr.args[:] if isinstance(expr, _Pow): if deep: return _Pow(nc_simplify(args[0]), args[1]).doit() else: return expr elif isinstance(expr, _Add): return _Add(*[nc_simplify(a, deep=deep) for a in args]).doit() else: # get the non-commutative part c_args, args = expr.args_cnc() com_coeff = Mul(*c_args) if com_coeff != 1: return com_coeff*nc_simplify(expr/com_coeff, deep=deep) inv_tot, args = _reduce_inverses(args) # if most arguments are negative, work with the inverse # of the expression, e.g. a**-1*b*a**-1*c**-1 will become # (c*a*b**-1*a)**-1 at the end so can work with c*a*b**-1*a invert = False if inv_tot > len(args)/2: invert = True args = [a**-1 for a in args[::-1]] if deep: args = tuple(nc_simplify(a) for a in args) m = _overlaps(args) # simps will be {subterm: end} where `end` is the ending # index of a sequence of repetitions of subterm; # this is for not wasting time with subterms that are part # of longer, already considered sequences simps = {} post = 1 pre = 1 # the simplification coefficient is the number of # arguments by which contracting a given sequence # would reduce the word; e.g. in a*b*a*b*c*a*b*c, # contracting a*b*a*b to (a*b)**2 removes 3 arguments # while a*b*c*a*b*c to (a*b*c)**2 removes 6. 
It's # better to contract the latter so simplification # with a maximum simplification coefficient will be chosen max_simp_coeff = 0 simp = None # information about future simplification for i in range(1, len(args)): simp_coeff = 0 l = 0 # length of a subterm p = 0 # the power of a subterm if i < len(args) - 1: rep = m[i][0] start = i # starting index of the repeated sequence end = i+1 # ending index of the repeated sequence if i == len(args)-1 or rep == [0]: # no subterm is repeated at this stage, at least as # far as the arguments are concerned - there may be # a repetition if powers are taken into account if (isinstance(args[i], _Pow) and not isinstance(args[i].args[0], _Symbol)): subterm = args[i].args[0].args l = len(subterm) if args[i-l:i] == subterm: # e.g. a*b in a*b*(a*b)**2 is not repeated # in args (= [a, b, (a*b)**2]) but it # can be matched here p += 1 start -= l if args[i+1:i+1+l] == subterm: # e.g. a*b in (a*b)**2*a*b p += 1 end += l if p: p += args[i].args[1] else: continue else: l = rep[0] # length of the longest repeated subterm at this point start -= l - 1 subterm = args[start:end] p = 2 end += l if subterm in simps and simps[subterm] >= start: # the subterm is part of a sequence that # has already been considered continue # count how many times it's repeated while end < len(args): if l in m[end-1][0]: p += 1 end += l elif isinstance(args[end], _Pow) and args[end].args[0].args == subterm: # for cases like a*b*a*b*(a*b)**2*a*b p += args[end].args[1] end += 1 else: break # see if another match can be made, e.g. # for b*a**2 in b*a**2*b*a**3 or a*b in # a**2*b*a*b pre_exp = 0 pre_arg = 1 if start - l >= 0 and args[start-l+1:start] == subterm[1:]: if isinstance(subterm[0], _Pow): pre_arg = subterm[0].args[0] exp = subterm[0].args[1] else: pre_arg = subterm[0] exp = 1 if isinstance(args[start-l], _Pow) and args[start-l].args[0] == pre_arg: pre_exp = args[start-l].args[1] - exp start -= l p += 1 elif args[start-l] == pre_arg: pre_exp = 1 - exp start -= l p += 1 post_exp = 0 post_arg = 1 if end + l - 1 < len(args) and args[end:end+l-1] == subterm[:-1]: if isinstance(subterm[-1], _Pow): post_arg = subterm[-1].args[0] exp = subterm[-1].args[1] else: post_arg = subterm[-1] exp = 1 if isinstance(args[end+l-1], _Pow) and args[end+l-1].args[0] == post_arg: post_exp = args[end+l-1].args[1] - exp end += l p += 1 elif args[end+l-1] == post_arg: post_exp = 1 - exp end += l p += 1 # Consider a*b*a**2*b*a**2*b*a: # b*a**2 is explicitly repeated, but note # that in this case a*b*a is also repeated # so there are two possible simplifications: # a*(b*a**2)**3*a**-1 or (a*b*a)**3 # The latter is obviously simpler. 
# But in a*b*a**2*b**2*a**2 the simplifications are # a*(b*a**2)**2 and (a*b*a)**3*a in which case # it's better to stick with the shorter subterm if post_exp and exp % 2 == 0 and start > 0: exp = exp/2 _pre_exp = 1 _post_exp = 1 if isinstance(args[start-1], _Pow) and args[start-1].args[0] == post_arg: _post_exp = post_exp + exp _pre_exp = args[start-1].args[1] - exp elif args[start-1] == post_arg: _post_exp = post_exp + exp _pre_exp = 1 - exp if _pre_exp == 0 or _post_exp == 0: if not pre_exp: start -= 1 post_exp = _post_exp pre_exp = _pre_exp pre_arg = post_arg subterm = (post_arg**exp,) + subterm[:-1] + (post_arg**exp,) simp_coeff += end-start if post_exp: simp_coeff -= 1 if pre_exp: simp_coeff -= 1 simps[subterm] = end if simp_coeff > max_simp_coeff: max_simp_coeff = simp_coeff simp = (start, _Mul(*subterm), p, end, l) pre = pre_arg**pre_exp post = post_arg**post_exp if simp: subterm = _Pow(nc_simplify(simp[1], deep=deep), simp[2]) pre = nc_simplify(_Mul(*args[:simp[0]])*pre, deep=deep) post = post*nc_simplify(_Mul(*args[simp[3]:]), deep=deep) simp = pre*subterm*post if pre != 1 or post != 1: # new simplifications may be possible but no need # to recurse over arguments simp = nc_simplify(simp, deep=False) else: simp = _Mul(*args) if invert: simp = _Pow(simp, -1) # see if factor_nc(expr) is simplified better if not isinstance(expr, MatrixExpr): f_expr = factor_nc(expr) if f_expr != expr: alt_simp = nc_simplify(f_expr, deep=deep) simp = compare(simp, alt_simp) else: simp = simp.doit(inv_expand=False) return simp def dotprodsimp(expr, withsimp=False): """Simplification for a sum of products targeted at the kind of blowup that occurs during summation of products. Intended to reduce expression blowup during matrix multiplication or other similar operations. Only works with algebraic expressions and does not recurse into non. Parameters ========== withsimp : bool, optional Specifies whether a flag should be returned along with the expression to indicate roughly whether simplification was successful. It is used in ``MatrixArithmetic._eval_pow_by_recursion`` to avoid attempting to simplify an expression repetitively which does not simplify. """ def count_ops_alg(expr): """Optimized count algebraic operations with no recursion into non-algebraic args that ``core.function.count_ops`` does. Also returns whether rational functions may be present according to negative exponents of powers or non-number fractions. Returns ======= ops, ratfunc : int, bool ``ops`` is the number of algebraic operations starting at the top level expression (not recursing into non-alg children). ``ratfunc`` specifies whether the expression MAY contain rational functions which ``cancel`` MIGHT optimize. 
""" ops = 0 args = [expr] ratfunc = False while args: a = args.pop() if not isinstance(a, Basic): continue if a.is_Rational: if a is not S.One: # -1/3 = NEG + DIV ops += bool (a.p < 0) + bool (a.q != 1) elif a.is_Mul: if _coeff_isneg(a): ops += 1 if a.args[0] is S.NegativeOne: a = a.as_two_terms()[1] else: a = -a n, d = fraction(a) if n.is_Integer: ops += 1 + bool (n < 0) args.append(d) # won't be -Mul but could be Add elif d is not S.One: if not d.is_Integer: args.append(d) ratfunc=True ops += 1 args.append(n) # could be -Mul else: ops += len(a.args) - 1 args.extend(a.args) elif a.is_Add: laargs = len(a.args) negs = 0 for ai in a.args: if _coeff_isneg(ai): negs += 1 ai = -ai args.append(ai) ops += laargs - (negs != laargs) # -x - y = NEG + SUB elif a.is_Pow: ops += 1 args.append(a.base) if not ratfunc: ratfunc = a.exp.is_negative is not False return ops, ratfunc def nonalg_subs_dummies(expr, dummies): """Substitute dummy variables for non-algebraic expressions to avoid evaluation of non-algebraic terms that ``polys.polytools.cancel`` does. """ if not expr.args: return expr if expr.is_Add or expr.is_Mul or expr.is_Pow: args = None for i, a in enumerate(expr.args): c = nonalg_subs_dummies(a, dummies) if c is a: continue if args is None: args = list(expr.args) args[i] = c if args is None: return expr return expr.func(*args) return dummies.setdefault(expr, Dummy()) simplified = False # doesn't really mean simplified, rather "can simplify again" if isinstance(expr, Basic) and (expr.is_Add or expr.is_Mul or expr.is_Pow): expr2 = expr.expand(deep=True, modulus=None, power_base=False, power_exp=False, mul=True, log=False, multinomial=True, basic=False) if expr2 != expr: expr = expr2 simplified = True exprops, ratfunc = count_ops_alg(expr) if exprops >= 6: # empirically tested cutoff for expensive simplification if ratfunc: dummies = {} expr2 = nonalg_subs_dummies(expr, dummies) if expr2 is expr or count_ops_alg(expr2)[0] >= 6: # check again after substitution expr3 = cancel(expr2) if expr3 != expr2: expr = expr3.subs([(d, e) for e, d in dummies.items()]) simplified = True # very special case: x/(x-1) - 1/(x-1) -> 1 elif (exprops == 5 and expr.is_Add and expr.args [0].is_Mul and expr.args [1].is_Mul and expr.args [0].args [-1].is_Pow and expr.args [1].args [-1].is_Pow and expr.args [0].args [-1].exp is S.NegativeOne and expr.args [1].args [-1].exp is S.NegativeOne): expr2 = together (expr) expr2ops = count_ops_alg(expr2)[0] if expr2ops < exprops: expr = expr2 simplified = True else: simplified = True return (expr, simplified) if withsimp else expr
d5b01d7046e8c82bfbb587df8e7a74633fa2c6b3c09abd168f73aad06852c3e5
"""The module helps converting SymPy expressions into shorter forms of them. for example: the expression E**(pi*I) will be converted into -1 the expression (x+x)**2 will be converted into 4*x**2 """ from .simplify import (simplify, hypersimp, hypersimilar, logcombine, separatevars, posify, besselsimp, kroneckersimp, signsimp, bottom_up, nsimplify) from .fu import FU, fu from .sqrtdenest import sqrtdenest from .cse_main import cse from .traversaltools import use from .epathtools import epath, EPath from .hyperexpand import hyperexpand from .radsimp import collect, rcollect, radsimp, collect_const, fraction, numer, denom from .trigsimp import trigsimp, exptrigsimp from .powsimp import powsimp, powdenest from .combsimp import combsimp from .gammasimp import gammasimp from .ratsimp import ratsimp, ratsimpmodprime __all__ = [ 'simplify', 'hypersimp', 'hypersimilar', 'logcombine', 'separatevars', 'posify', 'besselsimp', 'kroneckersimp', 'signsimp', 'bottom_up', 'nsimplify', 'FU', 'fu', 'sqrtdenest', 'cse', 'use', 'epath', 'EPath', 'hyperexpand', 'collect', 'rcollect', 'radsimp', 'collect_const', 'fraction', 'numer', 'denom', 'trigsimp', 'exptrigsimp', 'powsimp', 'powdenest', 'combsimp', 'gammasimp', 'ratsimp', 'ratsimpmodprime', ]
27e6e3855be4cb2bafc402bf42abbaf41daf5ae756ceb1557a21b671761ef843
"""Tools for manipulation of expressions using paths. """ from __future__ import print_function, division from sympy.core import Basic class EPath(object): r""" Manipulate expressions using paths. EPath grammar in EBNF notation:: literal ::= /[A-Za-z_][A-Za-z_0-9]*/ number ::= /-?\d+/ type ::= literal attribute ::= literal "?" all ::= "*" slice ::= "[" number? (":" number? (":" number?)?)? "]" range ::= all | slice query ::= (type | attribute) ("|" (type | attribute))* selector ::= range | query range? path ::= "/" selector ("/" selector)* See the docstring of the epath() function. """ __slots__ = ("_path", "_epath") def __new__(cls, path): """Construct new EPath. """ if isinstance(path, EPath): return path if not path: raise ValueError("empty EPath") _path = path if path[0] == '/': path = path[1:] else: raise NotImplementedError("non-root EPath") epath = [] for selector in path.split('/'): selector = selector.strip() if not selector: raise ValueError("empty selector") index = 0 for c in selector: if c.isalnum() or c == '_' or c == '|' or c == '?': index += 1 else: break attrs = [] types = [] if index: elements = selector[:index] selector = selector[index:] for element in elements.split('|'): element = element.strip() if not element: raise ValueError("empty element") if element.endswith('?'): attrs.append(element[:-1]) else: types.append(element) span = None if selector == '*': pass else: if selector.startswith('['): try: i = selector.index(']') except ValueError: raise ValueError("expected ']', got EOL") _span, span = selector[1:i], [] if ':' not in _span: span = int(_span) else: for elt in _span.split(':', 3): if not elt: span.append(None) else: span.append(int(elt)) span = slice(*span) selector = selector[i + 1:] if selector: raise ValueError("trailing characters in selector") epath.append((attrs, types, span)) obj = object.__new__(cls) obj._path = _path obj._epath = epath return obj def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._path) def _get_ordered_args(self, expr): """Sort ``expr.args`` using printing order. """ if expr.is_Add: return expr.as_ordered_terms() elif expr.is_Mul: return expr.as_ordered_factors() else: return expr.args def _hasattrs(self, expr, attrs): """Check if ``expr`` has any of ``attrs``. """ for attr in attrs: if not hasattr(expr, attr): return False return True def _hastypes(self, expr, types): """Check if ``expr`` is any of ``types``. """ _types = [ cls.__name__ for cls in expr.__class__.mro() ] return bool(set(_types).intersection(types)) def _has(self, expr, attrs, types): """Apply ``_hasattrs`` and ``_hastypes`` to ``expr``. """ if not (attrs or types): return True if attrs and self._hasattrs(expr, attrs): return True if types and self._hastypes(expr, types): return True return False def apply(self, expr, func, args=None, kwargs=None): """ Modify parts of an expression selected by a path. 
Examples ======== >>> from sympy.simplify.epathtools import EPath >>> from sympy import sin, cos, E >>> from sympy.abc import x, y, z, t >>> path = EPath("/*/[0]/Symbol") >>> expr = [((x, 1), 2), ((3, y), z)] >>> path.apply(expr, lambda expr: expr**2) [((x**2, 1), 2), ((3, y**2), z)] >>> path = EPath("/*/*/Symbol") >>> expr = t + sin(x + 1) + cos(x + y + E) >>> path.apply(expr, lambda expr: 2*expr) t + sin(2*x + 1) + cos(2*x + 2*y + E) """ def _apply(path, expr, func): if not path: return func(expr) else: selector, path = path[0], path[1:] attrs, types, span = selector if isinstance(expr, Basic): if not expr.is_Atom: args, basic = self._get_ordered_args(expr), True else: return expr elif hasattr(expr, '__iter__'): args, basic = expr, False else: return expr args = list(args) if span is not None: if type(span) == slice: indices = range(*span.indices(len(args))) else: indices = [span] else: indices = range(len(args)) for i in indices: try: arg = args[i] except IndexError: continue if self._has(arg, attrs, types): args[i] = _apply(path, arg, func) if basic: return expr.func(*args) else: return expr.__class__(args) _args, _kwargs = args or (), kwargs or {} _func = lambda expr: func(expr, *_args, **_kwargs) return _apply(self._epath, expr, _func) def select(self, expr): """ Retrieve parts of an expression selected by a path. Examples ======== >>> from sympy.simplify.epathtools import EPath >>> from sympy import sin, cos, E >>> from sympy.abc import x, y, z, t >>> path = EPath("/*/[0]/Symbol") >>> expr = [((x, 1), 2), ((3, y), z)] >>> path.select(expr) [x, y] >>> path = EPath("/*/*/Symbol") >>> expr = t + sin(x + 1) + cos(x + y + E) >>> path.select(expr) [x, x, y] """ result = [] def _select(path, expr): if not path: result.append(expr) else: selector, path = path[0], path[1:] attrs, types, span = selector if isinstance(expr, Basic): args = self._get_ordered_args(expr) elif hasattr(expr, '__iter__'): args = expr else: return if span is not None: if type(span) == slice: args = args[span] else: try: args = [args[span]] except IndexError: return for arg in args: if self._has(arg, attrs, types): _select(path, arg) _select(self._epath, expr) return result def epath(path, expr=None, func=None, args=None, kwargs=None): r""" Manipulate parts of an expression selected by a path. This function allows to manipulate large nested expressions in single line of code, utilizing techniques to those applied in XML processing standards (e.g. XPath). If ``func`` is ``None``, :func:`epath` retrieves elements selected by the ``path``. Otherwise it applies ``func`` to each matching element. Note that it is more efficient to create an EPath object and use the select and apply methods of that object, since this will compile the path string only once. This function should only be used as a convenient shortcut for interactive use. This is the supported syntax: * select all: ``/*`` Equivalent of ``for arg in args:``. * select slice: ``/[0]`` or ``/[1:5]`` or ``/[1:5:2]`` Supports standard Python's slice syntax. * select by type: ``/list`` or ``/list|tuple`` Emulates ``isinstance()``. * select by attribute: ``/__iter__?`` Emulates ``hasattr()``. Parameters ========== path : str | EPath A path as a string or a compiled EPath. expr : Basic | iterable An expression or a container of expressions. func : callable (optional) A callable that will be applied to matching parts. args : tuple (optional) Additional positional arguments to ``func``. kwargs : dict (optional) Additional keyword arguments to ``func``. 
Examples ======== >>> from sympy.simplify.epathtools import epath >>> from sympy import sin, cos, E >>> from sympy.abc import x, y, z, t >>> path = "/*/[0]/Symbol" >>> expr = [((x, 1), 2), ((3, y), z)] >>> epath(path, expr) [x, y] >>> epath(path, expr, lambda expr: expr**2) [((x**2, 1), 2), ((3, y**2), z)] >>> path = "/*/*/Symbol" >>> expr = t + sin(x + 1) + cos(x + y + E) >>> epath(path, expr) [x, x, y] >>> epath(path, expr, lambda expr: 2*expr) t + sin(2*x + 1) + cos(2*x + 2*y + E) """ _epath = EPath(path) if expr is None: return _epath if func is None: return _epath.select(expr) else: return _epath.apply(expr, func, args, kwargs)
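# Usage sketch (illustration only): compiling a path once with EPath and then
# reusing it, as recommended in the epath() docstring above.  The expected
# outputs repeat the docstring examples.
from sympy.simplify.epathtools import EPath
from sympy import sin, cos, E
from sympy.abc import x, y, t

path = EPath("/*/*/Symbol")                # parsed and validated once
expr = t + sin(x + 1) + cos(x + y + E)

print(path.select(expr))                   # [x, x, y]
print(path.apply(expr, lambda e: 2*e))     # t + sin(2*x + 1) + cos(2*x + 2*y + E)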
6995ea4efdb2c275a050bbaaefa45b4fb1d20eb883975c03c0056b2f7f18e5c5
from __future__ import print_function, division from sympy.core import S, sympify, Mul, Add, Expr from sympy.core.function import expand_mul, count_ops, _mexpand from sympy.core.symbol import Dummy from sympy.functions import sqrt, sign, root from sympy.polys import Poly, PolynomialError from sympy.utilities import default_sort_key def is_sqrt(expr): """Return True if expr is a sqrt, otherwise False.""" return expr.is_Pow and expr.exp.is_Rational and abs(expr.exp) is S.Half def sqrt_depth(p): """Return the maximum depth of any square root argument of p. >>> from sympy.functions.elementary.miscellaneous import sqrt >>> from sympy.simplify.sqrtdenest import sqrt_depth Neither of these square roots contains any other square roots so the depth is 1: >>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3))) 1 The sqrt(3) is contained within a square root so the depth is 2: >>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3))) 2 """ if p is S.ImaginaryUnit: return 1 if p.is_Atom: return 0 elif p.is_Add or p.is_Mul: return max([sqrt_depth(x) for x in p.args], key=default_sort_key) elif is_sqrt(p): return sqrt_depth(p.base) + 1 else: return 0 def is_algebraic(p): """Return True if p is comprised of only Rationals or square roots of Rationals and algebraic operations. Examples ======== >>> from sympy.functions.elementary.miscellaneous import sqrt >>> from sympy.simplify.sqrtdenest import is_algebraic >>> from sympy import cos >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2)))) True >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2)))) False """ if p.is_Rational: return True elif p.is_Atom: return False elif is_sqrt(p) or p.is_Pow and p.exp.is_Integer: return is_algebraic(p.base) elif p.is_Add or p.is_Mul: return all(is_algebraic(x) for x in p.args) else: return False def _subsets(n): """ Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last. Examples ======== >>> from sympy.simplify.sqrtdenest import _subsets >>> _subsets(2) [[1, 0], [0, 1], [1, 1]] """ if n == 1: a = [[1]] elif n == 2: a = [[1, 0], [0, 1], [1, 1]] elif n == 3: a = [[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]] else: b = _subsets(n - 1) a0 = [x + [0] for x in b] a1 = [x + [1] for x in b] a = a0 + [[0]*(n - 1) + [1]] + a1 return a def sqrtdenest(expr, max_iter=3): """Denests sqrts in an expression that contain other square roots if possible, otherwise returns the expr unchanged. This is based on the algorithms of [1]. Examples ======== >>> from sympy.simplify.sqrtdenest import sqrtdenest >>> from sympy import sqrt >>> sqrtdenest(sqrt(5 + 2 * sqrt(6))) sqrt(2) + sqrt(3) See Also ======== sympy.solvers.solvers.unrad References ========== .. [1] http://researcher.watson.ibm.com/researcher/files/us-fagin/symb85.pdf .. [2] D. J. Jeffrey and A. D. Rich, 'Symplifying Square Roots of Square Roots by Denesting' (available at http://www.cybertester.com/data/denest.pdf) """ expr = expand_mul(sympify(expr)) for i in range(max_iter): z = _sqrtdenest0(expr) if expr == z: return expr expr = z return expr def _sqrt_match(p): """Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to matching, sqrt(r) also has then maximal sqrt_depth among addends of p. 
Examples ======== >>> from sympy.functions.elementary.miscellaneous import sqrt >>> from sympy.simplify.sqrtdenest import _sqrt_match >>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) + 2*sqrt(1+sqrt(5))) [1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)] """ from sympy.simplify.radsimp import split_surds p = _mexpand(p) if p.is_Number: res = (p, S.Zero, S.Zero) elif p.is_Add: pargs = sorted(p.args, key=default_sort_key) sqargs = [x**2 for x in pargs] if all(sq.is_Rational and sq.is_positive for sq in sqargs): r, b, a = split_surds(p) res = a, b, r return list(res) # to make the process canonical, the argument is included in the tuple # so when the max is selected, it will be the largest arg having a # given depth v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)] nmax = max(v, key=default_sort_key) if nmax[0] == 0: res = [] else: # select r depth, _, i = nmax r = pargs.pop(i) v.pop(i) b = S.One if r.is_Mul: bv = [] rv = [] for x in r.args: if sqrt_depth(x) < depth: bv.append(x) else: rv.append(x) b = Mul._from_args(bv) r = Mul._from_args(rv) # collect terms comtaining r a1 = [] b1 = [b] for x in v: if x[0] < depth: a1.append(x[1]) else: x1 = x[1] if x1 == r: b1.append(1) else: if x1.is_Mul: x1args = list(x1.args) if r in x1args: x1args.remove(r) b1.append(Mul(*x1args)) else: a1.append(x[1]) else: a1.append(x[1]) a = Add(*a1) b = Add(*b1) res = (a, b, r**2) else: b, r = p.as_coeff_Mul() if is_sqrt(r): res = (S.Zero, b, r**2) else: res = [] return list(res) class SqrtdenestStopIteration(StopIteration): pass def _sqrtdenest0(expr): """Returns expr after denesting its arguments.""" if is_sqrt(expr): n, d = expr.as_numer_denom() if d is S.One: # n is a square root if n.base.is_Add: args = sorted(n.base.args, key=default_sort_key) if len(args) > 2 and all((x**2).is_Integer for x in args): try: return _sqrtdenest_rec(n) except SqrtdenestStopIteration: pass expr = sqrt(_mexpand(Add(*[_sqrtdenest0(x) for x in args]))) return _sqrtdenest1(expr) else: n, d = [_sqrtdenest0(i) for i in (n, d)] return n/d if isinstance(expr, Add): cs = [] args = [] for arg in expr.args: c, a = arg.as_coeff_Mul() cs.append(c) args.append(a) if all(c.is_Rational for c in cs) and all(is_sqrt(arg) for arg in args): return _sqrt_ratcomb(cs, args) if isinstance(expr, Expr): args = expr.args if args: return expr.func(*[_sqrtdenest0(a) for a in args]) return expr def _sqrtdenest_rec(expr): """Helper that denests the square root of three or more surds. It returns the denested expression; if it cannot be denested it throws SqrtdenestStopIteration Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k)); split expr.base = a + b*sqrt(r_k), where `a` and `b` are on Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on. See [1], section 6. 
Examples ======== >>> from sympy import sqrt >>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec >>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498)) -sqrt(10) + sqrt(2) + 9 + 9*sqrt(5) >>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65 >>> _sqrtdenest_rec(sqrt(w)) -sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5) """ from sympy.simplify.radsimp import radsimp, rad_rationalize, split_surds if not expr.is_Pow: return sqrtdenest(expr) if expr.base < 0: return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base)) g, a, b = split_surds(expr.base) a = a*sqrt(g) if a < b: a, b = b, a c2 = _mexpand(a**2 - b**2) if len(c2.args) > 2: g, a1, b1 = split_surds(c2) a1 = a1*sqrt(g) if a1 < b1: a1, b1 = b1, a1 c2_1 = _mexpand(a1**2 - b1**2) c_1 = _sqrtdenest_rec(sqrt(c2_1)) d_1 = _sqrtdenest_rec(sqrt(a1 + c_1)) num, den = rad_rationalize(b1, d_1) c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2))) else: c = _sqrtdenest1(sqrt(c2)) if sqrt_depth(c) > 1: raise SqrtdenestStopIteration ac = a + c if len(ac.args) >= len(expr.args): if count_ops(ac) >= count_ops(expr.base): raise SqrtdenestStopIteration d = sqrtdenest(sqrt(ac)) if sqrt_depth(d) > 1: raise SqrtdenestStopIteration num, den = rad_rationalize(b, d) r = d/sqrt(2) + num/(den*sqrt(2)) r = radsimp(r) return _mexpand(r) def _sqrtdenest1(expr, denester=True): """Return denested expr after denesting with simpler methods or, that failing, using the denester.""" from sympy.simplify.simplify import radsimp if not is_sqrt(expr): return expr a = expr.base if a.is_Atom: return expr val = _sqrt_match(a) if not val: return expr a, b, r = val # try a quick numeric denesting d2 = _mexpand(a**2 - b**2*r) if d2.is_Rational: if d2.is_positive: z = _sqrt_numeric_denest(a, b, r, d2) if z is not None: return z else: # fourth root case # sqrtdenest(sqrt(3 + 2*sqrt(3))) = # sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2 dr2 = _mexpand(-d2*r) dr = sqrt(dr2) if dr.is_Rational: z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2) if z is not None: return z/root(r, 4) else: z = _sqrt_symbolic_denest(a, b, r) if z is not None: return z if not denester or not is_algebraic(expr): return expr res = sqrt_biquadratic_denest(expr, a, b, r, d2) if res: return res # now call to the denester av0 = [a, b, r, d2] z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr))[0] if av0[1] is None: return expr if z is not None: if sqrt_depth(z) == sqrt_depth(expr) and count_ops(z) > count_ops(expr): return expr return z return expr def _sqrt_symbolic_denest(a, b, r): """Given an expression, sqrt(a + b*sqrt(b)), return the denested expression or None. Algorithm: If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with (y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and (cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2). 
Examples ======== >>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest >>> from sympy import sqrt, Symbol >>> from sympy.abc import x >>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55 >>> _sqrt_symbolic_denest(a, b, r) sqrt(11 - 2*sqrt(29)) + sqrt(5) If the expression is numeric, it will be simplified: >>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2) >>> sqrtdenest(sqrt((w**2).expand())) 1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3))) Otherwise, it will only be simplified if assumptions allow: >>> w = w.subs(sqrt(3), sqrt(x + 3)) >>> sqrtdenest(sqrt((w**2).expand())) sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2) Notice that the argument of the sqrt is a square. If x is made positive then the sqrt of the square is resolved: >>> _.subs(x, Symbol('x', positive=True)) sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2) """ a, b, r = map(sympify, (a, b, r)) rval = _sqrt_match(r) if not rval: return None ra, rb, rr = rval if rb: y = Dummy('y', positive=True) try: newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y) except PolynomialError: return None if newa.degree() == 2: ca, cb, cc = newa.all_coeffs() cb += b if _mexpand(cb**2 - 4*ca*cc).equals(0): z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2) if z.is_number: z = _mexpand(Mul._from_args(z.as_content_primitive())) return z def _sqrt_numeric_denest(a, b, r, d2): """Helper that denest expr = a + b*sqrt(r), with d2 = a**2 - b**2*r > 0 or returns None if not denested. """ from sympy.simplify.simplify import radsimp depthr = sqrt_depth(r) d = sqrt(d2) vad = a + d # sqrt_depth(res) <= sqrt_depth(vad) + 1 # sqrt_depth(expr) = depthr + 2 # there is denesting if sqrt_depth(vad)+1 < depthr + 2 # if vad**2 is Number there is a fourth root if sqrt_depth(vad) < depthr + 1 or (vad**2).is_Rational: vad1 = radsimp(1/vad) return (sqrt(vad/2) + sign(b)*sqrt((b**2*r*vad1/2).expand())).expand() def sqrt_biquadratic_denest(expr, a, b, r, d2): """denest expr = sqrt(a + b*sqrt(r)) where a, b, r are linear combinations of square roots of positive rationals on the rationals (SQRR) and r > 0, b != 0, d2 = a**2 - b**2*r > 0 If it cannot denest it returns None. ALGORITHM Search for a solution A of type SQRR of the biquadratic equation 4*A**4 - 4*a*A**2 + b**2*r = 0 (1) sqd = sqrt(a**2 - b**2*r) Choosing the sqrt to be positive, the possible solutions are A = sqrt(a/2 +/- sqd/2) Since a, b, r are SQRR, then a**2 - b**2*r is a SQRR, so if sqd can be denested, it is done by _sqrtdenest_rec, and the result is a SQRR. Similarly for A. 
Examples of solutions (in both cases a and sqd are positive): Example of expr with solution sqrt(a/2 + sqd/2) but not solution sqrt(a/2 - sqd/2): expr = sqrt(-sqrt(15) - sqrt(2)*sqrt(-sqrt(5) + 5) - sqrt(3) + 8) a = -sqrt(15) - sqrt(3) + 8; sqd = -2*sqrt(5) - 2 + 4*sqrt(3) Example of expr with solution sqrt(a/2 - sqd/2) but not solution sqrt(a/2 + sqd/2): w = 2 + r2 + r3 + (1 + r3)*sqrt(2 + r2 + 5*r3) expr = sqrt((w**2).expand()) a = 4*sqrt(6) + 8*sqrt(2) + 47 + 28*sqrt(3) sqd = 29 + 20*sqrt(3) Define B = b/2*A; eq.(1) implies a = A**2 + B**2*r; then expr**2 = a + b*sqrt(r) = (A + B*sqrt(r))**2 Examples ======== >>> from sympy import sqrt >>> from sympy.simplify.sqrtdenest import _sqrt_match, sqrt_biquadratic_denest >>> z = sqrt((2*sqrt(2) + 4)*sqrt(2 + sqrt(2)) + 5*sqrt(2) + 8) >>> a, b, r = _sqrt_match(z**2) >>> d2 = a**2 - b**2*r >>> sqrt_biquadratic_denest(z, a, b, r, d2) sqrt(2) + sqrt(sqrt(2) + 2) + 2 """ from sympy.simplify.radsimp import radsimp, rad_rationalize if r <= 0 or d2 < 0 or not b or sqrt_depth(expr.base) < 2: return None for x in (a, b, r): for y in x.args: y2 = y**2 if not y2.is_Integer or not y2.is_positive: return None sqd = _mexpand(sqrtdenest(sqrt(radsimp(d2)))) if sqrt_depth(sqd) > 1: return None x1, x2 = [a/2 + sqd/2, a/2 - sqd/2] # look for a solution A with depth 1 for x in (x1, x2): A = sqrtdenest(sqrt(x)) if sqrt_depth(A) > 1: continue Bn, Bd = rad_rationalize(b, _mexpand(2*A)) B = Bn/Bd z = A + B*sqrt(r) if z < 0: z = -z return _mexpand(z) return None def _denester(nested, av0, h, max_depth_level): """Denests a list of expressions that contain nested square roots. Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>. It is assumed that all of the elements of 'nested' share the same bottom-level radicand. (This is stated in the paper, on page 177, in the paragraph immediately preceding the algorithm.) When evaluating all of the arguments in parallel, the bottom-level radicand only needs to be denested once. This means that calling _denester with x arguments results in a recursive invocation with x+1 arguments; hence _denester has polynomial complexity. However, if the arguments were evaluated separately, each call would result in two recursive invocations, and the algorithm would have exponential complexity. This is discussed in the paper in the middle paragraph of page 179. """ from sympy.simplify.simplify import radsimp if h > max_depth_level: return None, None if av0[1] is None: return None, None if (av0[0] is None and all(n.is_Number for n in nested)): # no arguments are nested for f in _subsets(len(nested)): # test subset 'f' of nested p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]])) if f.count(1) > 1 and f[-1]: p = -p sqp = sqrt(p) if sqp.is_Rational: return sqp, f # got a perfect square so return its square root. # Otherwise, return the radicand from the previous invocation. 
return sqrt(nested[-1]), [0]*len(nested) else: R = None if av0[0] is not None: values = [av0[:2]] R = av0[2] nested2 = [av0[3], R] av0[0] = None else: values = list(filter(None, [_sqrt_match(expr) for expr in nested])) for v in values: if v[2]: # Since if b=0, r is not defined if R is not None: if R != v[2]: av0[1] = None return None, None else: R = v[2] if R is None: # return the radicand from the previous invocation return sqrt(nested[-1]), [0]*len(nested) nested2 = [_mexpand(v[0]**2) - _mexpand(R*v[1]**2) for v in values] + [R] d, f = _denester(nested2, av0, h + 1, max_depth_level) if not f: return None, None if not any(f[i] for i in range(len(nested))): v = values[-1] return sqrt(v[0] + _mexpand(v[1]*d)), f else: p = Mul(*[nested[i] for i in range(len(nested)) if f[i]]) v = _sqrt_match(p) if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]: v[0] = -v[0] v[1] = -v[1] if not f[len(nested)]: # Solution denests with square roots vad = _mexpand(v[0] + d) if vad <= 0: # return the radicand from the previous invocation. return sqrt(nested[-1]), [0]*len(nested) if not(sqrt_depth(vad) <= sqrt_depth(R) + 1 or (vad**2).is_Number): av0[1] = None return None, None sqvad = _sqrtdenest1(sqrt(vad), denester=False) if not (sqrt_depth(sqvad) <= sqrt_depth(R) + 1): av0[1] = None return None, None sqvad1 = radsimp(1/sqvad) res = _mexpand(sqvad/sqrt(2) + (v[1]*sqrt(R)*sqvad1/sqrt(2))) return res, f # sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f else: # Solution requires a fourth root s2 = _mexpand(v[1]*R) + d if s2 <= 0: return sqrt(nested[-1]), [0]*len(nested) FR, s = root(_mexpand(R), 4), sqrt(s2) return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f def _sqrt_ratcomb(cs, args): """Denest rational combinations of radicals. Based on section 5 of [1]. Examples ======== >>> from sympy import sqrt >>> from sympy.simplify.sqrtdenest import sqrtdenest >>> z = sqrt(1+sqrt(3)) + sqrt(3+3*sqrt(3)) - sqrt(10+6*sqrt(3)) >>> sqrtdenest(z) 0 """ from sympy.simplify.radsimp import radsimp # check if there exists a pair of sqrt that can be denested def find(a): n = len(a) for i in range(n - 1): for j in range(i + 1, n): s1 = a[i].base s2 = a[j].base p = _mexpand(s1 * s2) s = sqrtdenest(sqrt(p)) if s != sqrt(p): return s, i, j indices = find(args) if indices is None: return Add(*[c * arg for c, arg in zip(cs, args)]) s, i1, i2 = indices c2 = cs.pop(i2) args.pop(i2) a1 = args[i1] # replace a2 by s/a1 cs[i1] += radsimp(c2 * s / a1.base) return _sqrt_ratcomb(cs, args)
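# Usage sketch (illustration only) for the public helpers above.  The denested
# value repeats the sqrtdenest docstring example; the last case shows that the
# input is returned unchanged when no denesting is found.
from sympy import sqrt
from sympy.abc import x
from sympy.simplify.sqrtdenest import sqrtdenest, sqrt_depth

e = sqrt(5 + 2*sqrt(6))
print(sqrt_depth(e))             # 2: the radicand itself contains a sqrt
print(sqrtdenest(e))             # sqrt(2) + sqrt(3)
print(sqrtdenest(sqrt(x + 1)))   # sqrt(x + 1), returned unchanged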
95c9a16da0491ebeb1aa85eb0b43ad8369d957ebde91fd2d34c3e6ce59c09414
""" Tools for doing common subexpression elimination. """ from __future__ import print_function, division from sympy.core import Basic, Mul, Add, Pow, sympify, Symbol from sympy.core.compatibility import iterable from sympy.core.containers import Tuple, OrderedSet from sympy.core.exprtools import factor_terms from sympy.core.function import _coeff_isneg from sympy.core.singleton import S from sympy.utilities.iterables import numbered_symbols, sift, \ topological_sort, ordered from . import cse_opts # (preprocessor, postprocessor) pairs which are commonly useful. They should # each take a sympy expression and return a possibly transformed expression. # When used in the function ``cse()``, the target expressions will be transformed # by each of the preprocessor functions in order. After the common # subexpressions are eliminated, each resulting expression will have the # postprocessor functions transform them in *reverse* order in order to undo the # transformation if necessary. This allows the algorithm to operate on # a representation of the expressions that allows for more optimization # opportunities. # ``None`` can be used to specify no transformation for either the preprocessor or # postprocessor. basic_optimizations = [(cse_opts.sub_pre, cse_opts.sub_post), (factor_terms, None)] # sometimes we want the output in a different format; non-trivial # transformations can be put here for users # =============================================================== def reps_toposort(r): """Sort replacements `r` so (k1, v1) appears before (k2, v2) if k2 is in v1's free symbols. This orders items in the way that cse returns its results (hence, in order to use the replacements in a substitution option it would make sense to reverse the order). Examples ======== >>> from sympy.simplify.cse_main import reps_toposort >>> from sympy.abc import x, y >>> from sympy import Eq >>> for l, r in reps_toposort([(x, y + 1), (y, 2)]): ... print(Eq(l, r)) ... Eq(y, 2) Eq(x, y + 1) """ r = sympify(r) E = [] for c1, (k1, v1) in enumerate(r): for c2, (k2, v2) in enumerate(r): if k1 in v2.free_symbols: E.append((c1, c2)) return [r[i] for i in topological_sort((range(len(r)), E))] def cse_separate(r, e): """Move expressions that are in the form (symbol, expr) out of the expressions and sort them into the replacements using the reps_toposort. Examples ======== >>> from sympy.simplify.cse_main import cse_separate >>> from sympy.abc import x, y, z >>> from sympy import cos, exp, cse, Eq, symbols >>> x0, x1 = symbols('x:2') >>> eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1)) >>> cse([eq, Eq(x, z + 1), z - 2], postprocess=cse_separate) in [ ... [[(x0, y + 1), (x, z + 1), (x1, x + 1)], ... [x1 + exp(x1/x0) + cos(x0), z - 2]], ... [[(x1, y + 1), (x, z + 1), (x0, x + 1)], ... [x0 + exp(x0/x1) + cos(x1), z - 2]]] ... True """ d = sift(e, lambda w: w.is_Equality and w.lhs.is_Symbol) r = r + [w.args for w in d[True]] e = d[False] return [reps_toposort(r), e] # ====end of cse postprocess idioms=========================== def preprocess_for_cse(expr, optimizations): """ Preprocess an expression to optimize for common subexpression elimination. Parameters ========== expr : sympy expression The target expression to optimize. optimizations : list of (callable, callable) pairs The (preprocessor, postprocessor) pairs. Returns ======= expr : sympy expression The transformed expression. 
""" for pre, post in optimizations: if pre is not None: expr = pre(expr) return expr def postprocess_for_cse(expr, optimizations): """ Postprocess an expression after common subexpression elimination to return the expression to canonical sympy form. Parameters ========== expr : sympy expression The target expression to transform. optimizations : list of (callable, callable) pairs, optional The (preprocessor, postprocessor) pairs. The postprocessors will be applied in reversed order to undo the effects of the preprocessors correctly. Returns ======= expr : sympy expression The transformed expression. """ for pre, post in reversed(optimizations): if post is not None: expr = post(expr) return expr class FuncArgTracker(object): """ A class which manages a mapping from functions to arguments and an inverse mapping from arguments to functions. """ def __init__(self, funcs): # To minimize the number of symbolic comparisons, all function arguments # get assigned a value number. self.value_numbers = {} self.value_number_to_value = [] # Both of these maps use integer indices for arguments / functions. self.arg_to_funcset = [] self.func_to_argset = [] for func_i, func in enumerate(funcs): func_argset = OrderedSet() for func_arg in func.args: arg_number = self.get_or_add_value_number(func_arg) func_argset.add(arg_number) self.arg_to_funcset[arg_number].add(func_i) self.func_to_argset.append(func_argset) def get_args_in_value_order(self, argset): """ Return the list of arguments in sorted order according to their value numbers. """ return [self.value_number_to_value[argn] for argn in sorted(argset)] def get_or_add_value_number(self, value): """ Return the value number for the given argument. """ nvalues = len(self.value_numbers) value_number = self.value_numbers.setdefault(value, nvalues) if value_number == nvalues: self.value_number_to_value.append(value) self.arg_to_funcset.append(OrderedSet()) return value_number def stop_arg_tracking(self, func_i): """ Remove the function func_i from the argument to function mapping. """ for arg in self.func_to_argset[func_i]: self.arg_to_funcset[arg].remove(func_i) def get_common_arg_candidates(self, argset, min_func_i=0): """Return a dict whose keys are function numbers. The entries of the dict are the number of arguments said function has in common with `argset`. Entries have at least 2 items in common. All keys have value at least `min_func_i`. """ from collections import defaultdict count_map = defaultdict(lambda: 0) funcsets = [self.arg_to_funcset[arg] for arg in argset] # As an optimization below, we handle the largest funcset separately from # the others. largest_funcset = max(funcsets, key=len) for funcset in funcsets: if largest_funcset is funcset: continue for func_i in funcset: if func_i >= min_func_i: count_map[func_i] += 1 # We pick the smaller of the two containers (count_map, largest_funcset) # to iterate over to reduce the number of iterations needed. (smaller_funcs_container, larger_funcs_container) = sorted( [largest_funcset, count_map], key=len) for func_i in smaller_funcs_container: # Not already in count_map? It can't possibly be in the output, so # skip it. if count_map[func_i] < 1: continue if func_i in larger_funcs_container: count_map[func_i] += 1 return dict((k, v) for k, v in count_map.items() if v >= 2) def get_subset_candidates(self, argset, restrict_to_funcset=None): """ Return a set of functions each of which whose argument list contains ``argset``, optionally filtered only to contain functions in ``restrict_to_funcset``. 
""" iarg = iter(argset) indices = OrderedSet( fi for fi in self.arg_to_funcset[next(iarg)]) if restrict_to_funcset is not None: indices &= restrict_to_funcset for arg in iarg: indices &= self.arg_to_funcset[arg] return indices def update_func_argset(self, func_i, new_argset): """ Update a function with a new set of arguments. """ new_args = OrderedSet(new_argset) old_args = self.func_to_argset[func_i] for deleted_arg in old_args - new_args: self.arg_to_funcset[deleted_arg].remove(func_i) for added_arg in new_args - old_args: self.arg_to_funcset[added_arg].add(func_i) self.func_to_argset[func_i].clear() self.func_to_argset[func_i].update(new_args) class Unevaluated(object): def __init__(self, func, args): self.func = func self.args = args def __str__(self): return "Uneval<{}>({})".format( self.func, ", ".join(str(a) for a in self.args)) def as_unevaluated_basic(self): return self.func(*self.args, evaluate=False) @property def free_symbols(self): return set().union(*[a.free_symbols for a in self.args]) __repr__ = __str__ def match_common_args(func_class, funcs, opt_subs): """ Recognize and extract common subexpressions of function arguments within a set of function calls. For instance, for the following function calls:: x + z + y sin(x + y) this will extract a common subexpression of `x + y`:: w = x + y w + z sin(w) The function we work with is assumed to be associative and commutative. Parameters ========== func_class: class The function class (e.g. Add, Mul) funcs: list of functions A list of function calls opt_subs: dict A dictionary of substitutions which this function may update """ # Sort to ensure that whole-function subexpressions come before the items # that use them. funcs = sorted(funcs, key=lambda f: len(f.args)) arg_tracker = FuncArgTracker(funcs) changed = OrderedSet() for i in range(len(funcs)): common_arg_candidates_counts = arg_tracker.get_common_arg_candidates( arg_tracker.func_to_argset[i], min_func_i=i + 1) # Sort the candidates in order of match size. # This makes us try combining smaller matches first. common_arg_candidates = OrderedSet(sorted( common_arg_candidates_counts.keys(), key=lambda k: (common_arg_candidates_counts[k], k))) while common_arg_candidates: j = common_arg_candidates.pop(last=False) com_args = arg_tracker.func_to_argset[i].intersection( arg_tracker.func_to_argset[j]) if len(com_args) <= 1: # This may happen if a set of common arguments was already # combined in a previous iteration. continue # For all sets, replace the common symbols by the function # over them, to allow recursive matches. diff_i = arg_tracker.func_to_argset[i].difference(com_args) if diff_i: # com_func needs to be unevaluated to allow for recursive matches. com_func = Unevaluated( func_class, arg_tracker.get_args_in_value_order(com_args)) com_func_number = arg_tracker.get_or_add_value_number(com_func) arg_tracker.update_func_argset(i, diff_i | OrderedSet([com_func_number])) changed.add(i) else: # Treat the whole expression as a CSE. # # The reason this needs to be done is somewhat subtle. Within # tree_cse(), to_eliminate only contains expressions that are # seen more than once. The problem is unevaluated expressions # do not compare equal to the evaluated equivalent. So # tree_cse() won't mark funcs[i] as a CSE if we use an # unevaluated version. 
com_func_number = arg_tracker.get_or_add_value_number(funcs[i]) diff_j = arg_tracker.func_to_argset[j].difference(com_args) arg_tracker.update_func_argset(j, diff_j | OrderedSet([com_func_number])) changed.add(j) for k in arg_tracker.get_subset_candidates( com_args, common_arg_candidates): diff_k = arg_tracker.func_to_argset[k].difference(com_args) arg_tracker.update_func_argset(k, diff_k | OrderedSet([com_func_number])) changed.add(k) if i in changed: opt_subs[funcs[i]] = Unevaluated(func_class, arg_tracker.get_args_in_value_order(arg_tracker.func_to_argset[i])) arg_tracker.stop_arg_tracking(i) def opt_cse(exprs, order='canonical'): """Find optimization opportunities in Adds, Muls, Pows and negative coefficient Muls Parameters ========== exprs : list of sympy expressions The expressions to optimize. order : string, 'none' or 'canonical' The order by which Mul and Add arguments are processed. For large expressions where speed is a concern, use the setting order='none'. Returns ======= opt_subs : dictionary of expression substitutions The expression substitutions which can be useful to optimize CSE. Examples ======== >>> from sympy.simplify.cse_main import opt_cse >>> from sympy.abc import x >>> opt_subs = opt_cse([x**-2]) >>> k, v = list(opt_subs.keys())[0], list(opt_subs.values())[0] >>> print((k, v.as_unevaluated_basic())) (x**(-2), 1/(x**2)) """ from sympy.matrices.expressions import MatAdd, MatMul, MatPow opt_subs = dict() adds = OrderedSet() muls = OrderedSet() seen_subexp = set() def _find_opts(expr): if not isinstance(expr, (Basic, Unevaluated)): return if expr.is_Atom or expr.is_Order: return if iterable(expr): list(map(_find_opts, expr)) return if expr in seen_subexp: return expr seen_subexp.add(expr) list(map(_find_opts, expr.args)) if _coeff_isneg(expr): neg_expr = -expr if not neg_expr.is_Atom: opt_subs[expr] = Unevaluated(Mul, (S.NegativeOne, neg_expr)) seen_subexp.add(neg_expr) expr = neg_expr if isinstance(expr, (Mul, MatMul)): muls.add(expr) elif isinstance(expr, (Add, MatAdd)): adds.add(expr) elif isinstance(expr, (Pow, MatPow)): base, exp = expr.base, expr.exp if _coeff_isneg(exp): opt_subs[expr] = Unevaluated(Pow, (Pow(base, -exp), -1)) for e in exprs: if isinstance(e, (Basic, Unevaluated)): _find_opts(e) # split muls into commutative commutative_muls = OrderedSet() for m in muls: c, nc = m.args_cnc(cset=False) if c: c_mul = m.func(*c) if nc: if c_mul == 1: new_obj = m.func(*nc) else: new_obj = m.func(c_mul, m.func(*nc), evaluate=False) opt_subs[m] = new_obj if len(c) > 1: commutative_muls.add(c_mul) match_common_args(Add, adds, opt_subs) match_common_args(Mul, commutative_muls, opt_subs) return opt_subs def tree_cse(exprs, symbols, opt_subs=None, order='canonical', ignore=()): """Perform raw CSE on expression tree, taking opt_subs into account. Parameters ========== exprs : list of sympy expressions The expressions to reduce. symbols : infinite iterator yielding unique Symbols The symbols used to label the common subexpressions which are pulled out. opt_subs : dictionary of expression substitutions The expressions to be substituted before any CSE action is performed. order : string, 'none' or 'canonical' The order by which Mul and Add arguments are processed. For large expressions where speed is a concern, use the setting order='none'. ignore : iterable of Symbols Substitutions containing any Symbol from ``ignore`` will be ignored. 
""" from sympy.matrices.expressions import MatrixExpr, MatrixSymbol, MatMul, MatAdd from sympy.polys.rootoftools import RootOf if opt_subs is None: opt_subs = dict() ## Find repeated sub-expressions to_eliminate = set() seen_subexp = set() excluded_symbols = set() def _find_repeated(expr): if not isinstance(expr, (Basic, Unevaluated)): return if isinstance(expr, RootOf): return if isinstance(expr, Basic) and (expr.is_Atom or expr.is_Order): if expr.is_Symbol: excluded_symbols.add(expr) return if iterable(expr): args = expr else: if expr in seen_subexp: for ign in ignore: if ign in expr.free_symbols: break else: to_eliminate.add(expr) return seen_subexp.add(expr) if expr in opt_subs: expr = opt_subs[expr] args = expr.args list(map(_find_repeated, args)) for e in exprs: if isinstance(e, Basic): _find_repeated(e) ## Rebuild tree # Remove symbols from the generator that conflict with names in the expressions. symbols = (symbol for symbol in symbols if symbol not in excluded_symbols) replacements = [] subs = dict() def _rebuild(expr): if not isinstance(expr, (Basic, Unevaluated)): return expr if not expr.args: return expr if iterable(expr): new_args = [_rebuild(arg) for arg in expr] return expr.func(*new_args) if expr in subs: return subs[expr] orig_expr = expr if expr in opt_subs: expr = opt_subs[expr] # If enabled, parse Muls and Adds arguments by order to ensure # replacement order independent from hashes if order != 'none': if isinstance(expr, (Mul, MatMul)): c, nc = expr.args_cnc() if c == [1]: args = nc else: args = list(ordered(c)) + nc elif isinstance(expr, (Add, MatAdd)): args = list(ordered(expr.args)) else: args = expr.args else: args = expr.args new_args = list(map(_rebuild, args)) if isinstance(expr, Unevaluated) or new_args != args: new_expr = expr.func(*new_args) else: new_expr = expr if orig_expr in to_eliminate: try: sym = next(symbols) except StopIteration: raise ValueError("Symbols iterator ran out of symbols.") if isinstance(orig_expr, MatrixExpr): sym = MatrixSymbol(sym.name, orig_expr.rows, orig_expr.cols) subs[orig_expr] = sym replacements.append((sym, new_expr)) return sym else: return new_expr reduced_exprs = [] for e in exprs: if isinstance(e, Basic): reduced_e = _rebuild(e) else: reduced_e = e reduced_exprs.append(reduced_e) return replacements, reduced_exprs def cse(exprs, symbols=None, optimizations=None, postprocess=None, order='canonical', ignore=()): """ Perform common subexpression elimination on an expression. Parameters ========== exprs : list of sympy expressions, or a single sympy expression The expressions to reduce. symbols : infinite iterator yielding unique Symbols The symbols used to label the common subexpressions which are pulled out. The ``numbered_symbols`` generator is useful. The default is a stream of symbols of the form "x0", "x1", etc. This must be an infinite iterator. optimizations : list of (callable, callable) pairs The (preprocessor, postprocessor) pairs of external optimization functions. Optionally 'basic' can be passed for a set of predefined basic optimizations. Such 'basic' optimizations were used by default in old implementation, however they can be really slow on larger expressions. Now, no pre or post optimizations are made by default. postprocess : a function which accepts the two return values of cse and returns the desired form of output from cse, e.g. 
if you want the replacements reversed the function might be the following lambda: lambda r, e: return reversed(r), e order : string, 'none' or 'canonical' The order by which Mul and Add arguments are processed. If set to 'canonical', arguments will be canonically ordered. If set to 'none', ordering will be faster but dependent on expressions hashes, thus machine dependent and variable. For large expressions where speed is a concern, use the setting order='none'. ignore : iterable of Symbols Substitutions containing any Symbol from ``ignore`` will be ignored. Returns ======= replacements : list of (Symbol, expression) pairs All of the common subexpressions that were replaced. Subexpressions earlier in this list might show up in subexpressions later in this list. reduced_exprs : list of sympy expressions The reduced expressions with all of the replacements above. Examples ======== >>> from sympy import cse, SparseMatrix >>> from sympy.abc import x, y, z, w >>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3) ([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3]) Note that currently, y + z will not get substituted if -y - z is used. >>> cse(((w + x + y + z)*(w - y - z))/(w + x)**3) ([(x0, w + x)], [(w - y - z)*(x0 + y + z)/x0**3]) List of expressions with recursive substitutions: >>> m = SparseMatrix([x + y, x + y + z]) >>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m]) ([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([ [x0], [x1]])]) Note: the type and mutability of input matrices is retained. >>> isinstance(_[1][-1], SparseMatrix) True The user may disallow substitutions containing certain symbols: >>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,)) ([(x0, x + 1)], [x0*y**2, 3*x0*y**2]) """ from sympy.matrices import (MatrixBase, Matrix, ImmutableMatrix, SparseMatrix, ImmutableSparseMatrix) if isinstance(exprs, (int, float)): exprs = sympify(exprs) # Handle the case if just one expression was passed. if isinstance(exprs, (Basic, MatrixBase)): exprs = [exprs] copy = exprs temp = [] for e in exprs: if isinstance(e, (Matrix, ImmutableMatrix)): temp.append(Tuple(*e._mat)) elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)): temp.append(Tuple(*e._smat.items())) else: temp.append(e) exprs = temp del temp if optimizations is None: optimizations = list() elif optimizations == 'basic': optimizations = basic_optimizations # Preprocess the expressions to give us better optimization opportunities. reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs] if symbols is None: symbols = numbered_symbols(cls=Symbol) else: # In case we get passed an iterable with an __iter__ method instead of # an actual iterator. symbols = iter(symbols) # Find other optimization opportunities. opt_subs = opt_cse(reduced_exprs, order) # Main CSE algorithm. replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs, order, ignore) # Postprocess the expressions to return the expressions to canonical form. 
exprs = copy for i, (sym, subtree) in enumerate(replacements): subtree = postprocess_for_cse(subtree, optimizations) replacements[i] = (sym, subtree) reduced_exprs = [postprocess_for_cse(e, optimizations) for e in reduced_exprs] # Get the matrices back for i, e in enumerate(exprs): if isinstance(e, (Matrix, ImmutableMatrix)): reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i]) if isinstance(e, ImmutableMatrix): reduced_exprs[i] = reduced_exprs[i].as_immutable() elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)): m = SparseMatrix(e.rows, e.cols, {}) for k, v in reduced_exprs[i]: m[k] = v if isinstance(e, ImmutableSparseMatrix): m = m.as_immutable() reduced_exprs[i] = m if postprocess is None: return replacements, reduced_exprs return postprocess(replacements, reduced_exprs)
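# Usage sketch (illustration only) of the symbols= parameter of cse() described
# above: any infinite iterator of Symbols can replace the default x0, x1, ...
# stream.  The expected output in the comments assumes the default optimizations.
from sympy import cse
from sympy.utilities.iterables import numbered_symbols
from sympy.abc import x, y

replacements, reduced = cse([(x + y)**2, x + y + 1],
                            symbols=numbered_symbols('t'))
print(replacements)   # [(t0, x + y)]
print(reduced)        # [t0**2, t0 + 1]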
646a296a6e69f6ad3874d121d6c7ddf78df50ea0da355dd59948e70a9574b460
""" Implementation of the trigsimp algorithm by Fu et al. The idea behind the ``fu`` algorithm is to use a sequence of rules, applied in what is heuristically known to be a smart order, to select a simpler expression that is equivalent to the input. There are transform rules in which a single rule is applied to the expression tree. The following are just mnemonic in nature; see the docstrings for examples. TR0 - simplify expression TR1 - sec-csc to cos-sin TR2 - tan-cot to sin-cos ratio TR2i - sin-cos ratio to tan TR3 - angle canonicalization TR4 - functions at special angles TR5 - powers of sin to powers of cos TR6 - powers of cos to powers of sin TR7 - reduce cos power (increase angle) TR8 - expand products of sin-cos to sums TR9 - contract sums of sin-cos to products TR10 - separate sin-cos arguments TR10i - collect sin-cos arguments TR11 - reduce double angles TR12 - separate tan arguments TR12i - collect tan arguments TR13 - expand product of tan-cot TRmorrie - prod(cos(x*2**i), (i, 0, k - 1)) -> sin(2**k*x)/(2**k*sin(x)) TR14 - factored powers of sin or cos to cos or sin power TR15 - negative powers of sin to cot power TR16 - negative powers of cos to tan power TR22 - tan-cot powers to negative powers of sec-csc functions TR111 - negative sin-cos-tan powers to csc-sec-cot There are 4 combination transforms (CTR1 - CTR4) in which a sequence of transformations are applied and the simplest expression is selected from a few options. Finally, there are the 2 rule lists (RL1 and RL2), which apply a sequence of transformations and combined transformations, and the ``fu`` algorithm itself, which applies rules and rule lists and selects the best expressions. There is also a function ``L`` which counts the number of trigonometric functions that appear in the expression. Other than TR0, re-writing of expressions is not done by the transformations. e.g. TR10i finds pairs of terms in a sum that are in the form like ``cos(x)*cos(y) + sin(x)*sin(y)``. Such expression are targeted in a bottom-up traversal of the expression, but no manipulation to make them appear is attempted. For example, Set-up for examples below: >>> from sympy.simplify.fu import fu, L, TR9, TR10i, TR11 >>> from sympy import factor, sin, cos, powsimp >>> from sympy.abc import x, y, z, a >>> from time import time >>> eq = cos(x + y)/cos(x) >>> TR10i(eq.expand(trig=True)) -sin(x)*sin(y)/cos(x) + cos(y) If the expression is put in "normal" form (with a common denominator) then the transformation is successful: >>> TR10i(_.normal()) cos(x + y)/cos(x) TR11's behavior is similar. It rewrites double angles as smaller angles but doesn't do any simplification of the result. >>> TR11(sin(2)**a*cos(1)**(-a), 1) (2*sin(1)*cos(1))**a*cos(1)**(-a) >>> powsimp(_) (2*sin(1))**a The temptation is to try make these TR rules "smarter" but that should really be done at a higher level; the TR rules should try maintain the "do one thing well" principle. There is one exception, however. In TR10i and TR9 terms are recognized even when they are each multiplied by a common factor: >>> fu(a*cos(x)*cos(y) + a*sin(x)*sin(y)) a*cos(x - y) Factoring with ``factor_terms`` is used but it it "JIT"-like, being delayed until it is deemed necessary. 
Furthermore, if the factoring does not help with the simplification, it is
not retained, so ``a*cos(x)*cos(y) + a*sin(x)*sin(z)`` does not become the
factored (but unsimplified in the trigonometric sense) expression:

>>> fu(a*cos(x)*cos(y) + a*sin(x)*sin(z))
a*sin(x)*sin(z) + a*cos(x)*cos(y)

In some cases factoring might be a good idea, but the user is left to make
that decision. For example:

>>> expr=((15*sin(2*x) + 19*sin(x + y) + 17*sin(x + z) + 19*cos(x - z) +
... 25)*(20*sin(2*x) + 15*sin(x + y) + sin(y + z) + 14*cos(x - z) +
... 14*cos(y - z))*(9*sin(2*y) + 12*sin(y + z) + 10*cos(x - y) + 2*cos(y -
... z) + 18)).expand(trig=True).expand()

In the expanded state, there are nearly 1000 trig functions:

>>> L(expr)
932

If the expression were factored first, this would take time but the
resulting expression would be transformed very quickly:

>>> def clock(f, n=2):
...    t=time(); f(); return round(time()-t, n)
...
>>> clock(lambda: factor(expr))  # doctest: +SKIP
0.86
>>> clock(lambda: TR10i(expr), 3)  # doctest: +SKIP
0.016

If the unexpanded expression is used, the transformation takes longer but
not as long as it took to factor it and then transform it:

>>> clock(lambda: TR10i(expr), 2)  # doctest: +SKIP
0.28

So neither expansion nor factoring is used in ``TR10i``: if the expression
is already factored (or partially factored) then expansion with
``trig=True`` would destroy what is already known and take longer; if the
expression is expanded, factoring may take longer than simply applying the
transformation itself.

Although the algorithms should be canonical, always giving the same result,
they may not yield the best result. This, in general, is the nature of
simplification where searching all possible transformation paths is very
expensive. Here is a simple example. There are 6 terms in the following
sum:

>>> expr = (sin(x)**2*cos(y)*cos(z) + sin(x)*sin(y)*cos(x)*cos(z) +
... sin(x)*sin(z)*cos(x)*cos(y) + sin(y)*sin(z)*cos(x)**2 + sin(y)*sin(z) +
... cos(y)*cos(z))
>>> args = expr.args

Serendipitously, fu gives the best result:

>>> fu(expr)
3*cos(y - z)/2 - cos(2*x + y + z)/2

But if different terms were combined, a less-optimal result might be
obtained, requiring some additional work to get better simplification, but
still less than optimal. The following shows an alternative form of
``expr`` that resists optimal simplification once a given step is taken
since it leads to a dead end:

>>> TR9(-cos(x)**2*cos(y + z) + 3*cos(y - z)/2 +
... cos(y + z)/2 + cos(-2*x + y + z)/4 - cos(2*x + y + z)/4)
sin(2*x)*sin(y + z)/2 - cos(x)**2*cos(y + z) + 3*cos(y - z)/2 + cos(y + z)/2

Here is a smaller expression that exhibits the same behavior:

>>> a = sin(x)*sin(z)*cos(x)*cos(y) + sin(x)*sin(y)*cos(x)*cos(z)
>>> TR10i(a)
sin(x)*sin(y + z)*cos(x)
>>> newa = _
>>> TR10i(expr - a)  # this combines two more of the remaining terms
sin(x)**2*cos(y)*cos(z) + sin(y)*sin(z)*cos(x)**2 + cos(y - z)
>>> TR10i(_ + newa) == _ + newa  # but now there is no more simplification
True

Without getting lucky or trying all possible pairings of arguments, the
final result may be less than optimal and impossible to find without better
heuristics or brute force trial of all possibilities.

Notes
=====

This work was started by Dimitar Vlahovski at the Technological School
"Electronic systems" (30.11.2011).

References
==========

Fu, Hongguang, Xiuqin Zhong, and Zhenbing Zeng. "Automated and readable
simplification of trigonometric expressions." Mathematical and computer
modelling 44.11 (2006): 1169-1177.
http://rfdz.ph-noe.ac.at/fileadmin/Mathematik_Uploads/ACDCA/DESTIME2006/DES_contribs/Fu/simplification.pdf http://www.sosmath.com/trig/Trig5/trig5/pdf/pdf.html gives a formula sheet. """ from __future__ import print_function, division from collections import defaultdict from sympy.core.add import Add from sympy.core.basic import S from sympy.core.compatibility import ordered from sympy.core.expr import Expr from sympy.core.exprtools import Factors, gcd_terms, factor_terms from sympy.core.function import expand_mul from sympy.core.mul import Mul from sympy.core.numbers import pi, I from sympy.core.power import Pow from sympy.core.symbol import Dummy from sympy.core.sympify import sympify from sympy.functions.combinatorial.factorials import binomial from sympy.functions.elementary.hyperbolic import ( cosh, sinh, tanh, coth, sech, csch, HyperbolicFunction) from sympy.functions.elementary.trigonometric import ( cos, sin, tan, cot, sec, csc, sqrt, TrigonometricFunction) from sympy.ntheory.factor_ import perfect_power from sympy.polys.polytools import factor from sympy.simplify.simplify import bottom_up from sympy.strategies.tree import greedy from sympy.strategies.core import identity, debug from sympy import SYMPY_DEBUG # ================== Fu-like tools =========================== def TR0(rv): """Simplification of rational polynomials, trying to simplify the expression, e.g. combine things like 3*x + 2*x, etc.... """ # although it would be nice to use cancel, it doesn't work # with noncommutatives return rv.normal().factor().expand() def TR1(rv): """Replace sec, csc with 1/cos, 1/sin Examples ======== >>> from sympy.simplify.fu import TR1, sec, csc >>> from sympy.abc import x >>> TR1(2*csc(x) + sec(x)) 1/cos(x) + 2/sin(x) """ def f(rv): if isinstance(rv, sec): a = rv.args[0] return S.One/cos(a) elif isinstance(rv, csc): a = rv.args[0] return S.One/sin(a) return rv return bottom_up(rv, f) def TR2(rv): """Replace tan and cot with sin/cos and cos/sin Examples ======== >>> from sympy.simplify.fu import TR2 >>> from sympy.abc import x >>> from sympy import tan, cot, sin, cos >>> TR2(tan(x)) sin(x)/cos(x) >>> TR2(cot(x)) cos(x)/sin(x) >>> TR2(tan(tan(x) - sin(x)/cos(x))) 0 """ def f(rv): if isinstance(rv, tan): a = rv.args[0] return sin(a)/cos(a) elif isinstance(rv, cot): a = rv.args[0] return cos(a)/sin(a) return rv return bottom_up(rv, f) def TR2i(rv, half=False): """Converts ratios involving sin and cos as follows:: sin(x)/cos(x) -> tan(x) sin(x)/(cos(x) + 1) -> tan(x/2) if half=True Examples ======== >>> from sympy.simplify.fu import TR2i >>> from sympy.abc import x, a >>> from sympy import sin, cos >>> TR2i(sin(x)/cos(x)) tan(x) Powers of the numerator and denominator are also recognized >>> TR2i(sin(x)**2/(cos(x) + 1)**2, half=True) tan(x/2)**2 The transformation does not take place unless assumptions allow (i.e. 
the base must be positive or the exponent must be an integer for both numerator and denominator) >>> TR2i(sin(x)**a/(cos(x) + 1)**a) (cos(x) + 1)**(-a)*sin(x)**a """ def f(rv): if not rv.is_Mul: return rv n, d = rv.as_numer_denom() if n.is_Atom or d.is_Atom: return rv def ok(k, e): # initial filtering of factors return ( (e.is_integer or k.is_positive) and ( k.func in (sin, cos) or (half and k.is_Add and len(k.args) >= 2 and any(any(isinstance(ai, cos) or ai.is_Pow and ai.base is cos for ai in Mul.make_args(a)) for a in k.args)))) n = n.as_powers_dict() ndone = [(k, n.pop(k)) for k in list(n.keys()) if not ok(k, n[k])] if not n: return rv d = d.as_powers_dict() ddone = [(k, d.pop(k)) for k in list(d.keys()) if not ok(k, d[k])] if not d: return rv # factoring if necessary def factorize(d, ddone): newk = [] for k in d: if k.is_Add and len(k.args) > 1: knew = factor(k) if half else factor_terms(k) if knew != k: newk.append((k, knew)) if newk: for i, (k, knew) in enumerate(newk): del d[k] newk[i] = knew newk = Mul(*newk).as_powers_dict() for k in newk: v = d[k] + newk[k] if ok(k, v): d[k] = v else: ddone.append((k, v)) del newk factorize(n, ndone) factorize(d, ddone) # joining t = [] for k in n: if isinstance(k, sin): a = cos(k.args[0], evaluate=False) if a in d and d[a] == n[k]: t.append(tan(k.args[0])**n[k]) n[k] = d[a] = None elif half: a1 = 1 + a if a1 in d and d[a1] == n[k]: t.append((tan(k.args[0]/2))**n[k]) n[k] = d[a1] = None elif isinstance(k, cos): a = sin(k.args[0], evaluate=False) if a in d and d[a] == n[k]: t.append(tan(k.args[0])**-n[k]) n[k] = d[a] = None elif half and k.is_Add and k.args[0] is S.One and \ isinstance(k.args[1], cos): a = sin(k.args[1].args[0], evaluate=False) if a in d and d[a] == n[k] and (d[a].is_integer or \ a.is_positive): t.append(tan(a.args[0]/2)**-n[k]) n[k] = d[a] = None if t: rv = Mul(*(t + [b**e for b, e in n.items() if e]))/\ Mul(*[b**e for b, e in d.items() if e]) rv *= Mul(*[b**e for b, e in ndone])/Mul(*[b**e for b, e in ddone]) return rv return bottom_up(rv, f) def TR3(rv): """Induced formula: example sin(-a) = -sin(a) Examples ======== >>> from sympy.simplify.fu import TR3 >>> from sympy.abc import x, y >>> from sympy import pi >>> from sympy import cos >>> TR3(cos(y - x*(y - x))) cos(x*(x - y) + y) >>> cos(pi/2 + x) -sin(x) >>> cos(30*pi/2 + x) -cos(x) """ from sympy.simplify.simplify import signsimp # Negative argument (already automatic for funcs like sin(-x) -> -sin(x) # but more complicated expressions can use it, too). Also, trig angles # between pi/4 and pi/2 are not reduced to an angle between 0 and pi/4. # The following are automatically handled: # Argument of type: pi/2 +/- angle # Argument of type: pi +/- angle # Argument of type : 2k*pi +/- angle def f(rv): if not isinstance(rv, TrigonometricFunction): return rv rv = rv.func(signsimp(rv.args[0])) if not isinstance(rv, TrigonometricFunction): return rv if (rv.args[0] - S.Pi/4).is_positive is (S.Pi/2 - rv.args[0]).is_positive is True: fmap = {cos: sin, sin: cos, tan: cot, cot: tan, sec: csc, csc: sec} rv = fmap[rv.func](S.Pi/2 - rv.args[0]) return rv return bottom_up(rv, f) def TR4(rv): """Identify values of special angles. a= 0 pi/6 pi/4 pi/3 pi/2 ---------------------------------------------------- cos(a) 0 1/2 sqrt(2)/2 sqrt(3)/2 1 sin(a) 1 sqrt(3)/2 sqrt(2)/2 1/2 0 tan(a) 0 sqt(3)/3 1 sqrt(3) -- Examples ======== >>> from sympy.simplify.fu import TR4 >>> from sympy import pi >>> from sympy import cos, sin, tan, cot >>> for s in (0, pi/6, pi/4, pi/3, pi/2): ... 
print('%s %s %s %s' % (cos(s), sin(s), tan(s), cot(s))) ... 1 0 0 zoo sqrt(3)/2 1/2 sqrt(3)/3 sqrt(3) sqrt(2)/2 sqrt(2)/2 1 1 1/2 sqrt(3)/2 sqrt(3) sqrt(3)/3 0 1 zoo 0 """ # special values at 0, pi/6, pi/4, pi/3, pi/2 already handled return rv def _TR56(rv, f, g, h, max, pow): """Helper for TR5 and TR6 to replace f**2 with h(g**2) Options ======= max : controls size of exponent that can appear on f e.g. if max=4 then f**4 will be changed to h(g**2)**2. pow : controls whether the exponent must be a perfect power of 2 e.g. if pow=True (and max >= 6) then f**6 will not be changed but f**8 will be changed to h(g**2)**4 >>> from sympy.simplify.fu import _TR56 as T >>> from sympy.abc import x >>> from sympy import sin, cos >>> h = lambda x: 1 - x >>> T(sin(x)**3, sin, cos, h, 4, False) sin(x)**3 >>> T(sin(x)**6, sin, cos, h, 6, False) (1 - cos(x)**2)**3 >>> T(sin(x)**6, sin, cos, h, 6, True) sin(x)**6 >>> T(sin(x)**8, sin, cos, h, 10, True) (1 - cos(x)**2)**4 """ def _f(rv): # I'm not sure if this transformation should target all even powers # or only those expressible as powers of 2. Also, should it only # make the changes in powers that appear in sums -- making an isolated # change is not going to allow a simplification as far as I can tell. if not (rv.is_Pow and rv.base.func == f): return rv if not rv.exp.is_real: return rv if (rv.exp < 0) == True: return rv if (rv.exp > max) == True: return rv if rv.exp == 2: return h(g(rv.base.args[0])**2) else: if rv.exp == 4: e = 2 elif not pow: if rv.exp % 2: return rv e = rv.exp//2 else: p = perfect_power(rv.exp) if not p: return rv e = rv.exp//2 return h(g(rv.base.args[0])**2)**e return bottom_up(rv, _f) def TR5(rv, max=4, pow=False): """Replacement of sin**2 with 1 - cos(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR5 >>> from sympy.abc import x >>> from sympy import sin >>> TR5(sin(x)**2) 1 - cos(x)**2 >>> TR5(sin(x)**-2) # unchanged sin(x)**(-2) >>> TR5(sin(x)**4) (1 - cos(x)**2)**2 """ return _TR56(rv, sin, cos, lambda x: 1 - x, max=max, pow=pow) def TR6(rv, max=4, pow=False): """Replacement of cos**2 with 1 - sin(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR6 >>> from sympy.abc import x >>> from sympy import cos >>> TR6(cos(x)**2) 1 - sin(x)**2 >>> TR6(cos(x)**-2) #unchanged cos(x)**(-2) >>> TR6(cos(x)**4) (1 - sin(x)**2)**2 """ return _TR56(rv, cos, sin, lambda x: 1 - x, max=max, pow=pow) def TR7(rv): """Lowering the degree of cos(x)**2 Examples ======== >>> from sympy.simplify.fu import TR7 >>> from sympy.abc import x >>> from sympy import cos >>> TR7(cos(x)**2) cos(2*x)/2 + 1/2 >>> TR7(cos(x)**2 + 1) cos(2*x)/2 + 3/2 """ def f(rv): if not (rv.is_Pow and rv.base.func == cos and rv.exp == 2): return rv return (1 + cos(2*rv.base.args[0]))/2 return bottom_up(rv, f) def TR8(rv, first=True): """Converting products of ``cos`` and/or ``sin`` to a sum or difference of ``cos`` and or ``sin`` terms. 
Examples ======== >>> from sympy.simplify.fu import TR8, TR7 >>> from sympy import cos, sin >>> TR8(cos(2)*cos(3)) cos(5)/2 + cos(1)/2 >>> TR8(cos(2)*sin(3)) sin(5)/2 + sin(1)/2 >>> TR8(sin(2)*sin(3)) -cos(5)/2 + cos(1)/2 """ def f(rv): if not ( rv.is_Mul or rv.is_Pow and rv.base.func in (cos, sin) and (rv.exp.is_integer or rv.base.is_positive)): return rv if first: n, d = [expand_mul(i) for i in rv.as_numer_denom()] newn = TR8(n, first=False) newd = TR8(d, first=False) if newn != n or newd != d: rv = gcd_terms(newn/newd) if rv.is_Mul and rv.args[0].is_Rational and \ len(rv.args) == 2 and rv.args[1].is_Add: rv = Mul(*rv.as_coeff_Mul()) return rv args = {cos: [], sin: [], None: []} for a in ordered(Mul.make_args(rv)): if a.func in (cos, sin): args[a.func].append(a.args[0]) elif (a.is_Pow and a.exp.is_Integer and a.exp > 0 and \ a.base.func in (cos, sin)): # XXX this is ok but pathological expression could be handled # more efficiently as in TRmorrie args[a.base.func].extend([a.base.args[0]]*a.exp) else: args[None].append(a) c = args[cos] s = args[sin] if not (c and s or len(c) > 1 or len(s) > 1): return rv args = args[None] n = min(len(c), len(s)) for i in range(n): a1 = s.pop() a2 = c.pop() args.append((sin(a1 + a2) + sin(a1 - a2))/2) while len(c) > 1: a1 = c.pop() a2 = c.pop() args.append((cos(a1 + a2) + cos(a1 - a2))/2) if c: args.append(cos(c.pop())) while len(s) > 1: a1 = s.pop() a2 = s.pop() args.append((-cos(a1 + a2) + cos(a1 - a2))/2) if s: args.append(sin(s.pop())) return TR8(expand_mul(Mul(*args))) return bottom_up(rv, f) def TR9(rv): """Sum of ``cos`` or ``sin`` terms as a product of ``cos`` or ``sin``. Examples ======== >>> from sympy.simplify.fu import TR9 >>> from sympy import cos, sin >>> TR9(cos(1) + cos(2)) 2*cos(1/2)*cos(3/2) >>> TR9(cos(1) + 2*sin(1) + 2*sin(2)) cos(1) + 4*sin(3/2)*cos(1/2) If no change is made by TR9, no re-arrangement of the expression will be made. For example, though factoring of common term is attempted, if the factored expression wasn't changed, the original expression will be returned: >>> TR9(cos(3) + cos(3)*cos(2)) cos(3) + cos(2)*cos(3) """ def f(rv): if not rv.is_Add: return rv def do(rv, first=True): # cos(a)+/-cos(b) can be combined into a product of cosines and # sin(a)+/-sin(b) can be combined into a product of cosine and # sine. # # If there are more than two args, the pairs which "work" will # have a gcd extractable and the remaining two terms will have # the above structure -- all pairs must be checked to find the # ones that work. args that don't have a common set of symbols # are skipped since this doesn't lead to a simpler formula and # also has the arbitrariness of combining, for example, the x # and y term instead of the y and z term in something like # cos(x) + cos(y) + cos(z). 
if not rv.is_Add: return rv args = list(ordered(rv.args)) if len(args) != 2: hit = False for i in range(len(args)): ai = args[i] if ai is None: continue for j in range(i + 1, len(args)): aj = args[j] if aj is None: continue was = ai + aj new = do(was) if new != was: args[i] = new # update in place args[j] = None hit = True break # go to next i if hit: rv = Add(*[_f for _f in args if _f]) if rv.is_Add: rv = do(rv) return rv # two-arg Add split = trig_split(*args) if not split: return rv gcd, n1, n2, a, b, iscos = split # application of rule if possible if iscos: if n1 == n2: return gcd*n1*2*cos((a + b)/2)*cos((a - b)/2) if n1 < 0: a, b = b, a return -2*gcd*sin((a + b)/2)*sin((a - b)/2) else: if n1 == n2: return gcd*n1*2*sin((a + b)/2)*cos((a - b)/2) if n1 < 0: a, b = b, a return 2*gcd*cos((a + b)/2)*sin((a - b)/2) return process_common_addends(rv, do) # DON'T sift by free symbols return bottom_up(rv, f) def TR10(rv, first=True): """Separate sums in ``cos`` and ``sin``. Examples ======== >>> from sympy.simplify.fu import TR10 >>> from sympy.abc import a, b, c >>> from sympy import cos, sin >>> TR10(cos(a + b)) -sin(a)*sin(b) + cos(a)*cos(b) >>> TR10(sin(a + b)) sin(a)*cos(b) + sin(b)*cos(a) >>> TR10(sin(a + b + c)) (-sin(a)*sin(b) + cos(a)*cos(b))*sin(c) + \ (sin(a)*cos(b) + sin(b)*cos(a))*cos(c) """ def f(rv): if not rv.func in (cos, sin): return rv f = rv.func arg = rv.args[0] if arg.is_Add: if first: args = list(ordered(arg.args)) else: args = list(arg.args) a = args.pop() b = Add._from_args(args) if b.is_Add: if f == sin: return sin(a)*TR10(cos(b), first=False) + \ cos(a)*TR10(sin(b), first=False) else: return cos(a)*TR10(cos(b), first=False) - \ sin(a)*TR10(sin(b), first=False) else: if f == sin: return sin(a)*cos(b) + cos(a)*sin(b) else: return cos(a)*cos(b) - sin(a)*sin(b) return rv return bottom_up(rv, f) def TR10i(rv): """Sum of products to function of sum. Examples ======== >>> from sympy.simplify.fu import TR10i >>> from sympy import cos, sin, pi, Add, Mul, sqrt, Symbol >>> from sympy.abc import x, y >>> TR10i(cos(1)*cos(3) + sin(1)*sin(3)) cos(2) >>> TR10i(cos(1)*sin(3) + sin(1)*cos(3) + cos(3)) cos(3) + sin(4) >>> TR10i(sqrt(2)*cos(x)*x + sqrt(6)*sin(x)*x) 2*sqrt(2)*x*sin(x + pi/6) """ global _ROOT2, _ROOT3, _invROOT3 if _ROOT2 is None: _roots() def f(rv): if not rv.is_Add: return rv def do(rv, first=True): # args which can be expressed as A*(cos(a)*cos(b)+/-sin(a)*sin(b)) # or B*(cos(a)*sin(b)+/-cos(b)*sin(a)) can be combined into # A*f(a+/-b) where f is either sin or cos. # # If there are more than two args, the pairs which "work" will have # a gcd extractable and the remaining two terms will have the above # structure -- all pairs must be checked to find the ones that # work. 
if not rv.is_Add: return rv args = list(ordered(rv.args)) if len(args) != 2: hit = False for i in range(len(args)): ai = args[i] if ai is None: continue for j in range(i + 1, len(args)): aj = args[j] if aj is None: continue was = ai + aj new = do(was) if new != was: args[i] = new # update in place args[j] = None hit = True break # go to next i if hit: rv = Add(*[_f for _f in args if _f]) if rv.is_Add: rv = do(rv) return rv # two-arg Add split = trig_split(*args, two=True) if not split: return rv gcd, n1, n2, a, b, same = split # identify and get c1 to be cos then apply rule if possible if same: # coscos, sinsin gcd = n1*gcd if n1 == n2: return gcd*cos(a - b) return gcd*cos(a + b) else: #cossin, cossin gcd = n1*gcd if n1 == n2: return gcd*sin(a + b) return gcd*sin(b - a) rv = process_common_addends( rv, do, lambda x: tuple(ordered(x.free_symbols))) # need to check for inducible pairs in ratio of sqrt(3):1 that # appeared in different lists when sorting by coefficient while rv.is_Add: byrad = defaultdict(list) for a in rv.args: hit = 0 if a.is_Mul: for ai in a.args: if ai.is_Pow and ai.exp is S.Half and \ ai.base.is_Integer: byrad[ai].append(a) hit = 1 break if not hit: byrad[S.One].append(a) # no need to check all pairs -- just check for the onees # that have the right ratio args = [] for a in byrad: for b in [_ROOT3*a, _invROOT3]: if b in byrad: for i in range(len(byrad[a])): if byrad[a][i] is None: continue for j in range(len(byrad[b])): if byrad[b][j] is None: continue was = Add(byrad[a][i] + byrad[b][j]) new = do(was) if new != was: args.append(new) byrad[a][i] = None byrad[b][j] = None break if args: rv = Add(*(args + [Add(*[_f for _f in v if _f]) for v in byrad.values()])) else: rv = do(rv) # final pass to resolve any new inducible pairs break return rv return bottom_up(rv, f) def TR11(rv, base=None): """Function of double angle to product. The ``base`` argument can be used to indicate what is the un-doubled argument, e.g. if 3*pi/7 is the base then cosine and sine functions with argument 6*pi/7 will be replaced. Examples ======== >>> from sympy.simplify.fu import TR11 >>> from sympy import cos, sin, pi >>> from sympy.abc import x >>> TR11(sin(2*x)) 2*sin(x)*cos(x) >>> TR11(cos(2*x)) -sin(x)**2 + cos(x)**2 >>> TR11(sin(4*x)) 4*(-sin(x)**2 + cos(x)**2)*sin(x)*cos(x) >>> TR11(sin(4*x/3)) 4*(-sin(x/3)**2 + cos(x/3)**2)*sin(x/3)*cos(x/3) If the arguments are simply integers, no change is made unless a base is provided: >>> TR11(cos(2)) cos(2) >>> TR11(cos(4), 2) -sin(2)**2 + cos(2)**2 There is a subtle issue here in that autosimplification will convert some higher angles to lower angles >>> cos(6*pi/7) + cos(3*pi/7) -cos(pi/7) + cos(3*pi/7) The 6*pi/7 angle is now pi/7 but can be targeted with TR11 by supplying the 3*pi/7 base: >>> TR11(_, 3*pi/7) -sin(3*pi/7)**2 + cos(3*pi/7)**2 + cos(3*pi/7) """ def f(rv): if not rv.func in (cos, sin): return rv if base: f = rv.func t = f(base*2) co = S.One if t.is_Mul: co, t = t.as_coeff_Mul() if not t.func in (cos, sin): return rv if rv.args[0] == t.args[0]: c = cos(base) s = sin(base) if f is cos: return (c**2 - s**2)/co else: return 2*c*s/co return rv elif not rv.args[0].is_Number: # make a change if the leading coefficient's numerator is # divisible by 2 c, m = rv.args[0].as_coeff_Mul(rational=True) if c.p % 2 == 0: arg = c.p//2*m/c.q c = TR11(cos(arg)) s = TR11(sin(arg)) if rv.func == sin: rv = 2*s*c else: rv = c**2 - s**2 return rv return bottom_up(rv, f) def TR12(rv, first=True): """Separate sums in ``tan``. 
Examples ======== >>> from sympy.simplify.fu import TR12 >>> from sympy.abc import x, y >>> from sympy import tan >>> from sympy.simplify.fu import TR12 >>> TR12(tan(x + y)) (tan(x) + tan(y))/(-tan(x)*tan(y) + 1) """ def f(rv): if not rv.func == tan: return rv arg = rv.args[0] if arg.is_Add: if first: args = list(ordered(arg.args)) else: args = list(arg.args) a = args.pop() b = Add._from_args(args) if b.is_Add: tb = TR12(tan(b), first=False) else: tb = tan(b) return (tan(a) + tb)/(1 - tan(a)*tb) return rv return bottom_up(rv, f) def TR12i(rv): """Combine tan arguments as (tan(y) + tan(x))/(tan(x)*tan(y) - 1) -> -tan(x + y) Examples ======== >>> from sympy.simplify.fu import TR12i >>> from sympy import tan >>> from sympy.abc import a, b, c >>> ta, tb, tc = [tan(i) for i in (a, b, c)] >>> TR12i((ta + tb)/(-ta*tb + 1)) tan(a + b) >>> TR12i((ta + tb)/(ta*tb - 1)) -tan(a + b) >>> TR12i((-ta - tb)/(ta*tb - 1)) tan(a + b) >>> eq = (ta + tb)/(-ta*tb + 1)**2*(-3*ta - 3*tc)/(2*(ta*tc - 1)) >>> TR12i(eq.expand()) -3*tan(a + b)*tan(a + c)/(2*(tan(a) + tan(b) - 1)) """ from sympy import factor def f(rv): if not (rv.is_Add or rv.is_Mul or rv.is_Pow): return rv n, d = rv.as_numer_denom() if not d.args or not n.args: return rv dok = {} def ok(di): m = as_f_sign_1(di) if m: g, f, s = m if s is S.NegativeOne and f.is_Mul and len(f.args) == 2 and \ all(isinstance(fi, tan) for fi in f.args): return g, f d_args = list(Mul.make_args(d)) for i, di in enumerate(d_args): m = ok(di) if m: g, t = m s = Add(*[_.args[0] for _ in t.args]) dok[s] = S.One d_args[i] = g continue if di.is_Add: di = factor(di) if di.is_Mul: d_args.extend(di.args) d_args[i] = S.One elif di.is_Pow and (di.exp.is_integer or di.base.is_positive): m = ok(di.base) if m: g, t = m s = Add(*[_.args[0] for _ in t.args]) dok[s] = di.exp d_args[i] = g**di.exp else: di = factor(di) if di.is_Mul: d_args.extend(di.args) d_args[i] = S.One if not dok: return rv def ok(ni): if ni.is_Add and len(ni.args) == 2: a, b = ni.args if isinstance(a, tan) and isinstance(b, tan): return a, b n_args = list(Mul.make_args(factor_terms(n))) hit = False for i, ni in enumerate(n_args): m = ok(ni) if not m: m = ok(-ni) if m: n_args[i] = S.NegativeOne else: if ni.is_Add: ni = factor(ni) if ni.is_Mul: n_args.extend(ni.args) n_args[i] = S.One continue elif ni.is_Pow and ( ni.exp.is_integer or ni.base.is_positive): m = ok(ni.base) if m: n_args[i] = S.One else: ni = factor(ni) if ni.is_Mul: n_args.extend(ni.args) n_args[i] = S.One continue else: continue else: n_args[i] = S.One hit = True s = Add(*[_.args[0] for _ in m]) ed = dok[s] newed = ed.extract_additively(S.One) if newed is not None: if newed: dok[s] = newed else: dok.pop(s) n_args[i] *= -tan(s) if hit: rv = Mul(*n_args)/Mul(*d_args)/Mul(*[(Add(*[ tan(a) for a in i.args]) - 1)**e for i, e in dok.items()]) return rv return bottom_up(rv, f) def TR13(rv): """Change products of ``tan`` or ``cot``. Examples ======== >>> from sympy.simplify.fu import TR13 >>> from sympy import tan, cot, cos >>> TR13(tan(3)*tan(2)) -tan(2)/tan(5) - tan(3)/tan(5) + 1 >>> TR13(cot(3)*cot(2)) cot(2)*cot(5) + 1 + cot(3)*cot(5) """ def f(rv): if not rv.is_Mul: return rv # XXX handle products of powers? or let power-reducing handle it? 
args = {tan: [], cot: [], None: []} for a in ordered(Mul.make_args(rv)): if a.func in (tan, cot): args[a.func].append(a.args[0]) else: args[None].append(a) t = args[tan] c = args[cot] if len(t) < 2 and len(c) < 2: return rv args = args[None] while len(t) > 1: t1 = t.pop() t2 = t.pop() args.append(1 - (tan(t1)/tan(t1 + t2) + tan(t2)/tan(t1 + t2))) if t: args.append(tan(t.pop())) while len(c) > 1: t1 = c.pop() t2 = c.pop() args.append(1 + cot(t1)*cot(t1 + t2) + cot(t2)*cot(t1 + t2)) if c: args.append(cot(c.pop())) return Mul(*args) return bottom_up(rv, f) def TRmorrie(rv): """Returns cos(x)*cos(2*x)*...*cos(2**(k-1)*x) -> sin(2**k*x)/(2**k*sin(x)) Examples ======== >>> from sympy.simplify.fu import TRmorrie, TR8, TR3 >>> from sympy.abc import x >>> from sympy import Mul, cos, pi >>> TRmorrie(cos(x)*cos(2*x)) sin(4*x)/(4*sin(x)) >>> TRmorrie(7*Mul(*[cos(x) for x in range(10)])) 7*sin(12)*sin(16)*cos(5)*cos(7)*cos(9)/(64*sin(1)*sin(3)) Sometimes autosimplification will cause a power to be not recognized. e.g. in the following, cos(4*pi/7) automatically simplifies to -cos(3*pi/7) so only 2 of the 3 terms are recognized: >>> TRmorrie(cos(pi/7)*cos(2*pi/7)*cos(4*pi/7)) -sin(3*pi/7)*cos(3*pi/7)/(4*sin(pi/7)) A touch by TR8 resolves the expression to a Rational >>> TR8(_) -1/8 In this case, if eq is unsimplified, the answer is obtained directly: >>> eq = cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9) >>> TRmorrie(eq) 1/16 But if angles are made canonical with TR3 then the answer is not simplified without further work: >>> TR3(eq) sin(pi/18)*cos(pi/9)*cos(2*pi/9)/2 >>> TRmorrie(_) sin(pi/18)*sin(4*pi/9)/(8*sin(pi/9)) >>> TR8(_) cos(7*pi/18)/(16*sin(pi/9)) >>> TR3(_) 1/16 The original expression would have resolve to 1/16 directly with TR8, however: >>> TR8(eq) 1/16 References ========== https://en.wikipedia.org/wiki/Morrie%27s_law """ def f(rv, first=True): if not rv.is_Mul: return rv if first: n, d = rv.as_numer_denom() return f(n, 0)/f(d, 0) args = defaultdict(list) coss = {} other = [] for c in rv.args: b, e = c.as_base_exp() if e.is_Integer and isinstance(b, cos): co, a = b.args[0].as_coeff_Mul() args[a].append(co) coss[b] = e else: other.append(c) new = [] for a in args: c = args[a] c.sort() no = [] while c: k = 0 cc = ci = c[0] while cc in c: k += 1 cc *= 2 if k > 1: newarg = sin(2**k*ci*a)/2**k/sin(ci*a) # see how many times this can be taken take = None ccs = [] for i in range(k): cc /= 2 key = cos(a*cc, evaluate=False) ccs.append(cc) take = min(coss[key], take or coss[key]) # update exponent counts for i in range(k): cc = ccs.pop() key = cos(a*cc, evaluate=False) coss[key] -= take if not coss[key]: c.remove(cc) new.append(newarg**take) else: no.append(c.pop(0)) c[:] = no if new: rv = Mul(*(new + other + [ cos(k*a, evaluate=False) for a in args for k in args[a]])) return rv return bottom_up(rv, f) def TR14(rv, first=True): """Convert factored powers of sin and cos identities into simpler expressions. 
Examples ======== >>> from sympy.simplify.fu import TR14 >>> from sympy.abc import x, y >>> from sympy import cos, sin >>> TR14((cos(x) - 1)*(cos(x) + 1)) -sin(x)**2 >>> TR14((sin(x) - 1)*(sin(x) + 1)) -cos(x)**2 >>> p1 = (cos(x) + 1)*(cos(x) - 1) >>> p2 = (cos(y) - 1)*2*(cos(y) + 1) >>> p3 = (3*(cos(y) - 1))*(3*(cos(y) + 1)) >>> TR14(p1*p2*p3*(x - 1)) -18*(x - 1)*sin(x)**2*sin(y)**4 """ def f(rv): if not rv.is_Mul: return rv if first: # sort them by location in numerator and denominator # so the code below can just deal with positive exponents n, d = rv.as_numer_denom() if d is not S.One: newn = TR14(n, first=False) newd = TR14(d, first=False) if newn != n or newd != d: rv = newn/newd return rv other = [] process = [] for a in rv.args: if a.is_Pow: b, e = a.as_base_exp() if not (e.is_integer or b.is_positive): other.append(a) continue a = b else: e = S.One m = as_f_sign_1(a) if not m or m[1].func not in (cos, sin): if e is S.One: other.append(a) else: other.append(a**e) continue g, f, si = m process.append((g, e.is_Number, e, f, si, a)) # sort them to get like terms next to each other process = list(ordered(process)) # keep track of whether there was any change nother = len(other) # access keys keys = (g, t, e, f, si, a) = list(range(6)) while process: A = process.pop(0) if process: B = process[0] if A[e].is_Number and B[e].is_Number: # both exponents are numbers if A[f] == B[f]: if A[si] != B[si]: B = process.pop(0) take = min(A[e], B[e]) # reinsert any remainder # the B will likely sort after A so check it first if B[e] != take: rem = [B[i] for i in keys] rem[e] -= take process.insert(0, rem) elif A[e] != take: rem = [A[i] for i in keys] rem[e] -= take process.insert(0, rem) if isinstance(A[f], cos): t = sin else: t = cos other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take) continue elif A[e] == B[e]: # both exponents are equal symbols if A[f] == B[f]: if A[si] != B[si]: B = process.pop(0) take = A[e] if isinstance(A[f], cos): t = sin else: t = cos other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take) continue # either we are done or neither condition above applied other.append(A[a]**A[e]) if len(other) != nother: rv = Mul(*other) return rv return bottom_up(rv, f) def TR15(rv, max=4, pow=False): """Convert sin(x)*-2 to 1 + cot(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR15 >>> from sympy.abc import x >>> from sympy import cos, sin >>> TR15(1 - 1/sin(x)**2) -cot(x)**2 """ def f(rv): if not (isinstance(rv, Pow) and isinstance(rv.base, sin)): return rv ia = 1/rv a = _TR56(ia, sin, cot, lambda x: 1 + x, max=max, pow=pow) if a != ia: rv = a return rv return bottom_up(rv, f) def TR16(rv, max=4, pow=False): """Convert cos(x)*-2 to 1 + tan(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR16 >>> from sympy.abc import x >>> from sympy import cos, sin >>> TR16(1 - 1/cos(x)**2) -tan(x)**2 """ def f(rv): if not (isinstance(rv, Pow) and isinstance(rv.base, cos)): return rv ia = 1/rv a = _TR56(ia, cos, tan, lambda x: 1 + x, max=max, pow=pow) if a != ia: rv = a return rv return bottom_up(rv, f) def TR111(rv): """Convert f(x)**-i to g(x)**i where either ``i`` is an integer or the base is positive and f, g are: tan, cot; sin, csc; or cos, sec. 
Examples ======== >>> from sympy.simplify.fu import TR111 >>> from sympy.abc import x >>> from sympy import tan >>> TR111(1 - 1/tan(x)**2) 1 - cot(x)**2 """ def f(rv): if not ( isinstance(rv, Pow) and (rv.base.is_positive or rv.exp.is_integer and rv.exp.is_negative)): return rv if isinstance(rv.base, tan): return cot(rv.base.args[0])**-rv.exp elif isinstance(rv.base, sin): return csc(rv.base.args[0])**-rv.exp elif isinstance(rv.base, cos): return sec(rv.base.args[0])**-rv.exp return rv return bottom_up(rv, f) def TR22(rv, max=4, pow=False): """Convert tan(x)**2 to sec(x)**2 - 1 and cot(x)**2 to csc(x)**2 - 1. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR22 >>> from sympy.abc import x >>> from sympy import tan, cot >>> TR22(1 + tan(x)**2) sec(x)**2 >>> TR22(1 + cot(x)**2) csc(x)**2 """ def f(rv): if not (isinstance(rv, Pow) and rv.base.func in (cot, tan)): return rv rv = _TR56(rv, tan, sec, lambda x: x - 1, max=max, pow=pow) rv = _TR56(rv, cot, csc, lambda x: x - 1, max=max, pow=pow) return rv return bottom_up(rv, f) def TRpower(rv): """Convert sin(x)**n and cos(x)**n with positive n to sums. Examples ======== >>> from sympy.simplify.fu import TRpower >>> from sympy.abc import x >>> from sympy import cos, sin >>> TRpower(sin(x)**6) -15*cos(2*x)/32 + 3*cos(4*x)/16 - cos(6*x)/32 + 5/16 >>> TRpower(sin(x)**3*cos(2*x)**4) (3*sin(x)/4 - sin(3*x)/4)*(cos(4*x)/2 + cos(8*x)/8 + 3/8) References ========== https://en.wikipedia.org/wiki/List_of_trigonometric_identities#Power-reduction_formulae """ def f(rv): if not (isinstance(rv, Pow) and isinstance(rv.base, (sin, cos))): return rv b, n = rv.as_base_exp() x = b.args[0] if n.is_Integer and n.is_positive: if n.is_odd and isinstance(b, cos): rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x) for k in range((n + 1)/2)]) elif n.is_odd and isinstance(b, sin): rv = 2**(1-n)*(-1)**((n-1)/2)*Add(*[binomial(n, k)* (-1)**k*sin((n - 2*k)*x) for k in range((n + 1)/2)]) elif n.is_even and isinstance(b, cos): rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x) for k in range(n/2)]) elif n.is_even and isinstance(b, sin): rv = 2**(1-n)*(-1)**(n/2)*Add(*[binomial(n, k)* (-1)**k*cos((n - 2*k)*x) for k in range(n/2)]) if n.is_even: rv += 2**(-n)*binomial(n, n/2) return rv return bottom_up(rv, f) def L(rv): """Return count of trigonometric functions in expression. Examples ======== >>> from sympy.simplify.fu import L >>> from sympy.abc import x >>> from sympy import cos, sin >>> L(cos(x)+sin(x)) 2 """ return S(rv.count(TrigonometricFunction)) # ============== end of basic Fu-like tools ===================== if SYMPY_DEBUG: (TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13, TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22 )= list(map(debug, (TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13, TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22))) # tuples are chains -- (f, g) -> lambda x: g(f(x)) # lists are choices -- [f, g] -> lambda x: min(f(x), g(x), key=objective) CTR1 = [(TR5, TR0), (TR6, TR0), identity] CTR2 = (TR11, [(TR5, TR0), (TR6, TR0), TR0]) CTR3 = [(TRmorrie, TR8, TR0), (TRmorrie, TR8, TR10i, TR0), identity] CTR4 = [(TR4, TR10i), identity] RL1 = (TR4, TR3, TR4, TR12, TR4, TR13, TR4, TR0) # XXX it's a little unclear how this one is to be implemented # see Fu paper of reference, page 7. What is the Union symbol referring to? 
# The diagram shows all these as one chain of transformations, but the # text refers to them being applied independently. Also, a break # if L starts to increase has not been implemented. RL2 = [ (TR4, TR3, TR10, TR4, TR3, TR11), (TR5, TR7, TR11, TR4), (CTR3, CTR1, TR9, CTR2, TR4, TR9, TR9, CTR4), identity, ] def fu(rv, measure=lambda x: (L(x), x.count_ops())): """Attempt to simplify expression by using transformation rules given in the algorithm by Fu et al. :func:`fu` will try to minimize the objective function ``measure``. By default this first minimizes the number of trig terms and then minimizes the number of total operations. Examples ======== >>> from sympy.simplify.fu import fu >>> from sympy import cos, sin, tan, pi, S, sqrt >>> from sympy.abc import x, y, a, b >>> fu(sin(50)**2 + cos(50)**2 + sin(pi/6)) 3/2 >>> fu(sqrt(6)*cos(x) + sqrt(2)*sin(x)) 2*sqrt(2)*sin(x + pi/3) CTR1 example >>> eq = sin(x)**4 - cos(y)**2 + sin(y)**2 + 2*cos(x)**2 >>> fu(eq) cos(x)**4 - 2*cos(y)**2 + 2 CTR2 example >>> fu(S.Half - cos(2*x)/2) sin(x)**2 CTR3 example >>> fu(sin(a)*(cos(b) - sin(b)) + cos(a)*(sin(b) + cos(b))) sqrt(2)*sin(a + b + pi/4) CTR4 example >>> fu(sqrt(3)*cos(x)/2 + sin(x)/2) sin(x + pi/3) Example 1 >>> fu(1-sin(2*x)**2/4-sin(y)**2-cos(x)**4) -cos(x)**2 + cos(y)**2 Example 2 >>> fu(cos(4*pi/9)) sin(pi/18) >>> fu(cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9)) 1/16 Example 3 >>> fu(tan(7*pi/18)+tan(5*pi/18)-sqrt(3)*tan(5*pi/18)*tan(7*pi/18)) -sqrt(3) Objective function example >>> fu(sin(x)/cos(x)) # default objective function tan(x) >>> fu(sin(x)/cos(x), measure=lambda x: -x.count_ops()) # maximize op count sin(x)/cos(x) References ========== http://rfdz.ph-noe.ac.at/fileadmin/Mathematik_Uploads/ACDCA/ DESTIME2006/DES_contribs/Fu/simplification.pdf """ fRL1 = greedy(RL1, measure) fRL2 = greedy(RL2, measure) was = rv rv = sympify(rv) if not isinstance(rv, Expr): return rv.func(*[fu(a, measure=measure) for a in rv.args]) rv = TR1(rv) if rv.has(tan, cot): rv1 = fRL1(rv) if (measure(rv1) < measure(rv)): rv = rv1 if rv.has(tan, cot): rv = TR2(rv) if rv.has(sin, cos): rv1 = fRL2(rv) rv2 = TR8(TRmorrie(rv1)) rv = min([was, rv, rv1, rv2], key=measure) return min(TR2i(rv), rv, key=measure) def process_common_addends(rv, do, key2=None, key1=True): """Apply ``do`` to addends of ``rv`` that (if key1=True) share at least a common absolute value of their coefficient and the value of ``key2`` when applied to the argument. If ``key1`` is False ``key2`` must be supplied and will be the only key applied. 
""" # collect by absolute value of coefficient and key2 absc = defaultdict(list) if key1: for a in rv.args: c, a = a.as_coeff_Mul() if c < 0: c = -c a = -a # put the sign on `a` absc[(c, key2(a) if key2 else 1)].append(a) elif key2: for a in rv.args: absc[(S.One, key2(a))].append(a) else: raise ValueError('must have at least one key') args = [] hit = False for k in absc: v = absc[k] c, _ = k if len(v) > 1: e = Add(*v, evaluate=False) new = do(e) if new != e: e = new hit = True args.append(c*e) else: args.append(c*v[0]) if hit: rv = Add(*args) return rv fufuncs = ''' TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 TR8 TR9 TR10 TR10i TR11 TR12 TR13 L TR2i TRmorrie TR12i TR14 TR15 TR16 TR111 TR22'''.split() FU = dict(list(zip(fufuncs, list(map(locals().get, fufuncs))))) def _roots(): global _ROOT2, _ROOT3, _invROOT3 _ROOT2, _ROOT3 = sqrt(2), sqrt(3) _invROOT3 = 1/_ROOT3 _ROOT2 = None def trig_split(a, b, two=False): """Return the gcd, s1, s2, a1, a2, bool where If two is False (default) then:: a + b = gcd*(s1*f(a1) + s2*f(a2)) where f = cos if bool else sin else: if bool, a + b was +/- cos(a1)*cos(a2) +/- sin(a1)*sin(a2) and equals n1*gcd*cos(a - b) if n1 == n2 else n1*gcd*cos(a + b) else a + b was +/- cos(a1)*sin(a2) +/- sin(a1)*cos(a2) and equals n1*gcd*sin(a + b) if n1 = n2 else n1*gcd*sin(b - a) Examples ======== >>> from sympy.simplify.fu import trig_split >>> from sympy.abc import x, y, z >>> from sympy import cos, sin, sqrt >>> trig_split(cos(x), cos(y)) (1, 1, 1, x, y, True) >>> trig_split(2*cos(x), -2*cos(y)) (2, 1, -1, x, y, True) >>> trig_split(cos(x)*sin(y), cos(y)*sin(y)) (sin(y), 1, 1, x, y, True) >>> trig_split(cos(x), -sqrt(3)*sin(x), two=True) (2, 1, -1, x, pi/6, False) >>> trig_split(cos(x), sin(x), two=True) (sqrt(2), 1, 1, x, pi/4, False) >>> trig_split(cos(x), -sin(x), two=True) (sqrt(2), 1, -1, x, pi/4, False) >>> trig_split(sqrt(2)*cos(x), -sqrt(6)*sin(x), two=True) (2*sqrt(2), 1, -1, x, pi/6, False) >>> trig_split(-sqrt(6)*cos(x), -sqrt(2)*sin(x), two=True) (-2*sqrt(2), 1, 1, x, pi/3, False) >>> trig_split(cos(x)/sqrt(6), sin(x)/sqrt(2), two=True) (sqrt(6)/3, 1, 1, x, pi/6, False) >>> trig_split(-sqrt(6)*cos(x)*sin(y), -sqrt(2)*sin(x)*sin(y), two=True) (-2*sqrt(2)*sin(y), 1, 1, x, pi/3, False) >>> trig_split(cos(x), sin(x)) >>> trig_split(cos(x), sin(z)) >>> trig_split(2*cos(x), -sin(x)) >>> trig_split(cos(x), -sqrt(3)*sin(x)) >>> trig_split(cos(x)*cos(y), sin(x)*sin(z)) >>> trig_split(cos(x)*cos(y), sin(x)*sin(y)) >>> trig_split(-sqrt(6)*cos(x), sqrt(2)*sin(x)*sin(y), two=True) """ global _ROOT2, _ROOT3, _invROOT3 if _ROOT2 is None: _roots() a, b = [Factors(i) for i in (a, b)] ua, ub = a.normal(b) gcd = a.gcd(b).as_expr() n1 = n2 = 1 if S.NegativeOne in ua.factors: ua = ua.quo(S.NegativeOne) n1 = -n1 elif S.NegativeOne in ub.factors: ub = ub.quo(S.NegativeOne) n2 = -n2 a, b = [i.as_expr() for i in (ua, ub)] def pow_cos_sin(a, two): """Return ``a`` as a tuple (r, c, s) such that ``a = (r or 1)*(c or 1)*(s or 1)``. Three arguments are returned (radical, c-factor, s-factor) as long as the conditions set by ``two`` are met; otherwise None is returned. If ``two`` is True there will be one or two non-None values in the tuple: c and s or c and r or s and r or s or c with c being a cosine function (if possible) else a sine, and s being a sine function (if possible) else oosine. If ``two`` is False then there will only be a c or s term in the tuple. 
``two`` also require that either two cos and/or sin be present (with the condition that if the functions are the same the arguments are different or vice versa) or that a single cosine or a single sine be present with an optional radical. If the above conditions dictated by ``two`` are not met then None is returned. """ c = s = None co = S.One if a.is_Mul: co, a = a.as_coeff_Mul() if len(a.args) > 2 or not two: return None if a.is_Mul: args = list(a.args) else: args = [a] a = args.pop(0) if isinstance(a, cos): c = a elif isinstance(a, sin): s = a elif a.is_Pow and a.exp is S.Half: # autoeval doesn't allow -1/2 co *= a else: return None if args: b = args[0] if isinstance(b, cos): if c: s = b else: c = b elif isinstance(b, sin): if s: c = b else: s = b elif b.is_Pow and b.exp is S.Half: co *= b else: return None return co if co is not S.One else None, c, s elif isinstance(a, cos): c = a elif isinstance(a, sin): s = a if c is None and s is None: return co = co if co is not S.One else None return co, c, s # get the parts m = pow_cos_sin(a, two) if m is None: return coa, ca, sa = m m = pow_cos_sin(b, two) if m is None: return cob, cb, sb = m # check them if (not ca) and cb or ca and isinstance(ca, sin): coa, ca, sa, cob, cb, sb = cob, cb, sb, coa, ca, sa n1, n2 = n2, n1 if not two: # need cos(x) and cos(y) or sin(x) and sin(y) c = ca or sa s = cb or sb if not isinstance(c, s.func): return None return gcd, n1, n2, c.args[0], s.args[0], isinstance(c, cos) else: if not coa and not cob: if (ca and cb and sa and sb): if isinstance(ca, sa.func) is not isinstance(cb, sb.func): return args = {j.args for j in (ca, sa)} if not all(i.args in args for i in (cb, sb)): return return gcd, n1, n2, ca.args[0], sa.args[0], isinstance(ca, sa.func) if ca and sa or cb and sb or \ two and (ca is None and sa is None or cb is None and sb is None): return c = ca or sa s = cb or sb if c.args != s.args: return if not coa: coa = S.One if not cob: cob = S.One if coa is cob: gcd *= _ROOT2 return gcd, n1, n2, c.args[0], pi/4, False elif coa/cob == _ROOT3: gcd *= 2*cob return gcd, n1, n2, c.args[0], pi/3, False elif coa/cob == _invROOT3: gcd *= 2*coa return gcd, n1, n2, c.args[0], pi/6, False def as_f_sign_1(e): """If ``e`` is a sum that can be written as ``g*(a + s)`` where ``s`` is ``+/-1``, return ``g``, ``a``, and ``s`` where ``a`` does not have a leading negative coefficient. Examples ======== >>> from sympy.simplify.fu import as_f_sign_1 >>> from sympy.abc import x >>> as_f_sign_1(x + 1) (1, x, 1) >>> as_f_sign_1(x - 1) (1, x, -1) >>> as_f_sign_1(-x + 1) (-1, x, -1) >>> as_f_sign_1(-x - 1) (-1, x, 1) >>> as_f_sign_1(2*x + 2) (2, x, 1) """ if not e.is_Add or len(e.args) != 2: return # exact match a, b = e.args if a in (S.NegativeOne, S.One): g = S.One if b.is_Mul and b.args[0].is_Number and b.args[0] < 0: a, b = -a, -b g = -g return g, b, a # gcd match a, b = [Factors(i) for i in e.args] ua, ub = a.normal(b) gcd = a.gcd(b).as_expr() if S.NegativeOne in ua.factors: ua = ua.quo(S.NegativeOne) n1 = -1 n2 = 1 elif S.NegativeOne in ub.factors: ub = ub.quo(S.NegativeOne) n1 = 1 n2 = -1 else: n1 = n2 = 1 a, b = [i.as_expr() for i in (ua, ub)] if a is S.One: a, b = b, a n1, n2 = n2, n1 if n1 == -1: gcd = -gcd n2 = -n2 if b is S.One: return gcd, a, n2 def _osborne(e, d): """Replace all hyperbolic functions with trig functions using the Osborne rule. Notes ===== ``d`` is a dummy variable to prevent automatic evaluation of trigonometric/hyperbolic functions. 
References ========== https://en.wikipedia.org/wiki/Hyperbolic_function """ def f(rv): if not isinstance(rv, HyperbolicFunction): return rv a = rv.args[0] a = a*d if not a.is_Add else Add._from_args([i*d for i in a.args]) if isinstance(rv, sinh): return I*sin(a) elif isinstance(rv, cosh): return cos(a) elif isinstance(rv, tanh): return I*tan(a) elif isinstance(rv, coth): return cot(a)/I elif isinstance(rv, sech): return sec(a) elif isinstance(rv, csch): return csc(a)/I else: raise NotImplementedError('unhandled %s' % rv.func) return bottom_up(e, f) def _osbornei(e, d): """Replace all trig functions with hyperbolic functions using the Osborne rule. Notes ===== ``d`` is a dummy variable to prevent automatic evaluation of trigonometric/hyperbolic functions. References ========== https://en.wikipedia.org/wiki/Hyperbolic_function """ def f(rv): if not isinstance(rv, TrigonometricFunction): return rv const, x = rv.args[0].as_independent(d, as_Add=True) a = x.xreplace({d: S.One}) + const*I if isinstance(rv, sin): return sinh(a)/I elif isinstance(rv, cos): return cosh(a) elif isinstance(rv, tan): return tanh(a)/I elif isinstance(rv, cot): return coth(a)*I elif isinstance(rv, sec): return sech(a) elif isinstance(rv, csc): return csch(a)*I else: raise NotImplementedError('unhandled %s' % rv.func) return bottom_up(e, f) def hyper_as_trig(rv): """Return an expression containing hyperbolic functions in terms of trigonometric functions. Any trigonometric functions initially present are replaced with Dummy symbols and the function to undo the masking and the conversion back to hyperbolics is also returned. It should always be true that:: t, f = hyper_as_trig(expr) expr == f(t) Examples ======== >>> from sympy.simplify.fu import hyper_as_trig, fu >>> from sympy.abc import x >>> from sympy import cosh, sinh >>> eq = sinh(x)**2 + cosh(x)**2 >>> t, f = hyper_as_trig(eq) >>> f(fu(t)) cosh(2*x) References ========== https://en.wikipedia.org/wiki/Hyperbolic_function """ from sympy.simplify.simplify import signsimp from sympy.simplify.radsimp import collect # mask off trig functions trigs = rv.atoms(TrigonometricFunction) reps = [(t, Dummy()) for t in trigs] masked = rv.xreplace(dict(reps)) # get inversion substitutions in place reps = [(v, k) for k, v in reps] d = Dummy() return _osborne(masked, d), lambda x: collect(signsimp( _osbornei(x, d).xreplace(dict(reps))), S.ImaginaryUnit) def sincos_to_sum(expr): """Convert products and powers of sin and cos to sums. Applied power reduction TRpower first, then expands products, and converts products to sums with TR8. Examples ======== >>> from sympy.simplify.fu import sincos_to_sum >>> from sympy.abc import x >>> from sympy import cos, sin >>> sincos_to_sum(16*sin(x)**3*cos(2*x)**2) 7*sin(x) - 5*sin(3*x) + 3*sin(5*x) - sin(7*x) """ if not expr.has(cos, sin): return expr else: return TR8(expand_mul(TRpower(expr)))
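# Illustrative sketch (an addition, not part of the original source): a short
# end-to-end driver for the rules defined above.  The expected values shown in
# the comments are taken from the docstrings in this module and are reproduced
# here only as a convenience.

from sympy import cos, cosh, pi, sin, sinh, sqrt
from sympy.abc import x
from sympy.simplify.fu import TR8, fu, hyper_as_trig

print(fu(sin(50)**2 + cos(50)**2 + sin(pi/6)))   # expected: 3/2
print(fu(sqrt(6)*cos(x) + sqrt(2)*sin(x)))       # expected: 2*sqrt(2)*sin(x + pi/3)
print(TR8(cos(2)*cos(3)))                        # expected: cos(5)/2 + cos(1)/2

# Hyperbolic expressions can be simplified by masking them as trigonometric
# ones, running fu, and undoing the masking:
t, undo = hyper_as_trig(sinh(x)**2 + cosh(x)**2)
print(undo(fu(t)))                               # expected: cosh(2*x)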
r""" This module contains the functionality to arrange the nodes of a diagram on an abstract grid, and then to produce a graphical representation of the grid. The currently supported back-ends are Xy-pic [Xypic]. Layout Algorithm ================ This section provides an overview of the algorithms implemented in :class:`DiagramGrid` to lay out diagrams. The first step of the algorithm is the removal composite and identity morphisms which do not have properties in the supplied diagram. The premises and conclusions of the diagram are then merged. The generic layout algorithm begins with the construction of the "skeleton" of the diagram. The skeleton is an undirected graph which has the objects of the diagram as vertices and has an (undirected) edge between each pair of objects between which there exist morphisms. The direction of the morphisms does not matter at this stage. The skeleton also includes an edge between each pair of vertices `A` and `C` such that there exists an object `B` which is connected via a morphism to `A`, and via a morphism to `C`. The skeleton constructed in this way has the property that every object is a vertex of a triangle formed by three edges of the skeleton. This property lies at the base of the generic layout algorithm. After the skeleton has been constructed, the algorithm lists all triangles which can be formed. Note that some triangles will not have all edges corresponding to morphisms which will actually be drawn. Triangles which have only one edge or less which will actually be drawn are immediately discarded. The list of triangles is sorted according to the number of edges which correspond to morphisms, then the triangle with the least number of such edges is selected. One of such edges is picked and the corresponding objects are placed horizontally, on a grid. This edge is recorded to be in the fringe. The algorithm then finds a "welding" of a triangle to the fringe. A welding is an edge in the fringe where a triangle could be attached. If the algorithm succeeds in finding such a welding, it adds to the grid that vertex of the triangle which was not yet included in any edge in the fringe and records the two new edges in the fringe. This process continues iteratively until all objects of the diagram has been placed or until no more weldings can be found. An edge is only removed from the fringe when a welding to this edge has been found, and there is no room around this edge to place another vertex. When no more weldings can be found, but there are still triangles left, the algorithm searches for a possibility of attaching one of the remaining triangles to the existing structure by a vertex. If such a possibility is found, the corresponding edge of the found triangle is placed in the found space and the iterative process of welding triangles restarts. When logical groups are supplied, each of these groups is laid out independently. Then a diagram is constructed in which groups are objects and any two logical groups between which there exist morphisms are connected via a morphism. This diagram is laid out. Finally, the grid which includes all objects of the initial diagram is constructed by replacing the cells which contain logical groups with the corresponding laid out grids, and by correspondingly expanding the rows and columns. The sequential layout algorithm begins by constructing the underlying undirected graph defined by the morphisms obtained after simplifying premises and conclusions and merging them (see above). 
The vertex with the minimal degree is then picked up and depth-first search is started from it. All objects which are located at distance `n` from the root in the depth-first search tree, are positioned in the `n`-th column of the resulting grid. The sequential layout will therefore attempt to lay the objects out along a line. References ========== [Xypic] http://xy-pic.sourceforge.net/ """ from __future__ import print_function, division from sympy.categories import (CompositeMorphism, IdentityMorphism, NamedMorphism, Diagram) from sympy.core import Dict, Symbol from sympy.core.compatibility import iterable from sympy.printing import latex from sympy.sets import FiniteSet from sympy.utilities import default_sort_key from sympy.utilities.decorator import doctest_depends_on from itertools import chain __doctest_requires__ = {('preview_diagram',): 'pyglet'} class _GrowableGrid(object): """ Holds a growable grid of objects. It is possible to append or prepend a row or a column to the grid using the corresponding methods. Prepending rows or columns has the effect of changing the coordinates of the already existing elements. This class currently represents a naive implementation of the functionality with little attempt at optimisation. """ def __init__(self, width, height): self._width = width self._height = height self._array = [[None for j in range(width)] for i in range(height)] @property def width(self): return self._width @property def height(self): return self._height def __getitem__(self, i_j): """ Returns the element located at in the i-th line and j-th column. """ i, j = i_j return self._array[i][j] def __setitem__(self, i_j, newvalue): """ Sets the element located at in the i-th line and j-th column. """ i, j = i_j self._array[i][j] = newvalue def append_row(self): """ Appends an empty row to the grid. """ self._height += 1 self._array.append([None for j in range(self._width)]) def append_column(self): """ Appends an empty column to the grid. """ self._width += 1 for i in range(self._height): self._array[i].append(None) def prepend_row(self): """ Prepends the grid with an empty row. """ self._height += 1 self._array.insert(0, [None for j in range(self._width)]) def prepend_column(self): """ Prepends the grid with an empty column. """ self._width += 1 for i in range(self._height): self._array[i].insert(0, None) class DiagramGrid(object): r""" Constructs and holds the fitting of the diagram into a grid. The mission of this class is to analyse the structure of the supplied diagram and to place its objects on a grid such that, when the objects and the morphisms are actually drawn, the diagram would be "readable", in the sense that there will not be many intersections of moprhisms. This class does not perform any actual drawing. It does strive nevertheless to offer sufficient metadata to draw a diagram. Consider the following simple diagram. >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> from sympy import pprint >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) The simplest way to have a diagram laid out is the following: >>> grid = DiagramGrid(diagram) >>> (grid.width, grid.height) (2, 2) >>> pprint(grid) A B <BLANKLINE> C Sometimes one sees the diagram as consisting of logical groups. One can advise ``DiagramGrid`` as to such groups by employing the ``groups`` keyword argument. 
Consider the following diagram: >>> D = Object("D") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> h = NamedMorphism(D, A, "h") >>> k = NamedMorphism(D, B, "k") >>> diagram = Diagram([f, g, h, k]) Lay it out with generic layout: >>> grid = DiagramGrid(diagram) >>> pprint(grid) A B D <BLANKLINE> C Now, we can group the objects `A` and `D` to have them near one another: >>> grid = DiagramGrid(diagram, groups=[[A, D], B, C]) >>> pprint(grid) B C <BLANKLINE> A D Note how the positioning of the other objects changes. Further indications can be supplied to the constructor of :class:`DiagramGrid` using keyword arguments. The currently supported hints are explained in the following paragraphs. :class:`DiagramGrid` does not automatically guess which layout would suit the supplied diagram better. Consider, for example, the following linear diagram: >>> E = Object("E") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> h = NamedMorphism(C, D, "h") >>> i = NamedMorphism(D, E, "i") >>> diagram = Diagram([f, g, h, i]) When laid out with the generic layout, it does not get to look linear: >>> grid = DiagramGrid(diagram) >>> pprint(grid) A B <BLANKLINE> C D <BLANKLINE> E To get it laid out in a line, use ``layout="sequential"``: >>> grid = DiagramGrid(diagram, layout="sequential") >>> pprint(grid) A B C D E One may sometimes need to transpose the resulting layout. While this can always be done by hand, :class:`DiagramGrid` provides a hint for that purpose: >>> grid = DiagramGrid(diagram, layout="sequential", transpose=True) >>> pprint(grid) A <BLANKLINE> B <BLANKLINE> C <BLANKLINE> D <BLANKLINE> E Separate hints can also be provided for each group. For an example, refer to ``tests/test_drawing.py``, and see the different ways in which the five lemma [FiveLemma] can be laid out. See Also ======== Diagram References ========== [FiveLemma] https://en.wikipedia.org/wiki/Five_lemma """ @staticmethod def _simplify_morphisms(morphisms): """ Given a dictionary mapping morphisms to their properties, returns a new dictionary in which there are no morphisms which do not have properties, and which are compositions of other morphisms included in the dictionary. Identities are dropped as well. """ newmorphisms = {} for morphism, props in morphisms.items(): if isinstance(morphism, CompositeMorphism) and not props: continue elif isinstance(morphism, IdentityMorphism): continue else: newmorphisms[morphism] = props return newmorphisms @staticmethod def _merge_premises_conclusions(premises, conclusions): """ Given two dictionaries of morphisms and their properties, produces a single dictionary which includes elements from both dictionaries. If a morphism has some properties in premises and also in conclusions, the properties in conclusions take priority. """ return dict(chain(premises.items(), conclusions.items())) @staticmethod def _juxtapose_edges(edge1, edge2): """ If ``edge1`` and ``edge2`` have precisely one common endpoint, returns an edge which would form a triangle with ``edge1`` and ``edge2``. If ``edge1`` and ``edge2`` don't have a common endpoint, returns ``None``. If ``edge1`` and ``edge`` are the same edge, returns ``None``. """ intersection = edge1 & edge2 if len(intersection) != 1: # The edges either have no common points or are equal. return None # The edges have a common endpoint. Extract the different # endpoints and set up the new edge. 
return (edge1 - intersection) | (edge2 - intersection) @staticmethod def _add_edge_append(dictionary, edge, elem): """ If ``edge`` is not in ``dictionary``, adds ``edge`` to the dictionary and sets its value to ``[elem]``. Otherwise appends ``elem`` to the value of existing entry. Note that edges are undirected, thus `(A, B) = (B, A)`. """ if edge in dictionary: dictionary[edge].append(elem) else: dictionary[edge] = [elem] @staticmethod def _build_skeleton(morphisms): """ Creates a dictionary which maps edges to corresponding morphisms. Thus for a morphism `f:A\rightarrow B`, the edge `(A, B)` will be associated with `f`. This function also adds to the list those edges which are formed by juxtaposition of two edges already in the list. These new edges are not associated with any morphism and are only added to assure that the diagram can be decomposed into triangles. """ edges = {} # Create edges for morphisms. for morphism in morphisms: DiagramGrid._add_edge_append( edges, frozenset([morphism.domain, morphism.codomain]), morphism) # Create new edges by juxtaposing existing edges. edges1 = dict(edges) for w in edges1: for v in edges1: wv = DiagramGrid._juxtapose_edges(w, v) if wv and wv not in edges: edges[wv] = [] return edges @staticmethod def _list_triangles(edges): """ Builds the set of triangles formed by the supplied edges. The triangles are arbitrary and need not be commutative. A triangle is a set that contains all three of its sides. """ triangles = set() for w in edges: for v in edges: wv = DiagramGrid._juxtapose_edges(w, v) if wv and wv in edges: triangles.add(frozenset([w, v, wv])) return triangles @staticmethod def _drop_redundant_triangles(triangles, skeleton): """ Returns a list which contains only those triangles who have morphisms associated with at least two edges. """ return [tri for tri in triangles if len([e for e in tri if skeleton[e]]) >= 2] @staticmethod def _morphism_length(morphism): """ Returns the length of a morphism. The length of a morphism is the number of components it consists of. A non-composite morphism is of length 1. """ if isinstance(morphism, CompositeMorphism): return len(morphism.components) else: return 1 @staticmethod def _compute_triangle_min_sizes(triangles, edges): r""" Returns a dictionary mapping triangles to their minimal sizes. The minimal size of a triangle is the sum of maximal lengths of morphisms associated to the sides of the triangle. The length of a morphism is the number of components it consists of. A non-composite morphism is of length 1. Sorting triangles by this metric attempts to address two aspects of layout. For triangles with only simple morphisms in the edge, this assures that triangles with all three edges visible will get typeset after triangles with less visible edges, which sometimes minimizes the necessity in diagonal arrows. For triangles with composite morphisms in the edges, this assures that objects connected with shorter morphisms will be laid out first, resulting the visual proximity of those objects which are connected by shorter morphisms. """ triangle_sizes = {} for triangle in triangles: size = 0 for e in triangle: morphisms = edges[e] if morphisms: size += max(DiagramGrid._morphism_length(m) for m in morphisms) triangle_sizes[triangle] = size return triangle_sizes @staticmethod def _triangle_objects(triangle): """ Given a triangle, returns the objects included in it. """ # A triangle is a frozenset of three two-element frozensets # (the edges). 
This chains the three edges together and # creates a frozenset from the iterator, thus producing a # frozenset of objects of the triangle. return frozenset(chain(*tuple(triangle))) @staticmethod def _other_vertex(triangle, edge): """ Given a triangle and an edge of it, returns the vertex which opposes the edge. """ # This gets the set of objects of the triangle and then # subtracts the set of objects employed in ``edge`` to get the # vertex opposite to ``edge``. return list(DiagramGrid._triangle_objects(triangle) - set(edge))[0] @staticmethod def _empty_point(pt, grid): """ Checks if the cell at coordinates ``pt`` is either empty or out of the bounds of the grid. """ if (pt[0] < 0) or (pt[1] < 0) or \ (pt[0] >= grid.height) or (pt[1] >= grid.width): return True return grid[pt] is None @staticmethod def _put_object(coords, obj, grid, fringe): """ Places an object at the coordinate ``cords`` in ``grid``, growing the grid and updating ``fringe``, if necessary. Returns (0, 0) if no row or column has been prepended, (1, 0) if a row was prepended, (0, 1) if a column was prepended and (1, 1) if both a column and a row were prepended. """ (i, j) = coords offset = (0, 0) if i == -1: grid.prepend_row() i = 0 offset = (1, 0) for k in range(len(fringe)): ((i1, j1), (i2, j2)) = fringe[k] fringe[k] = ((i1 + 1, j1), (i2 + 1, j2)) elif i == grid.height: grid.append_row() if j == -1: j = 0 offset = (offset[0], 1) grid.prepend_column() for k in range(len(fringe)): ((i1, j1), (i2, j2)) = fringe[k] fringe[k] = ((i1, j1 + 1), (i2, j2 + 1)) elif j == grid.width: grid.append_column() grid[i, j] = obj return offset @staticmethod def _choose_target_cell(pt1, pt2, edge, obj, skeleton, grid): """ Given two points, ``pt1`` and ``pt2``, and the welding edge ``edge``, chooses one of the two points to place the opposing vertex ``obj`` of the triangle. If neither of this points fits, returns ``None``. """ pt1_empty = DiagramGrid._empty_point(pt1, grid) pt2_empty = DiagramGrid._empty_point(pt2, grid) if pt1_empty and pt2_empty: # Both cells are empty. Of these two, choose that cell # which will assure that a visible edge of the triangle # will be drawn perpendicularly to the current welding # edge. A = grid[edge[0]] if skeleton.get(frozenset([A, obj])): return pt1 else: return pt2 if pt1_empty: return pt1 elif pt2_empty: return pt2 else: return None @staticmethod def _find_triangle_to_weld(triangles, fringe, grid): """ Finds, if possible, a triangle and an edge in the fringe to which the triangle could be attached. Returns the tuple containing the triangle and the index of the corresponding edge in the fringe. This function relies on the fact that objects are unique in the diagram. """ for triangle in triangles: for (a, b) in fringe: if frozenset([grid[a], grid[b]]) in triangle: return (triangle, (a, b)) return None @staticmethod def _weld_triangle(tri, welding_edge, fringe, grid, skeleton): """ If possible, welds the triangle ``tri`` to ``fringe`` and returns ``False``. If this method encounters a degenerate situation in the fringe and corrects it such that a restart of the search is required, it returns ``True`` (which means that a restart in finding triangle weldings is required). A degenerate situation is a situation when an edge listed in the fringe does not belong to the visual boundary of the diagram. """ a, b = welding_edge target_cell = None obj = DiagramGrid._other_vertex(tri, (grid[a], grid[b])) # We now have a triangle and an edge where it can be welded to # the fringe. 
Decide where to place the other vertex of the # triangle and check for degenerate situations en route. if (abs(a[0] - b[0]) == 1) and (abs(a[1] - b[1]) == 1): # A diagonal edge. target_cell = (a[0], b[1]) if grid[target_cell]: # That cell is already occupied. target_cell = (b[0], a[1]) if grid[target_cell]: # Degenerate situation, this edge is not # on the actual fringe. Correct the # fringe and go on. fringe.remove((a, b)) return True elif a[0] == b[0]: # A horizontal edge. We first attempt to build the # triangle in the downward direction. down_left = a[0] + 1, a[1] down_right = a[0] + 1, b[1] target_cell = DiagramGrid._choose_target_cell( down_left, down_right, (a, b), obj, skeleton, grid) if not target_cell: # No room below this edge. Check above. up_left = a[0] - 1, a[1] up_right = a[0] - 1, b[1] target_cell = DiagramGrid._choose_target_cell( up_left, up_right, (a, b), obj, skeleton, grid) if not target_cell: # This edge is not in the fringe, remove it # and restart. fringe.remove((a, b)) return True elif a[1] == b[1]: # A vertical edge. We will attempt to place the other # vertex of the triangle to the right of this edge. right_up = a[0], a[1] + 1 right_down = b[0], a[1] + 1 target_cell = DiagramGrid._choose_target_cell( right_up, right_down, (a, b), obj, skeleton, grid) if not target_cell: # No room to the left. See what's to the right. left_up = a[0], a[1] - 1 left_down = b[0], a[1] - 1 target_cell = DiagramGrid._choose_target_cell( left_up, left_down, (a, b), obj, skeleton, grid) if not target_cell: # This edge is not in the fringe, remove it # and restart. fringe.remove((a, b)) return True # We now know where to place the other vertex of the # triangle. offset = DiagramGrid._put_object(target_cell, obj, grid, fringe) # Take care of the displacement of coordinates if a row or # a column was prepended. target_cell = (target_cell[0] + offset[0], target_cell[1] + offset[1]) a = (a[0] + offset[0], a[1] + offset[1]) b = (b[0] + offset[0], b[1] + offset[1]) fringe.extend([(a, target_cell), (b, target_cell)]) # No restart is required. return False @staticmethod def _triangle_key(tri, triangle_sizes): """ Returns a key for the supplied triangle. It should be the same independently of the hash randomisation. """ objects = sorted( DiagramGrid._triangle_objects(tri), key=default_sort_key) return (triangle_sizes[tri], default_sort_key(objects)) @staticmethod def _pick_root_edge(tri, skeleton): """ For a given triangle always picks the same root edge. The root edge is the edge that will be placed first on the grid. """ candidates = [sorted(e, key=default_sort_key) for e in tri if skeleton[e]] sorted_candidates = sorted(candidates, key=default_sort_key) # Don't forget to assure the proper ordering of the vertices # in this edge. return tuple(sorted(sorted_candidates[0], key=default_sort_key)) @staticmethod def _drop_irrelevant_triangles(triangles, placed_objects): """ Returns only those triangles whose set of objects is not completely included in ``placed_objects``. """ return [tri for tri in triangles if not placed_objects.issuperset( DiagramGrid._triangle_objects(tri))] @staticmethod def _grow_pseudopod(triangles, fringe, grid, skeleton, placed_objects): """ Starting from an object in the existing structure on the grid, adds an edge to which a triangle from ``triangles`` could be welded. If this method has found a way to do so, it returns the object it has just added. This method should be applied when ``_weld_triangle`` cannot find weldings any more. 
""" for i in range(grid.height): for j in range(grid.width): obj = grid[i, j] if not obj: continue # Here we need to choose a triangle which has only # ``obj`` in common with the existing structure. The # situations when this is not possible should be # handled elsewhere. def good_triangle(tri): objs = DiagramGrid._triangle_objects(tri) return obj in objs and \ placed_objects & (objs - {obj}) == set() tris = [tri for tri in triangles if good_triangle(tri)] if not tris: # This object is not interesting. continue # Pick the "simplest" of the triangles which could be # attached. Remember that the list of triangles is # sorted according to their "simplicity" (see # _compute_triangle_min_sizes for the metric). # # Note that ``tris`` are sequentially built from # ``triangles``, so we don't have to worry about hash # randomisation. tri = tris[0] # We have found a triangle which could be attached to # the existing structure by a vertex. candidates = sorted([e for e in tri if skeleton[e]], key=lambda e: FiniteSet(*e).sort_key()) edges = [e for e in candidates if obj in e] # Note that a meaningful edge (i.e., and edge that is # associated with a morphism) containing ``obj`` # always exists. That's because all triangles are # guaranteed to have at least two meaningful edges. # See _drop_redundant_triangles. # Get the object at the other end of the edge. edge = edges[0] other_obj = tuple(edge - frozenset([obj]))[0] # Now check for free directions. When checking for # free directions, prefer the horizontal and vertical # directions. neighbours = [(i - 1, j), (i, j + 1), (i + 1, j), (i, j - 1), (i - 1, j - 1), (i - 1, j + 1), (i + 1, j - 1), (i + 1, j + 1)] for pt in neighbours: if DiagramGrid._empty_point(pt, grid): # We have a found a place to grow the # pseudopod into. offset = DiagramGrid._put_object( pt, other_obj, grid, fringe) i += offset[0] j += offset[1] pt = (pt[0] + offset[0], pt[1] + offset[1]) fringe.append(((i, j), pt)) return other_obj # This diagram is actually cooler that I can handle. Fail cowardly. return None @staticmethod def _handle_groups(diagram, groups, merged_morphisms, hints): """ Given the slightly preprocessed morphisms of the diagram, produces a grid laid out according to ``groups``. If a group has hints, it is laid out with those hints only, without any influence from ``hints``. Otherwise, it is laid out with ``hints``. """ def lay_out_group(group, local_hints): """ If ``group`` is a set of objects, uses a ``DiagramGrid`` to lay it out and returns the grid. Otherwise returns the object (i.e., ``group``). If ``local_hints`` is not empty, it is supplied to ``DiagramGrid`` as the dictionary of hints. Otherwise, the ``hints`` argument of ``_handle_groups`` is used. """ if isinstance(group, FiniteSet): # Set up the corresponding object-to-group # mappings. for obj in group: obj_groups[obj] = group # Lay out the current group. if local_hints: groups_grids[group] = DiagramGrid( diagram.subdiagram_from_objects(group), **local_hints) else: groups_grids[group] = DiagramGrid( diagram.subdiagram_from_objects(group), **hints) else: obj_groups[group] = group def group_to_finiteset(group): """ Converts ``group`` to a :class:``FiniteSet`` if it is an iterable. """ if iterable(group): return FiniteSet(*group) else: return group obj_groups = {} groups_grids = {} # We would like to support various containers to represent # groups. To achieve that, before laying each group out, it # should be converted to a FiniteSet, because that is what the # following code expects. 
if isinstance(groups, dict) or isinstance(groups, Dict): finiteset_groups = {} for group, local_hints in groups.items(): finiteset_group = group_to_finiteset(group) finiteset_groups[finiteset_group] = local_hints lay_out_group(group, local_hints) groups = finiteset_groups else: finiteset_groups = [] for group in groups: finiteset_group = group_to_finiteset(group) finiteset_groups.append(finiteset_group) lay_out_group(finiteset_group, None) groups = finiteset_groups new_morphisms = [] for morphism in merged_morphisms: dom = obj_groups[morphism.domain] cod = obj_groups[morphism.codomain] # Note that we are not really interested in morphisms # which do not employ two different groups, because # these do not influence the layout. if dom != cod: # These are essentially unnamed morphisms; they are # not going to mess in the final layout. By giving # them the same names, we avoid unnecessary # duplicates. new_morphisms.append(NamedMorphism(dom, cod, "dummy")) # Lay out the new diagram. Since these are dummy morphisms, # properties and conclusions are irrelevant. top_grid = DiagramGrid(Diagram(new_morphisms)) # We now have to substitute the groups with the corresponding # grids, laid out at the beginning of this function. Compute # the size of each row and column in the grid, so that all # nested grids fit. def group_size(group): """ For the supplied group (or object, eventually), returns the size of the cell that will hold this group (object). """ if group in groups_grids: grid = groups_grids[group] return (grid.height, grid.width) else: return (1, 1) row_heights = [max(group_size(top_grid[i, j])[0] for j in range(top_grid.width)) for i in range(top_grid.height)] column_widths = [max(group_size(top_grid[i, j])[1] for i in range(top_grid.height)) for j in range(top_grid.width)] grid = _GrowableGrid(sum(column_widths), sum(row_heights)) real_row = 0 real_column = 0 for logical_row in range(top_grid.height): for logical_column in range(top_grid.width): obj = top_grid[logical_row, logical_column] if obj in groups_grids: # This is a group. Copy the corresponding grid in # place. local_grid = groups_grids[obj] for i in range(local_grid.height): for j in range(local_grid.width): grid[real_row + i, real_column + j] = local_grid[i, j] else: # This is an object. Just put it there. grid[real_row, real_column] = obj real_column += column_widths[logical_column] real_column = 0 real_row += row_heights[logical_row] return grid @staticmethod def _generic_layout(diagram, merged_morphisms): """ Produces the generic layout for the supplied diagram. """ all_objects = set(diagram.objects) if len(all_objects) == 1: # There only one object in the diagram, just put in on 1x1 # grid. grid = _GrowableGrid(1, 1) grid[0, 0] = tuple(all_objects)[0] return grid skeleton = DiagramGrid._build_skeleton(merged_morphisms) grid = _GrowableGrid(2, 1) if len(skeleton) == 1: # This diagram contains only one morphism. Draw it # horizontally. objects = sorted(all_objects, key=default_sort_key) grid[0, 0] = objects[0] grid[0, 1] = objects[1] return grid triangles = DiagramGrid._list_triangles(skeleton) triangles = DiagramGrid._drop_redundant_triangles(triangles, skeleton) triangle_sizes = DiagramGrid._compute_triangle_min_sizes( triangles, skeleton) triangles = sorted(triangles, key=lambda tri: DiagramGrid._triangle_key(tri, triangle_sizes)) # Place the first edge on the grid. 
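        # A rough sketch of the bookkeeping involved (the coordinates are
        # illustrative): after the root edge has been placed, the fringe
        # is [((0, 0), (0, 1))], i.e. a list of edges given as pairs of
        # grid coordinates; each successful welding below appends the two
        # new sides of the welded triangle to this list.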
        root_edge = DiagramGrid._pick_root_edge(triangles[0], skeleton)
        grid[0, 0], grid[0, 1] = root_edge
        fringe = [((0, 0), (0, 1))]

        # Record which objects we now have on the grid.
        placed_objects = set(root_edge)

        while placed_objects != all_objects:
            welding = DiagramGrid._find_triangle_to_weld(
                triangles, fringe, grid)

            if welding:
                (triangle, welding_edge) = welding

                restart_required = DiagramGrid._weld_triangle(
                    triangle, welding_edge, fringe, grid, skeleton)
                if restart_required:
                    continue

                placed_objects.update(
                    DiagramGrid._triangle_objects(triangle))
            else:
                # No more weldings found.  Try to attach triangles by
                # vertices.
                new_obj = DiagramGrid._grow_pseudopod(
                    triangles, fringe, grid, skeleton, placed_objects)

                if not new_obj:
                    # No more triangles can be attached, not even by
                    # the edge.  We will set up a new diagram out of
                    # what has been left, lay it out independently,
                    # and then attach it to this one.
                    remaining_objects = all_objects - placed_objects
                    remaining_diagram = diagram.subdiagram_from_objects(
                        FiniteSet(*remaining_objects))
                    remaining_grid = DiagramGrid(remaining_diagram)

                    # Now, let's glue ``remaining_grid`` to ``grid``.
                    final_width = grid.width + remaining_grid.width
                    final_height = max(grid.height, remaining_grid.height)
                    final_grid = _GrowableGrid(final_width, final_height)

                    for i in range(grid.height):
                        for j in range(grid.width):
                            final_grid[i, j] = grid[i, j]

                    start_j = grid.width
                    for i in range(remaining_grid.height):
                        for j in range(remaining_grid.width):
                            final_grid[i, start_j + j] = remaining_grid[i, j]

                    return final_grid

                placed_objects.add(new_obj)

            triangles = DiagramGrid._drop_irrelevant_triangles(
                triangles, placed_objects)

        return grid

    @staticmethod
    def _get_undirected_graph(objects, merged_morphisms):
        """
        Given the objects and the relevant morphisms of a diagram,
        returns the adjacency lists of the underlying undirected
        graph.
        """
        adjlists = {}
        for obj in objects:
            adjlists[obj] = []

        for morphism in merged_morphisms:
            adjlists[morphism.domain].append(morphism.codomain)
            adjlists[morphism.codomain].append(morphism.domain)

        # Assure that the objects in the adjacency list are always in
        # the same order.
        for obj in adjlists.keys():
            adjlists[obj].sort(key=default_sort_key)

        return adjlists

    @staticmethod
    def _sequential_layout(diagram, merged_morphisms):
        r"""
        Lays out the diagram in "sequential" layout.  This method
        will attempt to produce a result as close to a line as
        possible.  For linear diagrams, the result will actually be a
        line.
        """
        objects = diagram.objects
        sorted_objects = sorted(objects, key=default_sort_key)

        # Set up the adjacency lists of the underlying undirected
        # graph of ``merged_morphisms``.
        adjlists = DiagramGrid._get_undirected_graph(objects,
                                                     merged_morphisms)

        # Find an object with the minimal degree.  This is going to
        # be the root.
        root = sorted_objects[0]
        mindegree = len(adjlists[root])
        for obj in sorted_objects:
            current_degree = len(adjlists[obj])
            if current_degree < mindegree:
                root = obj
                mindegree = current_degree

        grid = _GrowableGrid(1, 1)
        grid[0, 0] = root

        placed_objects = {root}

        def place_objects(pt, placed_objects):
            """
            Does depth-first search in the underlying graph of the
            diagram and places the objects en route.
            """
            # We will start placing new objects from here.
            new_pt = (pt[0], pt[1] + 1)

            for adjacent_obj in adjlists[grid[pt]]:
                if adjacent_obj in placed_objects:
                    # This object has already been placed.
continue DiagramGrid._put_object(new_pt, adjacent_obj, grid, []) placed_objects.add(adjacent_obj) placed_objects.update(place_objects(new_pt, placed_objects)) new_pt = (new_pt[0] + 1, new_pt[1]) return placed_objects place_objects((0, 0), placed_objects) return grid @staticmethod def _drop_inessential_morphisms(merged_morphisms): r""" Removes those morphisms which should appear in the diagram, but which have no relevance to object layout. Currently this removes "loop" morphisms: the non-identity morphisms with the same domains and codomains. """ morphisms = [m for m in merged_morphisms if m.domain != m.codomain] return morphisms @staticmethod def _get_connected_components(objects, merged_morphisms): """ Given a container of morphisms, returns a list of connected components formed by these morphisms. A connected component is represented by a diagram consisting of the corresponding morphisms. """ component_index = {} for o in objects: component_index[o] = None # Get the underlying undirected graph of the diagram. adjlist = DiagramGrid._get_undirected_graph(objects, merged_morphisms) def traverse_component(object, current_index): """ Does a depth-first search traversal of the component containing ``object``. """ component_index[object] = current_index for o in adjlist[object]: if component_index[o] is None: traverse_component(o, current_index) # Traverse all components. current_index = 0 for o in adjlist: if component_index[o] is None: traverse_component(o, current_index) current_index += 1 # List the objects of the components. component_objects = [[] for i in range(current_index)] for o, idx in component_index.items(): component_objects[idx].append(o) # Finally, list the morphisms belonging to each component. # # Note: If some objects are isolated, they will not get any # morphisms at this stage, and since the layout algorithm # relies, we are essentially going to lose this object. # Therefore, check if there are isolated objects and, for each # of them, provide the trivial identity morphism. It will get # discarded later, but the object will be there. component_morphisms = [] for component in component_objects: current_morphisms = {} for m in merged_morphisms: if (m.domain in component) and (m.codomain in component): current_morphisms[m] = merged_morphisms[m] if len(component) == 1: # Let's add an identity morphism, for the sake of # surely having morphisms in this component. current_morphisms[IdentityMorphism(component[0])] = FiniteSet() component_morphisms.append(Diagram(current_morphisms)) return component_morphisms def __init__(self, diagram, groups=None, **hints): premises = DiagramGrid._simplify_morphisms(diagram.premises) conclusions = DiagramGrid._simplify_morphisms(diagram.conclusions) all_merged_morphisms = DiagramGrid._merge_premises_conclusions( premises, conclusions) merged_morphisms = DiagramGrid._drop_inessential_morphisms( all_merged_morphisms) # Store the merged morphisms for later use. self._morphisms = all_merged_morphisms components = DiagramGrid._get_connected_components( diagram.objects, all_merged_morphisms) if groups and (groups != diagram.objects): # Lay out the diagram according to the groups. self._grid = DiagramGrid._handle_groups( diagram, groups, merged_morphisms, hints) elif len(components) > 1: # Note that we check for connectedness _before_ checking # the layout hints because the layout strategies don't # know how to deal with disconnected diagrams. # The diagram is disconnected. Lay out the components # independently. 
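            # For example (the sizes are illustrative): two components
            # laid out as a 2x2 grid and a 1x3 grid are glued into a
            # single 5x2 grid, the second component starting at column 2.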
grids = [] # Sort the components to eventually get the grids arranged # in a fixed, hash-independent order. components = sorted(components, key=default_sort_key) for component in components: grid = DiagramGrid(component, **hints) grids.append(grid) # Throw the grids together, in a line. total_width = sum(g.width for g in grids) total_height = max(g.height for g in grids) grid = _GrowableGrid(total_width, total_height) start_j = 0 for g in grids: for i in range(g.height): for j in range(g.width): grid[i, start_j + j] = g[i, j] start_j += g.width self._grid = grid elif "layout" in hints: if hints["layout"] == "sequential": self._grid = DiagramGrid._sequential_layout( diagram, merged_morphisms) else: self._grid = DiagramGrid._generic_layout(diagram, merged_morphisms) if hints.get("transpose"): # Transpose the resulting grid. grid = _GrowableGrid(self._grid.height, self._grid.width) for i in range(self._grid.height): for j in range(self._grid.width): grid[j, i] = self._grid[i, j] self._grid = grid @property def width(self): """ Returns the number of columns in this diagram layout. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> grid.width 2 """ return self._grid.width @property def height(self): """ Returns the number of rows in this diagram layout. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> grid.height 2 """ return self._grid.height def __getitem__(self, i_j): """ Returns the object placed in the row ``i`` and column ``j``. The indices are 0-based. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> (grid[0, 0], grid[0, 1]) (Object("A"), Object("B")) >>> (grid[1, 0], grid[1, 1]) (None, Object("C")) """ i, j = i_j return self._grid[i, j] @property def morphisms(self): """ Returns those morphisms (and their properties) which are sufficiently meaningful to be drawn. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> grid.morphisms {NamedMorphism(Object("A"), Object("B"), "f"): EmptySet, NamedMorphism(Object("B"), Object("C"), "g"): EmptySet} """ return self._morphisms def __str__(self): """ Produces a string representation of this class. This method returns a string representation of the underlying list of lists of objects. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> print(grid) [[Object("A"), Object("B")], [None, Object("C")]] """ return repr(self._grid._array) class ArrowStringDescription(object): r""" Stores the information necessary for producing an Xy-pic description of an arrow. The principal goal of this class is to abstract away the string representation of an arrow and to also provide the functionality to produce the actual Xy-pic string. ``unit`` sets the unit which will be used to specify the amount of curving and other distances. ``horizontal_direction`` should be a string of ``"r"`` or ``"l"`` specifying the horizontal offset of the target cell of the arrow relatively to the current one. ``vertical_direction`` should specify the vertical offset using a series of either ``"d"`` or ``"u"``. ``label_position`` should be either ``"^"``, ``"_"``, or ``"|"`` to specify that the label should be positioned above the arrow, below the arrow or just over the arrow, in a break. Note that the notions "above" and "below" are relative to arrow direction. ``label`` stores the morphism label. This works as follows (disregard the yet unexplained arguments): >>> from sympy.categories.diagram_drawing import ArrowStringDescription >>> astr = ArrowStringDescription( ... unit="mm", curving=None, curving_amount=None, ... looping_start=None, looping_end=None, horizontal_direction="d", ... vertical_direction="r", label_position="_", label="f") >>> print(str(astr)) \ar[dr]_{f} ``curving`` should be one of ``"^"``, ``"_"`` to specify in which direction the arrow is going to curve. ``curving_amount`` is a number describing how many ``unit``'s the morphism is going to curve: >>> astr = ArrowStringDescription( ... unit="mm", curving="^", curving_amount=12, ... looping_start=None, looping_end=None, horizontal_direction="d", ... vertical_direction="r", label_position="_", label="f") >>> print(str(astr)) \ar@/^12mm/[dr]_{f} ``looping_start`` and ``looping_end`` are currently only used for loop morphisms, those which have the same domain and codomain. These two attributes should store a valid Xy-pic direction and specify, correspondingly, the direction the arrow gets out into and the direction the arrow gets back from: >>> astr = ArrowStringDescription( ... unit="mm", curving=None, curving_amount=None, ... looping_start="u", looping_end="l", horizontal_direction="", ... vertical_direction="", label_position="_", label="f") >>> print(str(astr)) \ar@(u,l)[]_{f} ``label_displacement`` controls how far the arrow label is from the ends of the arrow. For example, to position the arrow label near the arrow head, use ">": >>> astr = ArrowStringDescription( ... unit="mm", curving="^", curving_amount=12, ... looping_start=None, looping_end=None, horizontal_direction="d", ... vertical_direction="r", label_position="_", label="f") >>> astr.label_displacement = ">" >>> print(str(astr)) \ar@/^12mm/[dr]_>{f} Finally, ``arrow_style`` is used to specify the arrow style. To get a dashed arrow, for example, use "{-->}" as arrow style: >>> astr = ArrowStringDescription( ... unit="mm", curving="^", curving_amount=12, ... looping_start=None, looping_end=None, horizontal_direction="d", ... 
vertical_direction="r", label_position="_", label="f") >>> astr.arrow_style = "{-->}" >>> print(str(astr)) \ar@/^12mm/@{-->}[dr]_{f} Notes ===== Instances of :class:`ArrowStringDescription` will be constructed by :class:`XypicDiagramDrawer` and provided for further use in formatters. The user is not expected to construct instances of :class:`ArrowStringDescription` themselves. To be able to properly utilise this class, the reader is encouraged to checkout the Xy-pic user guide, available at [Xypic]. See Also ======== XypicDiagramDrawer References ========== [Xypic] http://xy-pic.sourceforge.net/ """ def __init__(self, unit, curving, curving_amount, looping_start, looping_end, horizontal_direction, vertical_direction, label_position, label): self.unit = unit self.curving = curving self.curving_amount = curving_amount self.looping_start = looping_start self.looping_end = looping_end self.horizontal_direction = horizontal_direction self.vertical_direction = vertical_direction self.label_position = label_position self.label = label self.label_displacement = "" self.arrow_style = "" # This flag shows that the position of the label of this # morphism was set while typesetting a curved morphism and # should not be modified later. self.forced_label_position = False def __str__(self): if self.curving: curving_str = "@/%s%d%s/" % (self.curving, self.curving_amount, self.unit) else: curving_str = "" if self.looping_start and self.looping_end: looping_str = "@(%s,%s)" % (self.looping_start, self.looping_end) else: looping_str = "" if self.arrow_style: style_str = "@" + self.arrow_style else: style_str = "" return "\\ar%s%s%s[%s%s]%s%s{%s}" % \ (curving_str, looping_str, style_str, self.horizontal_direction, self.vertical_direction, self.label_position, self.label_displacement, self.label) class XypicDiagramDrawer(object): r""" Given a :class:`~.Diagram` and the corresponding :class:`DiagramGrid`, produces the Xy-pic representation of the diagram. The most important method in this class is ``draw``. Consider the following triangle diagram: >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import DiagramGrid, XypicDiagramDrawer >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g], {g * f: "unique"}) To draw this diagram, its objects need to be laid out with a :class:`DiagramGrid`:: >>> grid = DiagramGrid(diagram) Finally, the drawing: >>> drawer = XypicDiagramDrawer() >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } For further details see the docstring of this method. To control the appearance of the arrows, formatters are used. The dictionary ``arrow_formatters`` maps morphisms to formatter functions. A formatter is accepts an :class:`ArrowStringDescription` and is allowed to modify any of the arrow properties exposed thereby. For example, to have all morphisms with the property ``unique`` appear as dashed arrows, and to have their names prepended with `\exists !`, the following should be done: >>> def formatter(astr): ... astr.label = r"\exists !" + astr.label ... astr.arrow_style = "{-->}" >>> drawer.arrow_formatters["unique"] = formatter >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar@{-->}[d]_{\exists !g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } To modify the appearance of all arrows in the diagram, set ``default_arrow_formatter``. 
For example, to place all morphism labels a little bit farther from the arrow head so that they look more centred, do as follows: >>> def default_formatter(astr): ... astr.label_displacement = "(0.45)" >>> drawer.default_arrow_formatter = default_formatter >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar@{-->}[d]_(0.45){\exists !g\circ f} \ar[r]^(0.45){f} & B \ar[ld]^(0.45){g} \\ C & } In some diagrams some morphisms are drawn as curved arrows. Consider the following diagram: >>> D = Object("D") >>> E = Object("E") >>> h = NamedMorphism(D, A, "h") >>> k = NamedMorphism(D, B, "k") >>> diagram = Diagram([f, g, h, k]) >>> grid = DiagramGrid(diagram) >>> drawer = XypicDiagramDrawer() >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[r]_{f} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_3mm/[ll]_{h} \\ & C & } To control how far the morphisms are curved by default, one can use the ``unit`` and ``default_curving_amount`` attributes: >>> drawer.unit = "cm" >>> drawer.default_curving_amount = 1 >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[r]_{f} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_1cm/[ll]_{h} \\ & C & } In some diagrams, there are multiple curved morphisms between the same two objects. To control by how much the curving changes between two such successive morphisms, use ``default_curving_step``: >>> drawer.default_curving_step = 1 >>> h1 = NamedMorphism(A, D, "h1") >>> diagram = Diagram([f, g, h, k, h1]) >>> grid = DiagramGrid(diagram) >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[r]_{f} \ar@/^1cm/[rr]^{h_{1}} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_2cm/[ll]_{h} \\ & C & } The default value of ``default_curving_step`` is 4 units. See Also ======== draw, ArrowStringDescription """ def __init__(self): self.unit = "mm" self.default_curving_amount = 3 self.default_curving_step = 4 # This dictionary maps properties to the corresponding arrow # formatters. self.arrow_formatters = {} # This is the default arrow formatter which will be applied to # each arrow independently of its properties. self.default_arrow_formatter = None @staticmethod def _process_loop_morphism(i, j, grid, morphisms_str_info, object_coords): """ Produces the information required for constructing the string representation of a loop morphism. This function is invoked from ``_process_morphism``. See Also ======== _process_morphism """ curving = "" label_pos = "^" looping_start = "" looping_end = "" # This is a loop morphism. Count how many morphisms stick # in each of the four quadrants. Note that straight # vertical and horizontal morphisms count in two quadrants # at the same time (i.e., a morphism going up counts both # in the first and the second quadrants). # The usual numbering (counterclockwise) of quadrants # applies. quadrant = [0, 0, 0, 0] obj = grid[i, j] for m, m_str_info in morphisms_str_info.items(): if (m.domain == obj) and (m.codomain == obj): # That's another loop morphism. Check how it # loops and mark the corresponding quadrants as # busy. (l_s, l_e) = (m_str_info.looping_start, m_str_info.looping_end) if (l_s, l_e) == ("r", "u"): quadrant[0] += 1 elif (l_s, l_e) == ("u", "l"): quadrant[1] += 1 elif (l_s, l_e) == ("l", "d"): quadrant[2] += 1 elif (l_s, l_e) == ("d", "r"): quadrant[3] += 1 continue if m.domain == obj: (end_i, end_j) = object_coords[m.codomain] goes_out = True elif m.codomain == obj: (end_i, end_j) = object_coords[m.domain] goes_out = False else: continue d_i = end_i - i d_j = end_j - j m_curving = m_str_info.curving if (d_i != 0) and (d_j != 0): # This is really a diagonal morphism. 
Detect the # quadrant. if (d_i > 0) and (d_j > 0): quadrant[0] += 1 elif (d_i > 0) and (d_j < 0): quadrant[1] += 1 elif (d_i < 0) and (d_j < 0): quadrant[2] += 1 elif (d_i < 0) and (d_j > 0): quadrant[3] += 1 elif d_i == 0: # Knowing where the other end of the morphism is # and which way it goes, we now have to decide # which quadrant is now the upper one and which is # the lower one. if d_j > 0: if goes_out: upper_quadrant = 0 lower_quadrant = 3 else: upper_quadrant = 3 lower_quadrant = 0 else: if goes_out: upper_quadrant = 2 lower_quadrant = 1 else: upper_quadrant = 1 lower_quadrant = 2 if m_curving: if m_curving == "^": quadrant[upper_quadrant] += 1 elif m_curving == "_": quadrant[lower_quadrant] += 1 else: # This morphism counts in both upper and lower # quadrants. quadrant[upper_quadrant] += 1 quadrant[lower_quadrant] += 1 elif d_j == 0: # Knowing where the other end of the morphism is # and which way it goes, we now have to decide # which quadrant is now the left one and which is # the right one. if d_i < 0: if goes_out: left_quadrant = 1 right_quadrant = 0 else: left_quadrant = 0 right_quadrant = 1 else: if goes_out: left_quadrant = 3 right_quadrant = 2 else: left_quadrant = 2 right_quadrant = 3 if m_curving: if m_curving == "^": quadrant[left_quadrant] += 1 elif m_curving == "_": quadrant[right_quadrant] += 1 else: # This morphism counts in both upper and lower # quadrants. quadrant[left_quadrant] += 1 quadrant[right_quadrant] += 1 # Pick the freest quadrant to curve our morphism into. freest_quadrant = 0 for i in range(4): if quadrant[i] < quadrant[freest_quadrant]: freest_quadrant = i # Now set up proper looping. (looping_start, looping_end) = [("r", "u"), ("u", "l"), ("l", "d"), ("d", "r")][freest_quadrant] return (curving, label_pos, looping_start, looping_end) @staticmethod def _process_horizontal_morphism(i, j, target_j, grid, morphisms_str_info, object_coords): """ Produces the information required for constructing the string representation of a horizontal morphism. This function is invoked from ``_process_morphism``. See Also ======== _process_morphism """ # The arrow is horizontal. Check if it goes from left to # right (``backwards == False``) or from right to left # (``backwards == True``). backwards = False start = j end = target_j if end < start: (start, end) = (end, start) backwards = True # Let's see which objects are there between ``start`` and # ``end``, and then count how many morphisms stick out # upwards, and how many stick out downwards. # # For example, consider the situation: # # B1 C1 # | | # A--B--C--D # | # B2 # # Between the objects `A` and `D` there are two objects: # `B` and `C`. Further, there are two morphisms which # stick out upward (the ones between `B1` and `B` and # between `C` and `C1`) and one morphism which sticks out # downward (the one between `B and `B2`). # # We need this information to decide how to curve the # arrow between `A` and `D`. First of all, since there # are two objects between `A` and `D``, we must curve the # arrow. Then, we will have it curve downward, because # there is more space (less morphisms stick out downward # than upward). 
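        # In the sketch above (the objects are illustrative), two
        # morphisms stick out upward (B--B1 and C--C1) and one downward
        # (B--B2), so ``len(up) < len(down)`` is false, the ``else``
        # branch below is taken, and the arrow between `A` and `D` is
        # curved downward, as explained above.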
up = [] down = [] straight_horizontal = [] for k in range(start + 1, end): obj = grid[i, k] if not obj: continue for m in morphisms_str_info: if m.domain == obj: (end_i, end_j) = object_coords[m.codomain] elif m.codomain == obj: (end_i, end_j) = object_coords[m.domain] else: continue if end_i > i: down.append(m) elif end_i < i: up.append(m) elif not morphisms_str_info[m].curving: # This is a straight horizontal morphism, # because it has no curving. straight_horizontal.append(m) if len(up) < len(down): # More morphisms stick out downward than upward, let's # curve the morphism up. if backwards: curving = "_" label_pos = "_" else: curving = "^" label_pos = "^" # Assure that the straight horizontal morphisms have # their labels on the lower side of the arrow. for m in straight_horizontal: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if j1 < j2: m_str_info.label_position = "_" else: m_str_info.label_position = "^" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True else: # More morphisms stick out downward than upward, let's # curve the morphism up. if backwards: curving = "^" label_pos = "^" else: curving = "_" label_pos = "_" # Assure that the straight horizontal morphisms have # their labels on the upper side of the arrow. for m in straight_horizontal: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if j1 < j2: m_str_info.label_position = "^" else: m_str_info.label_position = "_" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True return (curving, label_pos) @staticmethod def _process_vertical_morphism(i, j, target_i, grid, morphisms_str_info, object_coords): """ Produces the information required for constructing the string representation of a vertical morphism. This function is invoked from ``_process_morphism``. See Also ======== _process_morphism """ # This arrow is vertical. Check if it goes from top to # bottom (``backwards == False``) or from bottom to top # (``backwards == True``). backwards = False start = i end = target_i if end < start: (start, end) = (end, start) backwards = True # Let's see which objects are there between ``start`` and # ``end``, and then count how many morphisms stick out to # the left, and how many stick out to the right. # # See the corresponding comment in the previous branch of # this if-statement for more details. left = [] right = [] straight_vertical = [] for k in range(start + 1, end): obj = grid[k, j] if not obj: continue for m in morphisms_str_info: if m.domain == obj: (end_i, end_j) = object_coords[m.codomain] elif m.codomain == obj: (end_i, end_j) = object_coords[m.domain] else: continue if end_j > j: right.append(m) elif end_j < j: left.append(m) elif not morphisms_str_info[m].curving: # This is a straight vertical morphism, # because it has no curving. straight_vertical.append(m) if len(left) < len(right): # More morphisms stick out to the left than to the # right, let's curve the morphism to the right. if backwards: curving = "^" label_pos = "^" else: curving = "_" label_pos = "_" # Assure that the straight vertical morphisms have # their labels on the left side of the arrow. 
for m in straight_vertical: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if i1 < i2: m_str_info.label_position = "^" else: m_str_info.label_position = "_" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True else: # More morphisms stick out to the right than to the # left, let's curve the morphism to the left. if backwards: curving = "_" label_pos = "_" else: curving = "^" label_pos = "^" # Assure that the straight vertical morphisms have # their labels on the right side of the arrow. for m in straight_vertical: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if i1 < i2: m_str_info.label_position = "_" else: m_str_info.label_position = "^" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True return (curving, label_pos) def _process_morphism(self, diagram, grid, morphism, object_coords, morphisms, morphisms_str_info): """ Given the required information, produces the string representation of ``morphism``. """ def repeat_string_cond(times, str_gt, str_lt): """ If ``times > 0``, repeats ``str_gt`` ``times`` times. Otherwise, repeats ``str_lt`` ``-times`` times. """ if times > 0: return str_gt * times else: return str_lt * (-times) def count_morphisms_undirected(A, B): """ Counts how many processed morphisms there are between the two supplied objects. """ return len([m for m in morphisms_str_info if set([m.domain, m.codomain]) == set([A, B])]) def count_morphisms_filtered(dom, cod, curving): """ Counts the processed morphisms which go out of ``dom`` into ``cod`` with curving ``curving``. """ return len([m for m, m_str_info in morphisms_str_info.items() if (m.domain, m.codomain) == (dom, cod) and (m_str_info.curving == curving)]) (i, j) = object_coords[morphism.domain] (target_i, target_j) = object_coords[morphism.codomain] # We now need to determine the direction of # the arrow. delta_i = target_i - i delta_j = target_j - j vertical_direction = repeat_string_cond(delta_i, "d", "u") horizontal_direction = repeat_string_cond(delta_j, "r", "l") curving = "" label_pos = "^" looping_start = "" looping_end = "" if (delta_i == 0) and (delta_j == 0): # This is a loop morphism. (curving, label_pos, looping_start, looping_end) = XypicDiagramDrawer._process_loop_morphism( i, j, grid, morphisms_str_info, object_coords) elif (delta_i == 0) and (abs(j - target_j) > 1): # This is a horizontal morphism. (curving, label_pos) = XypicDiagramDrawer._process_horizontal_morphism( i, j, target_j, grid, morphisms_str_info, object_coords) elif (delta_j == 0) and (abs(i - target_i) > 1): # This is a vertical morphism. (curving, label_pos) = XypicDiagramDrawer._process_vertical_morphism( i, j, target_i, grid, morphisms_str_info, object_coords) count = count_morphisms_undirected(morphism.domain, morphism.codomain) curving_amount = "" if curving: # This morphisms should be curved anyway. curving_amount = self.default_curving_amount + count * \ self.default_curving_step elif count: # There are no objects between the domain and codomain of # the current morphism, but this is not there already are # some morphisms with the same domain and codomain, so we # have to curve this one. 
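            # E.g., with the default settings (unit "mm", curving amount
            # 3, step 4), successive same-direction duplicates of an
            # arrow get curves of 3 mm, 7 mm, 11 mm, ... -- just the
            # arithmetic of the two attributes used below.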
curving = "^" filtered_morphisms = count_morphisms_filtered( morphism.domain, morphism.codomain, curving) curving_amount = self.default_curving_amount + \ filtered_morphisms * \ self.default_curving_step # Let's now get the name of the morphism. morphism_name = "" if isinstance(morphism, IdentityMorphism): morphism_name = "id_{%s}" + latex(grid[i, j]) elif isinstance(morphism, CompositeMorphism): component_names = [latex(Symbol(component.name)) for component in morphism.components] component_names.reverse() morphism_name = "\\circ ".join(component_names) elif isinstance(morphism, NamedMorphism): morphism_name = latex(Symbol(morphism.name)) return ArrowStringDescription( self.unit, curving, curving_amount, looping_start, looping_end, horizontal_direction, vertical_direction, label_pos, morphism_name) @staticmethod def _check_free_space_horizontal(dom_i, dom_j, cod_j, grid): """ For a horizontal morphism, checks whether there is free space (i.e., space not occupied by any objects) above the morphism or below it. """ if dom_j < cod_j: (start, end) = (dom_j, cod_j) backwards = False else: (start, end) = (cod_j, dom_j) backwards = True # Check for free space above. if dom_i == 0: free_up = True else: free_up = all([grid[dom_i - 1, j] for j in range(start, end + 1)]) # Check for free space below. if dom_i == grid.height - 1: free_down = True else: free_down = all([not grid[dom_i + 1, j] for j in range(start, end + 1)]) return (free_up, free_down, backwards) @staticmethod def _check_free_space_vertical(dom_i, cod_i, dom_j, grid): """ For a vertical morphism, checks whether there is free space (i.e., space not occupied by any objects) to the left of the morphism or to the right of it. """ if dom_i < cod_i: (start, end) = (dom_i, cod_i) backwards = False else: (start, end) = (cod_i, dom_i) backwards = True # Check if there's space to the left. if dom_j == 0: free_left = True else: free_left = all([not grid[i, dom_j - 1] for i in range(start, end + 1)]) if dom_j == grid.width - 1: free_right = True else: free_right = all([not grid[i, dom_j + 1] for i in range(start, end + 1)]) return (free_left, free_right, backwards) @staticmethod def _check_free_space_diagonal(dom_i, cod_i, dom_j, cod_j, grid): """ For a diagonal morphism, checks whether there is free space (i.e., space not occupied by any objects) above the morphism or below it. """ def abs_xrange(start, end): if start < end: return range(start, end + 1) else: return range(end, start + 1) if dom_i < cod_i and dom_j < cod_j: # This morphism goes from top-left to # bottom-right. (start_i, start_j) = (dom_i, dom_j) (end_i, end_j) = (cod_i, cod_j) backwards = False elif dom_i > cod_i and dom_j > cod_j: # This morphism goes from bottom-right to # top-left. (start_i, start_j) = (cod_i, cod_j) (end_i, end_j) = (dom_i, dom_j) backwards = True if dom_i < cod_i and dom_j > cod_j: # This morphism goes from top-right to # bottom-left. (start_i, start_j) = (dom_i, dom_j) (end_i, end_j) = (cod_i, cod_j) backwards = True elif dom_i > cod_i and dom_j < cod_j: # This morphism goes from bottom-left to # top-right. (start_i, start_j) = (cod_i, cod_j) (end_i, end_j) = (dom_i, dom_j) backwards = False # This is an attempt at a fast and furious strategy to # decide where there is free space on the two sides of # a diagonal morphism. For a diagonal morphism # starting at ``(start_i, start_j)`` and ending at # ``(end_i, end_j)`` the rectangle defined by these # two points is considered. The slope of the diagonal # ``alpha`` is then computed. 
Then, for every cell # ``(i, j)`` within the rectangle, the slope # ``alpha1`` of the line through ``(start_i, # start_j)`` and ``(i, j)`` is considered. If # ``alpha1`` is between 0 and ``alpha``, the point # ``(i, j)`` is above the diagonal, if ``alpha1`` is # between ``alpha`` and infinity, the point is below # the diagonal. Also note that, with some beforehand # precautions, this trick works for both the main and # the secondary diagonals of the rectangle. # I have considered the possibility to only follow the # shorter diagonals immediately above and below the # main (or secondary) diagonal. This, however, # wouldn't have resulted in much performance gain or # better detection of outer edges, because of # relatively small sizes of diagram grids, while the # code would have become harder to understand. alpha = float(end_i - start_i)/(end_j - start_j) free_up = True free_down = True for i in abs_xrange(start_i, end_i): if not free_up and not free_down: break for j in abs_xrange(start_j, end_j): if not free_up and not free_down: break if (i, j) == (start_i, start_j): continue if j == start_j: alpha1 = "inf" else: alpha1 = float(i - start_i)/(j - start_j) if grid[i, j]: if (alpha1 == "inf") or (abs(alpha1) > abs(alpha)): free_down = False elif abs(alpha1) < abs(alpha): free_up = False return (free_up, free_down, backwards) def _push_labels_out(self, morphisms_str_info, grid, object_coords): """ For all straight morphisms which form the visual boundary of the laid out diagram, puts their labels on their outer sides. """ def set_label_position(free1, free2, pos1, pos2, backwards, m_str_info): """ Given the information about room available to one side and to the other side of a morphism (``free1`` and ``free2``), sets the position of the morphism label in such a way that it is on the freer side. This latter operations involves choice between ``pos1`` and ``pos2``, taking ``backwards`` in consideration. Thus this function will do nothing if either both ``free1 == True`` and ``free2 == True`` or both ``free1 == False`` and ``free2 == False``. In either case, choosing one side over the other presents no advantage. """ if backwards: (pos1, pos2) = (pos2, pos1) if free1 and not free2: m_str_info.label_position = pos1 elif free2 and not free1: m_str_info.label_position = pos2 for m, m_str_info in morphisms_str_info.items(): if m_str_info.curving or m_str_info.forced_label_position: # This is either a curved morphism, and curved # morphisms have other magic, or the position of this # label has already been fixed. continue if m.domain == m.codomain: # This is a loop morphism, their labels, again have a # different magic. continue (dom_i, dom_j) = object_coords[m.domain] (cod_i, cod_j) = object_coords[m.codomain] if dom_i == cod_i: # Horizontal morphism. (free_up, free_down, backwards) = XypicDiagramDrawer._check_free_space_horizontal( dom_i, dom_j, cod_j, grid) set_label_position(free_up, free_down, "^", "_", backwards, m_str_info) elif dom_j == cod_j: # Vertical morphism. (free_left, free_right, backwards) = XypicDiagramDrawer._check_free_space_vertical( dom_i, cod_i, dom_j, grid) set_label_position(free_left, free_right, "_", "^", backwards, m_str_info) else: # A diagonal morphism. 
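                # For instance (the coordinates are illustrative), for an
                # arrow going from (0, 0) to (1, 1) with an object sitting
                # at (0, 1) and nothing at (1, 0), ``free_up`` comes back
                # False and the call below moves the label to the "_"
                # (lower) side.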
(free_up, free_down, backwards) = XypicDiagramDrawer._check_free_space_diagonal( dom_i, cod_i, dom_j, cod_j, grid) set_label_position(free_up, free_down, "^", "_", backwards, m_str_info) @staticmethod def _morphism_sort_key(morphism, object_coords): """ Provides a morphism sorting key such that horizontal or vertical morphisms between neighbouring objects come first, then horizontal or vertical morphisms between more far away objects, and finally, all other morphisms. """ (i, j) = object_coords[morphism.domain] (target_i, target_j) = object_coords[morphism.codomain] if morphism.domain == morphism.codomain: # Loop morphisms should get after diagonal morphisms # so that the proper direction in which to curve the # loop can be determined. return (3, 0, default_sort_key(morphism)) if target_i == i: return (1, abs(target_j - j), default_sort_key(morphism)) if target_j == j: return (1, abs(target_i - i), default_sort_key(morphism)) # Diagonal morphism. return (2, 0, default_sort_key(morphism)) @staticmethod def _build_xypic_string(diagram, grid, morphisms, morphisms_str_info, diagram_format): """ Given a collection of :class:`ArrowStringDescription` describing the morphisms of a diagram and the object layout information of a diagram, produces the final Xy-pic picture. """ # Build the mapping between objects and morphisms which have # them as domains. object_morphisms = {} for obj in diagram.objects: object_morphisms[obj] = [] for morphism in morphisms: object_morphisms[morphism.domain].append(morphism) result = "\\xymatrix%s{\n" % diagram_format for i in range(grid.height): for j in range(grid.width): obj = grid[i, j] if obj: result += latex(obj) + " " morphisms_to_draw = object_morphisms[obj] for morphism in morphisms_to_draw: result += str(morphisms_str_info[morphism]) + " " # Don't put the & after the last column. if j < grid.width - 1: result += "& " # Don't put the line break after the last row. if i < grid.height - 1: result += "\\\\" result += "\n" result += "}\n" return result def draw(self, diagram, grid, masked=None, diagram_format=""): r""" Returns the Xy-pic representation of ``diagram`` laid out in ``grid``. Consider the following simple triangle diagram. >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import DiagramGrid, XypicDiagramDrawer >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g], {g * f: "unique"}) To draw this diagram, its objects need to be laid out with a :class:`DiagramGrid`:: >>> grid = DiagramGrid(diagram) Finally, the drawing: >>> drawer = XypicDiagramDrawer() >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } The argument ``masked`` can be used to skip morphisms in the presentation of the diagram: >>> print(drawer.draw(diagram, grid, masked=[g * f])) \xymatrix{ A \ar[r]^{f} & B \ar[ld]^{g} \\ C & } Finally, the ``diagram_format`` argument can be used to specify the format string of the diagram. For example, to increase the spacing by 1 cm, proceeding as follows: >>> print(drawer.draw(diagram, grid, diagram_format="@+1cm")) \xymatrix@+1cm{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } """ # This method works in several steps. It starts by removing # the masked morphisms, if necessary, and then maps objects to # their positions in the grid (coordinate tuples). 
Remember # that objects are unique in ``Diagram`` and in the layout # produced by ``DiagramGrid``, so every object is mapped to a # single coordinate pair. # # The next step is the central step and is concerned with # analysing the morphisms of the diagram and deciding how to # draw them. For example, how to curve the arrows is decided # at this step. The bulk of the analysis is implemented in # ``_process_morphism``, to the result of which the # appropriate formatters are applied. # # The result of the previous step is a list of # ``ArrowStringDescription``. After the analysis and # application of formatters, some extra logic tries to assure # better positioning of morphism labels (for example, an # attempt is made to avoid the situations when arrows cross # labels). This functionality constitutes the next step and # is implemented in ``_push_labels_out``. Note that label # positions which have been set via a formatter are not # affected in this step. # # Finally, at the closing step, the array of # ``ArrowStringDescription`` and the layout information # incorporated in ``DiagramGrid`` are combined to produce the # resulting Xy-pic picture. This part of code lies in # ``_build_xypic_string``. if not masked: morphisms_props = grid.morphisms else: morphisms_props = {} for m, props in grid.morphisms.items(): if m in masked: continue morphisms_props[m] = props # Build the mapping between objects and their position in the # grid. object_coords = {} for i in range(grid.height): for j in range(grid.width): if grid[i, j]: object_coords[grid[i, j]] = (i, j) morphisms = sorted(morphisms_props, key=lambda m: XypicDiagramDrawer._morphism_sort_key( m, object_coords)) # Build the tuples defining the string representations of # morphisms. morphisms_str_info = {} for morphism in morphisms: string_description = self._process_morphism( diagram, grid, morphism, object_coords, morphisms, morphisms_str_info) if self.default_arrow_formatter: self.default_arrow_formatter(string_description) for prop in morphisms_props[morphism]: # prop is a Symbol. TODO: Find out why. if prop.name in self.arrow_formatters: formatter = self.arrow_formatters[prop.name] formatter(string_description) morphisms_str_info[morphism] = string_description # Reposition the labels a bit. self._push_labels_out(morphisms_str_info, grid, object_coords) return XypicDiagramDrawer._build_xypic_string( diagram, grid, morphisms, morphisms_str_info, diagram_format) def xypic_draw_diagram(diagram, masked=None, diagram_format="", groups=None, **hints): r""" Provides a shortcut combining :class:`DiagramGrid` and :class:`XypicDiagramDrawer`. Returns an Xy-pic presentation of ``diagram``. The argument ``masked`` is a list of morphisms which will be not be drawn. The argument ``diagram_format`` is the format string inserted after "\xymatrix". ``groups`` should be a set of logical groups. The ``hints`` will be passed directly to the constructor of :class:`DiagramGrid`. For more information about the arguments, see the docstrings of :class:`DiagramGrid` and ``XypicDiagramDrawer.draw``. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import xypic_draw_diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g], {g * f: "unique"}) >>> print(xypic_draw_diagram(diagram)) \xymatrix{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } See Also ======== XypicDiagramDrawer, DiagramGrid """ grid = DiagramGrid(diagram, groups, **hints) drawer = XypicDiagramDrawer() return drawer.draw(diagram, grid, masked, diagram_format) @doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',)) def preview_diagram(diagram, masked=None, diagram_format="", groups=None, output='png', viewer=None, euler=True, **hints): """ Combines the functionality of ``xypic_draw_diagram`` and ``sympy.printing.preview``. The arguments ``masked``, ``diagram_format``, ``groups``, and ``hints`` are passed to ``xypic_draw_diagram``, while ``output``, ``viewer, and ``euler`` are passed to ``preview``. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import preview_diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {g * f: "unique"}) >>> preview_diagram(d) See Also ======== XypicDiagramDrawer """ from sympy.printing import preview latex_output = xypic_draw_diagram(diagram, masked, diagram_format, groups, **hints) preview(latex_output, output, viewer, euler, ("xypic",))
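# --------------------------------------------------------------------------
# A minimal standalone sketch (not part of XypicDiagramDrawer) of the slope
# comparison used by ``_check_free_space_diagonal`` above: for a diagonal
# morphism from ``(start_i, start_j)`` to ``(end_i, end_j)``, an occupied
# cell whose slope to the start point is steeper (in absolute value) than
# the diagonal's slope lies below the diagonal, a shallower slope lies
# above it.  The helper name and the plain set of ``occupied`` cells are
# hypothetical; the real method walks a DiagramGrid rectangle and also
# tracks a ``backwards`` flag, both of which are omitted here.  The caller
# is assumed to pass only cells inside the bounding rectangle, and the
# morphism is assumed diagonal (``end_j != start_j``).

def _diagonal_free_space_sketch(start, end, occupied):
    (start_i, start_j), (end_i, end_j) = start, end
    alpha = float(end_i - start_i) / (end_j - start_j)
    free_up = free_down = True
    for (i, j) in occupied:
        if (i, j) in (start, end):
            continue
        if j == start_j:
            # Vertical direction from the start cell: infinite slope.
            alpha1 = float("inf")
        else:
            alpha1 = float(i - start_i) / (j - start_j)
        if abs(alpha1) > abs(alpha):
            free_down = False   # the occupied cell lies below the diagonal
        elif abs(alpha1) < abs(alpha):
            free_up = False     # the occupied cell lies above the diagonal
    return free_up, free_down

# With only cell (0, 1) occupied above the diagonal from (0, 0) to (1, 1),
# the space below the diagonal remains free:
print(_diagonal_free_space_sketch((0, 0), (1, 1), {(0, 1)}))  # (False, True)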
b727f90a1a3a80e7cb1b4a743e19563461d3b717722a6e0dcaa6806056721a66
from __future__ import print_function, division from sympy.core import S, Basic, Dict, Symbol, Tuple, sympify from sympy.core.compatibility import iterable from sympy.sets import Set, FiniteSet, EmptySet class Class(Set): r""" The base class for any kind of class in the set-theoretic sense. In axiomatic set theories, everything is a class. A class which can be a member of another class is a set. A class which is not a member of another class is a proper class. The class `\{1, 2\}` is a set; the class of all sets is a proper class. This class is essentially a synonym for :class:`sympy.core.Set`. The goal of this class is to assure easier migration to the eventual proper implementation of set theory. """ is_proper = False class Object(Symbol): """ The base class for any kind of object in an abstract category. While technically any instance of :class:`~.Basic` will do, this class is the recommended way to create abstract objects in abstract categories. """ class Morphism(Basic): """ The base class for any morphism in an abstract category. In abstract categories, a morphism is an arrow between two category objects. The object where the arrow starts is called the domain, while the object where the arrow ends is called the codomain. Two morphisms between the same pair of objects are considered to be the same morphisms. To distinguish between morphisms between the same objects use :class:`NamedMorphism`. It is prohibited to instantiate this class. Use one of the derived classes instead. See Also ======== IdentityMorphism, NamedMorphism, CompositeMorphism """ def __new__(cls, domain, codomain): raise(NotImplementedError( "Cannot instantiate Morphism. Use derived classes instead.")) @property def domain(self): """ Returns the domain of the morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f.domain Object("A") """ return self.args[0] @property def codomain(self): """ Returns the codomain of the morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f.codomain Object("B") """ return self.args[1] def compose(self, other): r""" Composes self with the supplied morphism. The order of elements in the composition is the usual order, i.e., to construct `g\circ f` use ``g.compose(f)``. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> g * f CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"), NamedMorphism(Object("B"), Object("C"), "g"))) >>> (g * f).domain Object("A") >>> (g * f).codomain Object("C") """ return CompositeMorphism(other, self) def __mul__(self, other): r""" Composes self with the supplied morphism. The semantics of this operation is given by the following equation: ``g * f == g.compose(f)`` for composable morphisms ``g`` and ``f``. See Also ======== compose """ return self.compose(other) class IdentityMorphism(Morphism): """ Represents an identity morphism. An identity morphism is a morphism with equal domain and codomain, which acts as an identity with respect to composition. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, IdentityMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> id_A = IdentityMorphism(A) >>> id_B = IdentityMorphism(B) >>> f * id_A == f True >>> id_B * f == f True See Also ======== Morphism """ def __new__(cls, domain): return Basic.__new__(cls, domain) @property def codomain(self): return self.domain class NamedMorphism(Morphism): """ Represents a morphism which has a name. Names are used to distinguish between morphisms which have the same domain and codomain: two named morphisms are equal if they have the same domains, codomains, and names. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f NamedMorphism(Object("A"), Object("B"), "f") >>> f.name 'f' See Also ======== Morphism """ def __new__(cls, domain, codomain, name): if not name: raise ValueError("Empty morphism names not allowed.") if not isinstance(name, Symbol): name = Symbol(name) return Basic.__new__(cls, domain, codomain, name) @property def name(self): """ Returns the name of the morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f.name 'f' """ return self.args[2].name class CompositeMorphism(Morphism): r""" Represents a morphism which is a composition of other morphisms. Two composite morphisms are equal if the morphisms they were obtained from (components) are the same and were listed in the same order. The arguments to the constructor for this class should be listed in diagram order: to obtain the composition `g\circ f` from the instances of :class:`Morphism` ``g`` and ``f`` use ``CompositeMorphism(f, g)``. Examples ======== >>> from sympy.categories import Object, NamedMorphism, CompositeMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> g * f CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"), NamedMorphism(Object("B"), Object("C"), "g"))) >>> CompositeMorphism(f, g) == g * f True """ @staticmethod def _add_morphism(t, morphism): """ Intelligently adds ``morphism`` to tuple ``t``. If ``morphism`` is a composite morphism, its components are added to the tuple. If ``morphism`` is an identity, nothing is added to the tuple. No composability checks are performed. """ if isinstance(morphism, CompositeMorphism): # ``morphism`` is a composite morphism; we have to # denest its components. return t + morphism.components elif isinstance(morphism, IdentityMorphism): # ``morphism`` is an identity. Nothing happens. return t else: return t + Tuple(morphism) def __new__(cls, *components): if components and not isinstance(components[0], Morphism): # Maybe the user has explicitly supplied a list of # morphisms. return CompositeMorphism.__new__(cls, *components[0]) normalised_components = Tuple() for current, following in zip(components, components[1:]): if not isinstance(current, Morphism) or \ not isinstance(following, Morphism): raise TypeError("All components must be morphisms.") if current.codomain != following.domain: raise ValueError("Uncomposable morphisms.") normalised_components = CompositeMorphism._add_morphism( normalised_components, current) # We haven't added the last morphism to the list of normalised # components. Add it now. 
normalised_components = CompositeMorphism._add_morphism( normalised_components, components[-1]) if not normalised_components: # If ``normalised_components`` is empty, only identities # were supplied. Since they all were composable, they are # all the same identities. return components[0] elif len(normalised_components) == 1: # No sense to construct a whole CompositeMorphism. return normalised_components[0] return Basic.__new__(cls, normalised_components) @property def components(self): """ Returns the components of this composite morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).components (NamedMorphism(Object("A"), Object("B"), "f"), NamedMorphism(Object("B"), Object("C"), "g")) """ return self.args[0] @property def domain(self): """ Returns the domain of this composite morphism. The domain of the composite morphism is the domain of its first component. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).domain Object("A") """ return self.components[0].domain @property def codomain(self): """ Returns the codomain of this composite morphism. The codomain of the composite morphism is the codomain of its last component. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).codomain Object("C") """ return self.components[-1].codomain def flatten(self, new_name): """ Forgets the composite structure of this morphism. If ``new_name`` is not empty, returns a :class:`NamedMorphism` with the supplied name, otherwise returns a :class:`Morphism`. In both cases the domain of the new morphism is the domain of this composite morphism and the codomain of the new morphism is the codomain of this composite morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).flatten("h") NamedMorphism(Object("A"), Object("C"), "h") """ return NamedMorphism(self.domain, self.codomain, new_name) class Category(Basic): r""" An (abstract) category. A category [JoyOfCats] is a quadruple `\mbox{K} = (O, \hom, id, \circ)` consisting of * a (set-theoretical) class `O`, whose members are called `K`-objects, * for each pair `(A, B)` of `K`-objects, a set `\hom(A, B)` whose members are called `K`-morphisms from `A` to `B`, * for a each `K`-object `A`, a morphism `id:A\rightarrow A`, called the `K`-identity of `A`, * a composition law `\circ` associating with every `K`-morphisms `f:A\rightarrow B` and `g:B\rightarrow C` a `K`-morphism `g\circ f:A\rightarrow C`, called the composite of `f` and `g`. Composition is associative, `K`-identities are identities with respect to composition, and the sets `\hom(A, B)` are pairwise disjoint. This class knows nothing about its objects and morphisms. Concrete cases of (abstract) categories should be implemented as classes derived from this one. Certain instances of :class:`Diagram` can be asserted to be commutative in a :class:`Category` by supplying the argument ``commutative_diagrams`` in the constructor. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> K = Category("K", commutative_diagrams=[d]) >>> K.commutative_diagrams == FiniteSet(d) True See Also ======== Diagram """ def __new__(cls, symbol, objects=EmptySet, commutative_diagrams=EmptySet): if not symbol: raise ValueError("A Category cannot have an empty name.") if not isinstance(symbol, Symbol): symbol = Symbol(symbol) if not isinstance(objects, Class): objects = Class(objects) new_category = Basic.__new__(cls, symbol, objects, FiniteSet(*commutative_diagrams)) return new_category @property def name(self): """ Returns the name of this category. Examples ======== >>> from sympy.categories import Category >>> K = Category("K") >>> K.name 'K' """ return self.args[0].name @property def objects(self): """ Returns the class of objects of this category. Examples ======== >>> from sympy.categories import Object, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> K = Category("K", FiniteSet(A, B)) >>> K.objects Class(FiniteSet(Object("A"), Object("B"))) """ return self.args[1] @property def commutative_diagrams(self): """ Returns the :class:`~.FiniteSet` of diagrams which are known to be commutative in this category. >>> from sympy.categories import Object, NamedMorphism, Diagram, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> K = Category("K", commutative_diagrams=[d]) >>> K.commutative_diagrams == FiniteSet(d) True """ return self.args[2] def hom(self, A, B): raise NotImplementedError( "hom-sets are not implemented in Category.") def all_morphisms(self): raise NotImplementedError( "Obtaining the class of morphisms is not implemented in Category.") class Diagram(Basic): r""" Represents a diagram in a certain category. Informally, a diagram is a collection of objects of a category and certain morphisms between them. A diagram is still a monoid with respect to morphism composition; i.e., identity morphisms, as well as all composites of morphisms included in the diagram belong to the diagram. For a more formal approach to this notion see [Pare1970]. The components of composite morphisms are also added to the diagram. No properties are assigned to such morphisms by default. A commutative diagram is often accompanied by a statement of the following kind: "if such morphisms with such properties exist, then such morphisms which such properties exist and the diagram is commutative". To represent this, an instance of :class:`Diagram` includes a collection of morphisms which are the premises and another collection of conclusions. ``premises`` and ``conclusions`` associate morphisms belonging to the corresponding categories with the :class:`~.FiniteSet`'s of their properties. The set of properties of a composite morphism is the intersection of the sets of properties of its components. The domain and codomain of a conclusion morphism should be among the domains and codomains of the morphisms listed as the premises of a diagram. No checks are carried out of whether the supplied object and morphisms do belong to one and the same category. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy import FiniteSet, pprint, default_sort_key >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> premises_keys = sorted(d.premises.keys(), key=default_sort_key) >>> pprint(premises_keys, use_unicode=False) [g*f:A-->C, id:A-->A, id:B-->B, id:C-->C, f:A-->B, g:B-->C] >>> pprint(d.premises, use_unicode=False) {g*f:A-->C: EmptySet, id:A-->A: EmptySet, id:B-->B: EmptySet, id:C-->C: EmptyS et, f:A-->B: EmptySet, g:B-->C: EmptySet} >>> d = Diagram([f, g], {g * f: "unique"}) >>> pprint(d.conclusions) {g*f:A-->C: {unique}} References ========== [Pare1970] B. Pareigis: Categories and functors. Academic Press, 1970. """ @staticmethod def _set_dict_union(dictionary, key, value): """ If ``key`` is in ``dictionary``, set the new value of ``key`` to be the union between the old value and ``value``. Otherwise, set the value of ``key`` to ``value. Returns ``True`` if the key already was in the dictionary and ``False`` otherwise. """ if key in dictionary: dictionary[key] = dictionary[key] | value return True else: dictionary[key] = value return False @staticmethod def _add_morphism_closure(morphisms, morphism, props, add_identities=True, recurse_composites=True): """ Adds a morphism and its attributes to the supplied dictionary ``morphisms``. If ``add_identities`` is True, also adds the identity morphisms for the domain and the codomain of ``morphism``. """ if not Diagram._set_dict_union(morphisms, morphism, props): # We have just added a new morphism. if isinstance(morphism, IdentityMorphism): if props: # Properties for identity morphisms don't really # make sense, because very much is known about # identity morphisms already, so much that they # are trivial. Having properties for identity # morphisms would only be confusing. raise ValueError( "Instances of IdentityMorphism cannot have properties.") return if add_identities: empty = EmptySet id_dom = IdentityMorphism(morphism.domain) id_cod = IdentityMorphism(morphism.codomain) Diagram._set_dict_union(morphisms, id_dom, empty) Diagram._set_dict_union(morphisms, id_cod, empty) for existing_morphism, existing_props in list(morphisms.items()): new_props = existing_props & props if morphism.domain == existing_morphism.codomain: left = morphism * existing_morphism Diagram._set_dict_union(morphisms, left, new_props) if morphism.codomain == existing_morphism.domain: right = existing_morphism * morphism Diagram._set_dict_union(morphisms, right, new_props) if isinstance(morphism, CompositeMorphism) and recurse_composites: # This is a composite morphism, add its components as # well. empty = EmptySet for component in morphism.components: Diagram._add_morphism_closure(morphisms, component, empty, add_identities) def __new__(cls, *args): """ Construct a new instance of Diagram. If no arguments are supplied, an empty diagram is created. If at least an argument is supplied, ``args[0]`` is interpreted as the premises of the diagram. If ``args[0]`` is a list, it is interpreted as a list of :class:`Morphism`'s, in which each :class:`Morphism` has an empty set of properties. If ``args[0]`` is a Python dictionary or a :class:`Dict`, it is interpreted as a dictionary associating to some :class:`Morphism`'s some properties. If at least two arguments are supplied ``args[1]`` is interpreted as the conclusions of the diagram. 
The type of ``args[1]`` is interpreted in exactly the same way as the type of ``args[0]``. If only one argument is supplied, the diagram has no conclusions. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import IdentityMorphism, Diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> IdentityMorphism(A) in d.premises.keys() True >>> g * f in d.premises.keys() True >>> d = Diagram([f, g], {g * f: "unique"}) >>> d.conclusions[g * f] FiniteSet(unique) """ premises = {} conclusions = {} # Here we will keep track of the objects which appear in the # premises. objects = EmptySet if len(args) >= 1: # We've got some premises in the arguments. premises_arg = args[0] if isinstance(premises_arg, list): # The user has supplied a list of morphisms, none of # which have any attributes. empty = EmptySet for morphism in premises_arg: objects |= FiniteSet(morphism.domain, morphism.codomain) Diagram._add_morphism_closure(premises, morphism, empty) elif isinstance(premises_arg, dict) or isinstance(premises_arg, Dict): # The user has supplied a dictionary of morphisms and # their properties. for morphism, props in premises_arg.items(): objects |= FiniteSet(morphism.domain, morphism.codomain) Diagram._add_morphism_closure( premises, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props)) if len(args) >= 2: # We also have some conclusions. conclusions_arg = args[1] if isinstance(conclusions_arg, list): # The user has supplied a list of morphisms, none of # which have any attributes. empty = EmptySet for morphism in conclusions_arg: # Check that no new objects appear in conclusions. if ((sympify(objects.contains(morphism.domain)) is S.true) and (sympify(objects.contains(morphism.codomain)) is S.true)): # No need to add identities and recurse # composites this time. Diagram._add_morphism_closure( conclusions, morphism, empty, add_identities=False, recurse_composites=False) elif isinstance(conclusions_arg, dict) or \ isinstance(conclusions_arg, Dict): # The user has supplied a dictionary of morphisms and # their properties. for morphism, props in conclusions_arg.items(): # Check that no new objects appear in conclusions. if (morphism.domain in objects) and \ (morphism.codomain in objects): # No need to add identities and recurse # composites this time. Diagram._add_morphism_closure( conclusions, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props), add_identities=False, recurse_composites=False) return Basic.__new__(cls, Dict(premises), Dict(conclusions), objects) @property def premises(self): """ Returns the premises of this diagram. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import IdentityMorphism, Diagram >>> from sympy import pretty >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> id_A = IdentityMorphism(A) >>> id_B = IdentityMorphism(B) >>> d = Diagram([f]) >>> print(pretty(d.premises, use_unicode=False)) {id:A-->A: EmptySet, id:B-->B: EmptySet, f:A-->B: EmptySet} """ return self.args[0] @property def conclusions(self): """ Returns the conclusions of this diagram. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import IdentityMorphism, Diagram >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> IdentityMorphism(A) in d.premises.keys() True >>> g * f in d.premises.keys() True >>> d = Diagram([f, g], {g * f: "unique"}) >>> d.conclusions[g * f] == FiniteSet("unique") True """ return self.args[1] @property def objects(self): """ Returns the :class:`~.FiniteSet` of objects that appear in this diagram. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> d.objects FiniteSet(Object("A"), Object("B"), Object("C")) """ return self.args[2] def hom(self, A, B): """ Returns a 2-tuple of sets of morphisms between objects A and B: one set of morphisms listed as premises, and the other set of morphisms listed as conclusions. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy import pretty >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {g * f: "unique"}) >>> print(pretty(d.hom(A, C), use_unicode=False)) ({g*f:A-->C}, {g*f:A-->C}) See Also ======== Object, Morphism """ premises = EmptySet conclusions = EmptySet for morphism in self.premises.keys(): if (morphism.domain == A) and (morphism.codomain == B): premises |= FiniteSet(morphism) for morphism in self.conclusions.keys(): if (morphism.domain == A) and (morphism.codomain == B): conclusions |= FiniteSet(morphism) return (premises, conclusions) def is_subdiagram(self, diagram): """ Checks whether ``diagram`` is a subdiagram of ``self``. Diagram `D'` is a subdiagram of `D` if all premises (conclusions) of `D'` are contained in the premises (conclusions) of `D`. The morphisms contained both in `D'` and `D` should have the same properties for `D'` to be a subdiagram of `D`. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {g * f: "unique"}) >>> d1 = Diagram([f]) >>> d.is_subdiagram(d1) True >>> d1.is_subdiagram(d) False """ premises = all([(m in self.premises) and (diagram.premises[m] == self.premises[m]) for m in diagram.premises]) if not premises: return False conclusions = all([(m in self.conclusions) and (diagram.conclusions[m] == self.conclusions[m]) for m in diagram.conclusions]) # Premises is surely ``True`` here. return conclusions def subdiagram_from_objects(self, objects): """ If ``objects`` is a subset of the objects of ``self``, returns a diagram which has as premises all those premises of ``self`` which have a domains and codomains in ``objects``, likewise for conclusions. Properties are preserved. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {f: "unique", g*f: "veryunique"}) >>> d1 = d.subdiagram_from_objects(FiniteSet(A, B)) >>> d1 == Diagram([f], {f: "unique"}) True """ if not objects.is_subset(self.objects): raise ValueError( "Supplied objects should all belong to the diagram.") new_premises = {} for morphism, props in self.premises.items(): if ((sympify(objects.contains(morphism.domain)) is S.true) and (sympify(objects.contains(morphism.codomain)) is S.true)): new_premises[morphism] = props new_conclusions = {} for morphism, props in self.conclusions.items(): if ((sympify(objects.contains(morphism.domain)) is S.true) and (sympify(objects.contains(morphism.codomain)) is S.true)): new_conclusions[morphism] = props return Diagram(new_premises, new_conclusions)
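# --------------------------------------------------------------------------
# A short usage sketch (not part of the module above) exercising the classes
# defined here, based on the behaviour shown in the docstrings: a Diagram
# closes its premises under composition and identities, ``hom`` splits
# morphisms between two objects into premises and conclusions, and
# ``subdiagram_from_objects`` restricts a diagram to a subset of objects.
from sympy import FiniteSet
from sympy.categories import (Object, NamedMorphism, IdentityMorphism,
                              Diagram)

A, B, C = Object("A"), Object("B"), Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")

d = Diagram([f, g], {g * f: "unique"})

# The closure adds the composite g*f and all identities to the premises.
assert g * f in d.premises.keys()
assert IdentityMorphism(A) in d.premises.keys()
assert d.conclusions[g * f] == FiniteSet("unique")

# Morphisms between A and C, split into premises and conclusions.
premises_AC, conclusions_AC = d.hom(A, C)
assert premises_AC == FiniteSet(g * f) and conclusions_AC == FiniteSet(g * f)

# Restricting to {A, B} keeps only f (plus the identities of A and B).
d1 = d.subdiagram_from_objects(FiniteSet(A, B))
assert d.is_subdiagram(d1)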
a62c974e5f76a0f5ed010f2674ffb740b6883955012ea8be6f7d7985292cfce2
# References : # http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/ # https://en.wikipedia.org/wiki/Quaternion from __future__ import print_function from sympy import S, Rational from sympy import re, im, conjugate, sign from sympy import sqrt, sin, cos, acos, exp, ln from sympy import trigsimp from sympy import integrate from sympy import Matrix from sympy import sympify from sympy.core.expr import Expr class Quaternion(Expr): """Provides basic quaternion operations. Quaternion objects can be instantiated as Quaternion(a, b, c, d) as in (a + b*i + c*j + d*k). Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> q = Quaternion(1, 2, 3, 4) >>> q 1 + 2*i + 3*j + 4*k Quaternions over complex fields can be defined as : >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import symbols, I >>> x = symbols('x') >>> q1 = Quaternion(x, x**3, x, x**2, real_field = False) >>> q2 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) >>> q1 x + x**3*i + x*j + x**2*k >>> q2 (3 + 4*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k """ _op_priority = 11.0 is_commutative = False def __new__(cls, a=0, b=0, c=0, d=0, real_field=True): a = sympify(a) b = sympify(b) c = sympify(c) d = sympify(d) if any(i.is_commutative is False for i in [a, b, c, d]): raise ValueError("arguments have to be commutative") else: obj = Expr.__new__(cls, a, b, c, d) obj._a = a obj._b = b obj._c = c obj._d = d obj._real_field = real_field return obj @property def a(self): return self._a @property def b(self): return self._b @property def c(self): return self._c @property def d(self): return self._d @property def real_field(self): return self._real_field @classmethod def from_axis_angle(cls, vector, angle): """Returns a rotation quaternion given the axis and the angle of rotation. Parameters ========== vector : tuple of three numbers The vector representation of the given axis. angle : number The angle by which axis is rotated (in radians). Returns ======= Quaternion The normalized rotation quaternion calculated from the given axis and the angle of rotation. Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import pi, sqrt >>> q = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), 2*pi/3) >>> q 1/2 + 1/2*i + 1/2*j + 1/2*k """ (x, y, z) = vector norm = sqrt(x**2 + y**2 + z**2) (x, y, z) = (x / norm, y / norm, z / norm) s = sin(angle * S.Half) a = cos(angle * S.Half) b = x * s c = y * s d = z * s return cls(a, b, c, d).normalize() @classmethod def from_rotation_matrix(cls, M): """Returns the equivalent quaternion of a matrix. The quaternion will be normalized only if the matrix is special orthogonal (orthogonal and det(M) = 1). Parameters ========== M : Matrix Input matrix to be converted to equivalent quaternion. M must be special orthogonal (orthogonal and det(M) = 1) for the quaternion to be normalized. Returns ======= Quaternion The quaternion equivalent to given matrix. 
Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import Matrix, symbols, cos, sin, trigsimp >>> x = symbols('x') >>> M = Matrix([[cos(x), -sin(x), 0], [sin(x), cos(x), 0], [0, 0, 1]]) >>> q = trigsimp(Quaternion.from_rotation_matrix(M)) >>> q sqrt(2)*sqrt(cos(x) + 1)/2 + 0*i + 0*j + sqrt(2 - 2*cos(x))*sign(sin(x))/2*k """ absQ = M.det()**Rational(1, 3) a = sqrt(absQ + M[0, 0] + M[1, 1] + M[2, 2]) / 2 b = sqrt(absQ + M[0, 0] - M[1, 1] - M[2, 2]) / 2 c = sqrt(absQ - M[0, 0] + M[1, 1] - M[2, 2]) / 2 d = sqrt(absQ - M[0, 0] - M[1, 1] + M[2, 2]) / 2 b = b * sign(M[2, 1] - M[1, 2]) c = c * sign(M[0, 2] - M[2, 0]) d = d * sign(M[1, 0] - M[0, 1]) return Quaternion(a, b, c, d) def __add__(self, other): return self.add(other) def __radd__(self, other): return self.add(other) def __sub__(self, other): return self.add(other*-1) def __mul__(self, other): return self._generic_mul(self, other) def __rmul__(self, other): return self._generic_mul(other, self) def __pow__(self, p): return self.pow(p) def __neg__(self): return Quaternion(-self._a, -self._b, -self._c, -self.d) def __truediv__(self, other): return self * sympify(other)**-1 __div__ = __truediv__ def __rtruediv__(self, other): return sympify(other) * self**-1 __rdiv__ = __rtruediv__ def _eval_Integral(self, *args): return self.integrate(*args) def diff(self, *symbols, **kwargs): kwargs.setdefault('evaluate', True) return self.func(*[a.diff(*symbols, **kwargs) for a in self.args]) def add(self, other): """Adds quaternions. Parameters ========== other : Quaternion The quaternion to add to current (self) quaternion. Returns ======= Quaternion The resultant quaternion after adding self to other Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import symbols >>> q1 = Quaternion(1, 2, 3, 4) >>> q2 = Quaternion(5, 6, 7, 8) >>> q1.add(q2) 6 + 8*i + 10*j + 12*k >>> q1 + 5 6 + 2*i + 3*j + 4*k >>> x = symbols('x', real = True) >>> q1.add(x) (x + 1) + 2*i + 3*j + 4*k Quaternions over complex fields : >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import I >>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) >>> q3.add(2 + 3*I) (5 + 7*I) + (2 + 5*I)*i + 0*j + (7 + 8*I)*k """ q1 = self q2 = sympify(other) # If q2 is a number or a sympy expression instead of a quaternion if not isinstance(q2, Quaternion): if q1.real_field and q2.is_complex: return Quaternion(re(q2) + q1.a, im(q2) + q1.b, q1.c, q1.d) elif q2.is_commutative: return Quaternion(q1.a + q2, q1.b, q1.c, q1.d) else: raise ValueError("Only commutative expressions can be added with a Quaternion.") return Quaternion(q1.a + q2.a, q1.b + q2.b, q1.c + q2.c, q1.d + q2.d) def mul(self, other): """Multiplies quaternions. Parameters ========== other : Quaternion or symbol The quaternion to multiply to current (self) quaternion. 
Returns ======= Quaternion The resultant quaternion after multiplying self with other Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import symbols >>> q1 = Quaternion(1, 2, 3, 4) >>> q2 = Quaternion(5, 6, 7, 8) >>> q1.mul(q2) (-60) + 12*i + 30*j + 24*k >>> q1.mul(2) 2 + 4*i + 6*j + 8*k >>> x = symbols('x', real = True) >>> q1.mul(x) x + 2*x*i + 3*x*j + 4*x*k Quaternions over complex fields : >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import I >>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) >>> q3.mul(2 + 3*I) (2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k """ return self._generic_mul(self, other) @staticmethod def _generic_mul(q1, q2): """Generic multiplication. Parameters ========== q1 : Quaternion or symbol q2 : Quaternion or symbol It's important to note that if neither q1 nor q2 is a Quaternion, this function simply returns q1 * q2. Returns ======= Quaternion The resultant quaternion after multiplying q1 and q2 Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import Symbol >>> q1 = Quaternion(1, 2, 3, 4) >>> q2 = Quaternion(5, 6, 7, 8) >>> Quaternion._generic_mul(q1, q2) (-60) + 12*i + 30*j + 24*k >>> Quaternion._generic_mul(q1, 2) 2 + 4*i + 6*j + 8*k >>> x = Symbol('x', real = True) >>> Quaternion._generic_mul(q1, x) x + 2*x*i + 3*x*j + 4*x*k Quaternions over complex fields : >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import I >>> q3 = Quaternion(3 + 4*I, 2 + 5*I, 0, 7 + 8*I, real_field = False) >>> Quaternion._generic_mul(q3, 2 + 3*I) (2 + 3*I)*(3 + 4*I) + (2 + 3*I)*(2 + 5*I)*i + 0*j + (2 + 3*I)*(7 + 8*I)*k """ q1 = sympify(q1) q2 = sympify(q2) # None is a Quaternion: if not isinstance(q1, Quaternion) and not isinstance(q2, Quaternion): return q1 * q2 # If q1 is a number or a sympy expression instead of a quaternion if not isinstance(q1, Quaternion): if q2.real_field and q1.is_complex: return Quaternion(re(q1), im(q1), 0, 0) * q2 elif q1.is_commutative: return Quaternion(q1 * q2.a, q1 * q2.b, q1 * q2.c, q1 * q2.d) else: raise ValueError("Only commutative expressions can be multiplied with a Quaternion.") # If q2 is a number or a sympy expression instead of a quaternion if not isinstance(q2, Quaternion): if q1.real_field and q2.is_complex: return q1 * Quaternion(re(q2), im(q2), 0, 0) elif q2.is_commutative: return Quaternion(q2 * q1.a, q2 * q1.b, q2 * q1.c, q2 * q1.d) else: raise ValueError("Only commutative expressions can be multiplied with a Quaternion.") return Quaternion(-q1.b*q2.b - q1.c*q2.c - q1.d*q2.d + q1.a*q2.a, q1.b*q2.a + q1.c*q2.d - q1.d*q2.c + q1.a*q2.b, -q1.b*q2.d + q1.c*q2.a + q1.d*q2.b + q1.a*q2.c, q1.b*q2.c - q1.c*q2.b + q1.d*q2.a + q1.a * q2.d) def _eval_conjugate(self): """Returns the conjugate of the quaternion.""" q = self return Quaternion(q.a, -q.b, -q.c, -q.d) def norm(self): """Returns the norm of the quaternion.""" q = self # trigsimp is used to simplify sin(x)^2 + cos(x)^2 (these terms # arise when from_axis_angle is used). return sqrt(trigsimp(q.a**2 + q.b**2 + q.c**2 + q.d**2)) def normalize(self): """Returns the normalized form of the quaternion.""" q = self return q * (1/q.norm()) def inverse(self): """Returns the inverse of the quaternion.""" q = self if not q.norm(): raise ValueError("Cannot compute inverse for a quaternion with zero norm") return conjugate(q) * (1/q.norm()**2) def pow(self, p): """Finds the pth power of the quaternion. 
Parameters ========== p : int Power to be applied on quaternion. Returns ======= Quaternion Returns the p-th power of the current quaternion. Returns the inverse if p = -1. Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> q = Quaternion(1, 2, 3, 4) >>> q.pow(4) 668 + (-224)*i + (-336)*j + (-448)*k """ p = sympify(p) q = self if p == -1: return q.inverse() res = 1 if not p.is_Integer: return NotImplemented if p < 0: q, p = q.inverse(), -p while p > 0: if p % 2 == 1: res = q * res p = p//2 q = q * q return res def exp(self): """Returns the exponential of q (e^q). Returns ======= Quaternion Exponential of q (e^q). Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> q = Quaternion(1, 2, 3, 4) >>> q.exp() E*cos(sqrt(29)) + 2*sqrt(29)*E*sin(sqrt(29))/29*i + 3*sqrt(29)*E*sin(sqrt(29))/29*j + 4*sqrt(29)*E*sin(sqrt(29))/29*k """ # exp(q) = e^a(cos||v|| + v/||v||*sin||v||) q = self vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2) a = exp(q.a) * cos(vector_norm) b = exp(q.a) * sin(vector_norm) * q.b / vector_norm c = exp(q.a) * sin(vector_norm) * q.c / vector_norm d = exp(q.a) * sin(vector_norm) * q.d / vector_norm return Quaternion(a, b, c, d) def _ln(self): """Returns the natural logarithm of the quaternion (_ln(q)). Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> q = Quaternion(1, 2, 3, 4) >>> q._ln() log(sqrt(30)) + 2*sqrt(29)*acos(sqrt(30)/30)/29*i + 3*sqrt(29)*acos(sqrt(30)/30)/29*j + 4*sqrt(29)*acos(sqrt(30)/30)/29*k """ # _ln(q) = _ln||q|| + v/||v||*arccos(a/||q||) q = self vector_norm = sqrt(q.b**2 + q.c**2 + q.d**2) q_norm = q.norm() a = ln(q_norm) b = q.b * acos(q.a / q_norm) / vector_norm c = q.c * acos(q.a / q_norm) / vector_norm d = q.d * acos(q.a / q_norm) / vector_norm return Quaternion(a, b, c, d) def pow_cos_sin(self, p): """Computes the pth power in the cos-sin form. Parameters ========== p : int Power to be applied on quaternion. Returns ======= Quaternion The p-th power in the cos-sin form. Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> q = Quaternion(1, 2, 3, 4) >>> q.pow_cos_sin(4) 900*cos(4*acos(sqrt(30)/30)) + 1800*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*i + 2700*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*j + 3600*sqrt(29)*sin(4*acos(sqrt(30)/30))/29*k """ # q = ||q||*(cos(a) + u*sin(a)) # q^p = ||q||^p * (cos(p*a) + u*sin(p*a)) q = self (v, angle) = q.to_axis_angle() q2 = Quaternion.from_axis_angle(v, p * angle) return q2 * (q.norm()**p) def integrate(self, *args): # TODO: is this expression correct? return Quaternion(integrate(self.a, *args), integrate(self.b, *args), integrate(self.c, *args), integrate(self.d, *args)) @staticmethod def rotate_point(pin, r): """Returns the coordinates of the point pin(a 3 tuple) after rotation. Parameters ========== pin : tuple A 3-element tuple of coordinates of a point which needs to be rotated. r : Quaternion or tuple Axis and angle of rotation. It's important to note that when r is a tuple, it must be of the form (axis, angle) Returns ======= tuple The coordinates of the point after rotation. 
Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import symbols, trigsimp, cos, sin >>> x = symbols('x') >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2)) >>> trigsimp(Quaternion.rotate_point((1, 1, 1), q)) (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1) >>> (axis, angle) = q.to_axis_angle() >>> trigsimp(Quaternion.rotate_point((1, 1, 1), (axis, angle))) (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1) """ if isinstance(r, tuple): # if r is of the form (vector, angle) q = Quaternion.from_axis_angle(r[0], r[1]) else: # if r is a quaternion q = r.normalize() pout = q * Quaternion(0, pin[0], pin[1], pin[2]) * conjugate(q) return (pout.b, pout.c, pout.d) def to_axis_angle(self): """Returns the axis and angle of rotation of a quaternion Returns ======= tuple Tuple of (axis, angle) Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> q = Quaternion(1, 1, 1, 1) >>> (axis, angle) = q.to_axis_angle() >>> axis (sqrt(3)/3, sqrt(3)/3, sqrt(3)/3) >>> angle 2*pi/3 """ q = self if q.a.is_negative: q = q * -1 q = q.normalize() angle = trigsimp(2 * acos(q.a)) # Since quaternion is normalised, q.a is less than 1. s = sqrt(1 - q.a*q.a) x = trigsimp(q.b / s) y = trigsimp(q.c / s) z = trigsimp(q.d / s) v = (x, y, z) t = (v, angle) return t def to_rotation_matrix(self, v=None): """Returns the equivalent rotation transformation matrix of the quaternion which represents rotation about the origin if v is not passed. Parameters ========== v : tuple or None Default value: None Returns ======= tuple Returns the equivalent rotation transformation matrix of the quaternion which represents rotation about the origin if v is not passed. Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import symbols, trigsimp, cos, sin >>> x = symbols('x') >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2)) >>> trigsimp(q.to_rotation_matrix()) Matrix([ [cos(x), -sin(x), 0], [sin(x), cos(x), 0], [ 0, 0, 1]]) Generates a 4x4 transformation matrix (used for rotation about a point other than the origin) if the point(v) is passed as an argument. Examples ======== >>> from sympy.algebras.quaternion import Quaternion >>> from sympy import symbols, trigsimp, cos, sin >>> x = symbols('x') >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2)) >>> trigsimp(q.to_rotation_matrix((1, 1, 1))) Matrix([ [cos(x), -sin(x), 0, sin(x) - cos(x) + 1], [sin(x), cos(x), 0, -sin(x) - cos(x) + 1], [ 0, 0, 1, 0], [ 0, 0, 0, 1]]) """ q = self s = q.norm()**-2 m00 = 1 - 2*s*(q.c**2 + q.d**2) m01 = 2*s*(q.b*q.c - q.d*q.a) m02 = 2*s*(q.b*q.d + q.c*q.a) m10 = 2*s*(q.b*q.c + q.d*q.a) m11 = 1 - 2*s*(q.b**2 + q.d**2) m12 = 2*s*(q.c*q.d - q.b*q.a) m20 = 2*s*(q.b*q.d - q.c*q.a) m21 = 2*s*(q.c*q.d + q.b*q.a) m22 = 1 - 2*s*(q.b**2 + q.c**2) if not v: return Matrix([[m00, m01, m02], [m10, m11, m12], [m20, m21, m22]]) else: (x, y, z) = v m03 = x - x*m00 - y*m01 - z*m02 m13 = y - x*m10 - y*m11 - z*m12 m23 = z - x*m20 - y*m21 - z*m22 m30 = m31 = m32 = 0 m33 = 1 return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13], [m20, m21, m22, m23], [m30, m31, m32, m33]])
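# --------------------------------------------------------------------------
# A short consistency sketch (not part of the class above): rotating a point
# with ``rotate_point`` should agree with applying the matrix returned by
# ``to_rotation_matrix``.  It relies only on the methods defined above; the
# variable names are illustrative.
from sympy import Matrix, symbols, cos, sin, trigsimp
from sympy.algebras.quaternion import Quaternion

x = symbols('x', real=True)
q = Quaternion(cos(x/2), 0, 0, sin(x/2))   # rotation by x about the z-axis

point = [1, 1, 1]
rotated_via_quaternion = Matrix(list(Quaternion.rotate_point(point, q)))
rotated_via_matrix = q.to_rotation_matrix() * Matrix(point)

# Both descriptions of the rotation coincide once the half-angle trig
# expressions are simplified.
assert trigsimp(rotated_via_quaternion - rotated_via_matrix) == Matrix([0, 0, 0])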
351c028646e9c49e4098a504ab85056fcb2e58216ff859335be202867038a52c
"""Predefined R^n manifolds together with common coord. systems. Coordinate systems are predefined as well as the transformation laws between them. Coordinate functions can be accessed as attributes of the manifold (eg `R2.x`), as attributes of the coordinate systems (eg `R2_r.x` and `R2_p.theta`), or by using the usual `coord_sys.coord_function(index, name)` interface. """ from __future__ import print_function, division from typing import Any from .diffgeom import Manifold, Patch, CoordSystem from sympy import sqrt, atan2, acos, sin, cos, Dummy ############################################################################### # R2 ############################################################################### R2 = Manifold('R^2', 2) # type: Any # Patch and coordinate systems. R2_origin = Patch('origin', R2) # type: Any R2_r = CoordSystem('rectangular', R2_origin, ['x', 'y']) # type: Any R2_p = CoordSystem('polar', R2_origin, ['r', 'theta']) # type: Any # Connecting the coordinate charts. x, y, r, theta = [Dummy(s) for s in ['x', 'y', 'r', 'theta']] R2_r.connect_to(R2_p, [x, y], [sqrt(x**2 + y**2), atan2(y, x)], inverse=False, fill_in_gaps=False) R2_p.connect_to(R2_r, [r, theta], [r*cos(theta), r*sin(theta)], inverse=False, fill_in_gaps=False) del x, y, r, theta # Defining the basis coordinate functions and adding shortcuts for them to the # manifold and the patch. R2.x, R2.y = R2_origin.x, R2_origin.y = R2_r.x, R2_r.y = R2_r.coord_functions() R2.r, R2.theta = R2_origin.r, R2_origin.theta = R2_p.r, R2_p.theta = R2_p.coord_functions() # Defining the basis vector fields and adding shortcuts for them to the # manifold and the patch. R2.e_x, R2.e_y = R2_origin.e_x, R2_origin.e_y = R2_r.e_x, R2_r.e_y = R2_r.base_vectors() R2.e_r, R2.e_theta = R2_origin.e_r, R2_origin.e_theta = R2_p.e_r, R2_p.e_theta = R2_p.base_vectors() # Defining the basis oneform fields and adding shortcuts for them to the # manifold and the patch. R2.dx, R2.dy = R2_origin.dx, R2_origin.dy = R2_r.dx, R2_r.dy = R2_r.base_oneforms() R2.dr, R2.dtheta = R2_origin.dr, R2_origin.dtheta = R2_p.dr, R2_p.dtheta = R2_p.base_oneforms() ############################################################################### # R3 ############################################################################### R3 = Manifold('R^3', 3) # type: Any # Patch and coordinate systems. R3_origin = Patch('origin', R3) # type: Any R3_r = CoordSystem('rectangular', R3_origin, ['x', 'y', 'z']) # type: Any R3_c = CoordSystem('cylindrical', R3_origin, ['rho', 'psi', 'z']) # type: Any R3_s = CoordSystem('spherical', R3_origin, ['r', 'theta', 'phi']) # type: Any # Connecting the coordinate charts. 
x, y, z, rho, psi, r, theta, phi = [Dummy(s) for s in ['x', 'y', 'z', 'rho', 'psi', 'r', 'theta', 'phi']] ## rectangular <-> cylindrical R3_r.connect_to(R3_c, [x, y, z], [sqrt(x**2 + y**2), atan2(y, x), z], inverse=False, fill_in_gaps=False) R3_c.connect_to(R3_r, [rho, psi, z], [rho*cos(psi), rho*sin(psi), z], inverse=False, fill_in_gaps=False) ## rectangular <-> spherical R3_r.connect_to(R3_s, [x, y, z], [sqrt(x**2 + y**2 + z**2), acos(z/ sqrt(x**2 + y**2 + z**2)), atan2(y, x)], inverse=False, fill_in_gaps=False) R3_s.connect_to(R3_r, [r, theta, phi], [r*sin(theta)*cos(phi), r*sin( theta)*sin(phi), r*cos(theta)], inverse=False, fill_in_gaps=False) ## cylindrical <-> spherical R3_c.connect_to(R3_s, [rho, psi, z], [sqrt(rho**2 + z**2), acos(z/sqrt(rho**2 + z**2)), psi], inverse=False, fill_in_gaps=False) R3_s.connect_to(R3_c, [r, theta, phi], [r*sin(theta), phi, r*cos(theta)], inverse=False, fill_in_gaps=False) del x, y, z, rho, psi, r, theta, phi # Defining the basis coordinate functions. R3_r.x, R3_r.y, R3_r.z = R3_r.coord_functions() R3_c.rho, R3_c.psi, R3_c.z = R3_c.coord_functions() R3_s.r, R3_s.theta, R3_s.phi = R3_s.coord_functions() # Defining the basis vector fields. R3_r.e_x, R3_r.e_y, R3_r.e_z = R3_r.base_vectors() R3_c.e_rho, R3_c.e_psi, R3_c.e_z = R3_c.base_vectors() R3_s.e_r, R3_s.e_theta, R3_s.e_phi = R3_s.base_vectors() # Defining the basis oneform fields. R3_r.dx, R3_r.dy, R3_r.dz = R3_r.base_oneforms() R3_c.drho, R3_c.dpsi, R3_c.dz = R3_c.base_oneforms() R3_s.dr, R3_s.dtheta, R3_s.dphi = R3_s.base_oneforms()
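# --------------------------------------------------------------------------
# A brief usage sketch (not part of this module): the predefined charts can
# transform coordinate tuples, evaluate coordinate functions at points and
# compute jacobians without any further setup, mirroring the examples in
# the CoordSystem docstring.  The symbols r0, theta0 are illustrative.
from sympy import pi, symbols
from sympy.diffgeom.rn import R2, R2_p, R2_r

# Polar (r, theta) = (2, pi/2) expressed in rectangular coordinates.
print(R2_p.coord_tuple_transform_to(R2_r, [2, pi/2]))   # Matrix([[0], [2]])

# Coordinate functions evaluated at a point defined in the polar chart.
p = R2_p.point([2, pi/2])
print(R2.x(p), R2.y(p))                                  # 0 2

# Jacobian of the polar -> rectangular transformation.
r0, theta0 = symbols('r0 theta0', positive=True)
print(R2_p.jacobian(R2_r, [r0, theta0]))
# Matrix([[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]])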
a8d5b16005712701e04bbff3482a74f7e6a327ed4fce6d7f6353f462ca611b28
from __future__ import print_function, division from typing import Any, Set from itertools import permutations from sympy.combinatorics import Permutation from sympy.core import AtomicExpr, Basic, Expr, Dummy, Function, sympify, diff, Pow, Mul, Add, symbols, Tuple from sympy.core.compatibility import reduce from sympy.core.numbers import Zero from sympy.functions import factorial from sympy.matrices import Matrix from sympy.simplify import simplify from sympy.solvers import solve # TODO you are a bit excessive in the use of Dummies # TODO dummy point, literal field # TODO too often one needs to call doit or simplify on the output, check the # tests and find out why from sympy.tensor.array import ImmutableDenseNDimArray class Manifold(Basic): """Object representing a mathematical manifold. The only role that this object plays is to keep a list of all patches defined on the manifold. It does not provide any means to study the topological characteristics of the manifold that it represents. """ def __new__(cls, name, dim): name = sympify(name) dim = sympify(dim) obj = Basic.__new__(cls, name, dim) obj.name = name obj.dim = dim obj.patches = [] # The patches list is necessary if a Patch instance needs to enumerate # other Patch instance on the same manifold. return obj def _latex(self, printer, *args): return r'\text{%s}' % self.name class Patch(Basic): """Object representing a patch on a manifold. On a manifold one can have many patches that do not always include the whole manifold. On these patches coordinate charts can be defined that permit the parameterization of any point on the patch in terms of a tuple of real numbers (the coordinates). This object serves as a container/parent for all coordinate system charts that can be defined on the patch it represents. Examples ======== Define a Manifold and a Patch on that Manifold: >>> from sympy.diffgeom import Manifold, Patch >>> m = Manifold('M', 3) >>> p = Patch('P', m) >>> p in m.patches True """ # Contains a reference to the parent manifold in order to be able to access # other patches. def __new__(cls, name, manifold): name = sympify(name) obj = Basic.__new__(cls, name, manifold) obj.name = name obj.manifold = manifold obj.manifold.patches.append(obj) obj.coord_systems = [] # The list of coordinate systems is necessary for an instance of # CoordSystem to enumerate other coord systems on the patch. return obj @property def dim(self): return self.manifold.dim def _latex(self, printer, *args): return r'\text{%s}_{%s}' % (self.name, self.manifold._latex(printer, *args)) class CoordSystem(Basic): """Contains all coordinate transformation logic. Examples ======== Define a Manifold and a Patch, and then define two coord systems on that patch: >>> from sympy import symbols, sin, cos, pi >>> from sympy.diffgeom import Manifold, Patch, CoordSystem >>> from sympy.simplify import simplify >>> r, theta = symbols('r, theta') >>> m = Manifold('M', 2) >>> patch = Patch('P', m) >>> rect = CoordSystem('rect', patch) >>> polar = CoordSystem('polar', patch) >>> rect in patch.coord_systems True Connect the coordinate systems. 
An inverse transformation is automatically found by ``solve`` when possible: >>> polar.connect_to(rect, [r, theta], [r*cos(theta), r*sin(theta)]) >>> polar.coord_tuple_transform_to(rect, [0, 2]) Matrix([ [0], [0]]) >>> polar.coord_tuple_transform_to(rect, [2, pi/2]) Matrix([ [0], [2]]) >>> rect.coord_tuple_transform_to(polar, [1, 1]).applyfunc(simplify) Matrix([ [sqrt(2)], [ pi/4]]) Calculate the jacobian of the polar to cartesian transformation: >>> polar.jacobian(rect, [r, theta]) Matrix([ [cos(theta), -r*sin(theta)], [sin(theta), r*cos(theta)]]) Define a point using coordinates in one of the coordinate systems: >>> p = polar.point([1, 3*pi/4]) >>> rect.point_to_coords(p) Matrix([ [-sqrt(2)/2], [ sqrt(2)/2]]) Define a basis scalar field (i.e. a coordinate function), that takes a point and returns its coordinates. It is an instance of ``BaseScalarField``. >>> rect.coord_function(0)(p) -sqrt(2)/2 >>> rect.coord_function(1)(p) sqrt(2)/2 Define a basis vector field (i.e. a unit vector field along the coordinate line). Vectors are also differential operators on scalar fields. It is an instance of ``BaseVectorField``. >>> v_x = rect.base_vector(0) >>> x = rect.coord_function(0) >>> v_x(x) 1 >>> v_x(v_x(x)) 0 Define a basis oneform field: >>> dx = rect.base_oneform(0) >>> dx(v_x) 1 If you provide a list of names the fields will print nicely: - without provided names: >>> x, v_x, dx (rect_0, e_rect_0, drect_0) - with provided names >>> rect = CoordSystem('rect', patch, ['x', 'y']) >>> rect.coord_function(0), rect.base_vector(0), rect.base_oneform(0) (x, e_x, dx) """ # Contains a reference to the parent patch in order to be able to access # other coordinate system charts. def __new__(cls, name, patch, names=None): name = sympify(name) # names is not in args because it is related only to printing, not to # identifying the CoordSystem instance. if not names: names = ['%s_%d' % (name, i) for i in range(patch.dim)] if isinstance(names, Tuple): obj = Basic.__new__(cls, name, patch, names) else: names = Tuple(*symbols(names)) obj = Basic.__new__(cls, name, patch, names) obj.name = name obj._names = [str(i) for i in names.args] obj.patch = patch obj.patch.coord_systems.append(obj) obj.transforms = {} # All the coordinate transformation logic is in this dictionary in the # form of: # key = other coordinate system # value = tuple of # TODO make these Lambda instances # - list of `Dummy` coordinates in this coordinate system # - list of expressions as a function of the Dummies giving # the coordinates in another coordinate system obj._dummies = [Dummy(str(n)) for n in names] obj._dummy = Dummy() return obj @property def dim(self): return self.patch.dim ########################################################################## # Coordinate transformations. ########################################################################## def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False): """Register the transformation used to switch to another coordinate system. 
Parameters ========== to_sys another instance of ``CoordSystem`` from_coords list of symbols in terms of which ``to_exprs`` is given to_exprs list of the expressions of the new coordinate tuple inverse try to deduce and register the inverse transformation fill_in_gaps try to deduce other transformation that are made possible by composing the present transformation with other already registered transformation """ from_coords, to_exprs = dummyfy(from_coords, to_exprs) self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs) if inverse: to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs) if fill_in_gaps: self._fill_gaps_in_transformations() @staticmethod def _inv_transf(from_coords, to_exprs): inv_from = [i.as_dummy() for i in from_coords] inv_to = solve( [t[0] - t[1] for t in zip(inv_from, to_exprs)], list(from_coords), dict=True)[0] inv_to = [inv_to[fc] for fc in from_coords] return Matrix(inv_from), Matrix(inv_to) @staticmethod def _fill_gaps_in_transformations(): raise NotImplementedError # TODO def coord_tuple_transform_to(self, to_sys, coords): """Transform ``coords`` to coord system ``to_sys``. See the docstring of ``CoordSystem`` for examples.""" coords = Matrix(coords) if self != to_sys: transf = self.transforms[to_sys] coords = transf[1].subs(list(zip(transf[0], coords))) return coords def jacobian(self, to_sys, coords): """Return the jacobian matrix of a transformation.""" with_dummies = self.coord_tuple_transform_to( to_sys, self._dummies).jacobian(self._dummies) return with_dummies.subs(list(zip(self._dummies, coords))) ########################################################################## # Base fields. ########################################################################## def coord_function(self, coord_index): """Return a ``BaseScalarField`` that takes a point and returns one of the coords. Takes a point and returns its coordinate in this coordinate system. See the docstring of ``CoordSystem`` for examples.""" return BaseScalarField(self, coord_index) def coord_functions(self): """Returns a list of all coordinate functions. For more details see the ``coord_function`` method of this class.""" return [self.coord_function(i) for i in range(self.dim)] def base_vector(self, coord_index): """Return a basis vector field. The basis vector field for this coordinate system. It is also an operator on scalar fields. See the docstring of ``CoordSystem`` for examples.""" return BaseVectorField(self, coord_index) def base_vectors(self): """Returns a list of all base vectors. For more details see the ``base_vector`` method of this class.""" return [self.base_vector(i) for i in range(self.dim)] def base_oneform(self, coord_index): """Return a basis 1-form field. The basis one-form field for this coordinate system. It is also an operator on vector fields. See the docstring of ``CoordSystem`` for examples.""" return Differential(self.coord_function(coord_index)) def base_oneforms(self): """Returns a list of all base oneforms. For more details see the ``base_oneform`` method of this class.""" return [self.base_oneform(i) for i in range(self.dim)] ########################################################################## # Points. ########################################################################## def point(self, coords): """Create a ``Point`` with coordinates given in this coord system. See the docstring of ``CoordSystem`` for examples.""" return Point(self, coords) def point_to_coords(self, point): """Calculate the coordinates of a point in this coord system. 
See the docstring of ``CoordSystem`` for examples.""" return point.coords(self) ########################################################################## # Printing. ########################################################################## def _latex(self, printer, *args): return r'\text{%s}^{\text{%s}}_{%s}' % ( self.name, self.patch.name, self.patch.manifold._latex(printer, *args)) class Point(Basic): """Point in a Manifold object. To define a point you must supply coordinates and a coordinate system. The usage of this object after its definition is independent of the coordinate system that was used in order to define it, however due to limitations in the simplification routines you can arrive at complicated expressions if you use inappropriate coordinate systems. Examples ======== Define the boilerplate Manifold, Patch and coordinate systems: >>> from sympy import symbols, sin, cos, pi >>> from sympy.diffgeom import ( ... Manifold, Patch, CoordSystem, Point) >>> r, theta = symbols('r, theta') >>> m = Manifold('M', 2) >>> p = Patch('P', m) >>> rect = CoordSystem('rect', p) >>> polar = CoordSystem('polar', p) >>> polar.connect_to(rect, [r, theta], [r*cos(theta), r*sin(theta)]) Define a point using coordinates from one of the coordinate systems: >>> p = Point(polar, [r, 3*pi/4]) >>> p.coords() Matrix([ [ r], [3*pi/4]]) >>> p.coords(rect) Matrix([ [-sqrt(2)*r/2], [ sqrt(2)*r/2]]) """ def __init__(self, coord_sys, coords): super(Point, self).__init__() self._coord_sys = coord_sys self._coords = Matrix(coords) self._args = self._coord_sys, self._coords def coords(self, to_sys=None): """Coordinates of the point in a given coordinate system. If ``to_sys`` is ``None`` it returns the coordinates in the system in which the point was defined.""" if to_sys: return self._coord_sys.coord_tuple_transform_to(to_sys, self._coords) else: return self._coords @property def free_symbols(self): raise NotImplementedError return self._coords.free_symbols class BaseScalarField(AtomicExpr): """Base Scalar Field over a Manifold for a given Coordinate System. A scalar field takes a point as an argument and returns a scalar. A base scalar field of a coordinate system takes a point and returns one of the coordinates of that point in the coordinate system in question. To define a scalar field you need to choose the coordinate system and the index of the coordinate. The use of the scalar field after its definition is independent of the coordinate system in which it was defined, however due to limitations in the simplification routines you may arrive at more complicated expression if you use unappropriate coordinate systems. You can build complicated scalar fields by just building up SymPy expressions containing ``BaseScalarField`` instances. Examples ======== Define boilerplate Manifold, Patch and coordinate systems: >>> from sympy import symbols, sin, cos, pi, Function >>> from sympy.diffgeom import ( ... 
Manifold, Patch, CoordSystem, Point, BaseScalarField) >>> r0, theta0 = symbols('r0, theta0') >>> m = Manifold('M', 2) >>> p = Patch('P', m) >>> rect = CoordSystem('rect', p) >>> polar = CoordSystem('polar', p) >>> polar.connect_to(rect, [r0, theta0], [r0*cos(theta0), r0*sin(theta0)]) Point to be used as an argument for the filed: >>> point = polar.point([r0, 0]) Examples of fields: >>> fx = BaseScalarField(rect, 0) >>> fy = BaseScalarField(rect, 1) >>> (fx**2+fy**2).rcall(point) r0**2 >>> g = Function('g') >>> ftheta = BaseScalarField(polar, 1) >>> fg = g(ftheta-pi) >>> fg.rcall(point) g(-pi) """ is_commutative = True def __new__(cls, coord_sys, index): obj = AtomicExpr.__new__(cls, coord_sys, sympify(index)) obj._coord_sys = coord_sys obj._index = index return obj def __call__(self, *args): """Evaluating the field at a point or doing nothing. If the argument is a ``Point`` instance, the field is evaluated at that point. The field is returned itself if the argument is any other object. It is so in order to have working recursive calling mechanics for all fields (check the ``__call__`` method of ``Expr``). """ point = args[0] if len(args) != 1 or not isinstance(point, Point): return self coords = point.coords(self._coord_sys) # XXX Calling doit is necessary with all the Subs expressions # XXX Calling simplify is necessary with all the trig expressions return simplify(coords[self._index]).doit() # XXX Workaround for limitations on the content of args free_symbols = set() # type: Set[Any] def doit(self): return self class BaseVectorField(AtomicExpr): r"""Vector Field over a Manifold. A vector field is an operator taking a scalar field and returning a directional derivative (which is also a scalar field). A base vector field is the same type of operator, however the derivation is specifically done with respect to a chosen coordinate. To define a base vector field you need to choose the coordinate system and the index of the coordinate. The use of the vector field after its definition is independent of the coordinate system in which it was defined, however due to limitations in the simplification routines you may arrive at more complicated expression if you use unappropriate coordinate systems. Examples ======== Use the predefined R2 manifold, setup some boilerplate. >>> from sympy import symbols, pi, Function >>> from sympy.diffgeom.rn import R2, R2_p, R2_r >>> from sympy.diffgeom import BaseVectorField >>> from sympy import pprint >>> x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0') Points to be used as arguments for the field: >>> point_p = R2_p.point([r0, theta0]) >>> point_r = R2_r.point([x0, y0]) Scalar field to operate on: >>> g = Function('g') >>> s_field = g(R2.x, R2.y) >>> s_field.rcall(point_r) g(x0, y0) >>> s_field.rcall(point_p) g(r0*cos(theta0), r0*sin(theta0)) Vector field: >>> v = BaseVectorField(R2_r, 1) >>> pprint(v(s_field)) / d \| |---(g(x, xi))|| \dxi /|xi=y >>> pprint(v(s_field).rcall(point_r).doit()) d ---(g(x0, y0)) dy0 >>> pprint(v(s_field).rcall(point_p)) / d \| |---(g(r0*cos(theta0), xi))|| \dxi /|xi=r0*sin(theta0) """ is_commutative = False def __new__(cls, coord_sys, index): index = sympify(index) obj = AtomicExpr.__new__(cls, coord_sys, index) obj._coord_sys = coord_sys obj._index = index return obj def __call__(self, scalar_field): """Apply on a scalar field. The action of a vector field on a scalar field is a directional differentiation. If the argument is not a scalar field an error is raised. 
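        The evaluation is done in two steps: first, every ``BaseScalarField``
        appearing in the supplied expression is replaced by a dummy function of
        a single dummy variable and the result is differentiated with respect
        to that variable; then each derivative of a dummy function is replaced
        by the appropriate entry of the Jacobian of the transformation from the
        coordinate system of this vector field to the coordinate system of the
        scalar field, and the dummy functions are substituted back by the
        original scalar fields.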
""" if covariant_order(scalar_field) or contravariant_order(scalar_field): raise ValueError('Only scalar fields can be supplied as arguments to vector fields.') if scalar_field is None: return self base_scalars = list(scalar_field.atoms(BaseScalarField)) # First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r) d_var = self._coord_sys._dummy # TODO: you need a real dummy function for the next line d_funcs = [Function('_#_%s' % i)(d_var) for i, b in enumerate(base_scalars)] d_result = scalar_field.subs(list(zip(base_scalars, d_funcs))) d_result = d_result.diff(d_var) # Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y)) coords = self._coord_sys._dummies d_funcs_deriv = [f.diff(d_var) for f in d_funcs] d_funcs_deriv_sub = [] for b in base_scalars: jac = self._coord_sys.jacobian(b._coord_sys, coords) d_funcs_deriv_sub.append(jac[b._index, self._index]) d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub))) # Remove the dummies result = d_result.subs(list(zip(d_funcs, base_scalars))) result = result.subs(list(zip(coords, self._coord_sys.coord_functions()))) return result.doit() class Commutator(Expr): r"""Commutator of two vector fields. The commutator of two vector fields `v_1` and `v_2` is defined as the vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal to `v_1(v_2(f)) - v_2(v_1(f))`. Examples ======== Use the predefined R2 manifold, setup some boilerplate. >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import Commutator >>> from sympy import pprint >>> from sympy.simplify import simplify Vector fields: >>> e_x, e_y, e_r = R2.e_x, R2.e_y, R2.e_r >>> c_xy = Commutator(e_x, e_y) >>> c_xr = Commutator(e_x, e_r) >>> c_xy 0 Unfortunately, the current code is not able to compute everything: >>> c_xr Commutator(e_x, e_r) >>> simplify(c_xr(R2.y**2)) -2*y**2*cos(theta)/(x**2 + y**2) """ def __new__(cls, v1, v2): if (covariant_order(v1) or contravariant_order(v1) != 1 or covariant_order(v2) or contravariant_order(v2) != 1): raise ValueError( 'Only commutators of vector fields are supported.') if v1 == v2: return Zero() coord_sys = set().union(*[v.atoms(CoordSystem) for v in (v1, v2)]) if len(coord_sys) == 1: # Only one coordinate systems is used, hence it is easy enough to # actually evaluate the commutator. if all(isinstance(v, BaseVectorField) for v in (v1, v2)): return Zero() bases_1, bases_2 = [list(v.atoms(BaseVectorField)) for v in (v1, v2)] coeffs_1 = [v1.expand().coeff(b) for b in bases_1] coeffs_2 = [v2.expand().coeff(b) for b in bases_2] res = 0 for c1, b1 in zip(coeffs_1, bases_1): for c2, b2 in zip(coeffs_2, bases_2): res += c1*b1(c2)*b2 - c2*b2(c1)*b1 return res else: return super(Commutator, cls).__new__(cls, v1, v2) def __init__(self, v1, v2): super(Commutator, self).__init__() self._args = (v1, v2) self._v1 = v1 self._v2 = v2 def __call__(self, scalar_field): """Apply on a scalar field. If the argument is not a scalar field an error is raised. """ return self._v1(self._v2(scalar_field)) - self._v2(self._v1(scalar_field)) class Differential(Expr): r"""Return the differential (exterior derivative) of a form field. The differential of a form (i.e. the exterior derivative) has a complicated definition in the general case. The differential `df` of the 0-form `f` is defined for any vector field `v` as `df(v) = v(f)`. Examples ======== Use the predefined R2 manifold, setup some boilerplate. 
>>> from sympy import Function >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import Differential >>> from sympy import pprint Scalar field (0-forms): >>> g = Function('g') >>> s_field = g(R2.x, R2.y) Vector fields: >>> e_x, e_y, = R2.e_x, R2.e_y Differentials: >>> dg = Differential(s_field) >>> dg d(g(x, y)) >>> pprint(dg(e_x)) / d \| |---(g(xi, y))|| \dxi /|xi=x >>> pprint(dg(e_y)) / d \| |---(g(x, xi))|| \dxi /|xi=y Applying the exterior derivative operator twice always results in: >>> Differential(dg) 0 """ is_commutative = False def __new__(cls, form_field): if contravariant_order(form_field): raise ValueError( 'A vector field was supplied as an argument to Differential.') if isinstance(form_field, Differential): return Zero() else: return super(Differential, cls).__new__(cls, form_field) def __init__(self, form_field): super(Differential, self).__init__() self._form_field = form_field self._args = (self._form_field, ) def __call__(self, *vector_fields): """Apply on a list of vector_fields. If the number of vector fields supplied is not equal to 1 + the order of the form field inside the differential the result is undefined. For 1-forms (i.e. differentials of scalar fields) the evaluation is done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector field, the differential is returned unchanged. This is done in order to permit partial contractions for higher forms. In the general case the evaluation is done by applying the form field inside the differential on a list with one less elements than the number of elements in the original list. Lowering the number of vector fields is achieved through replacing each pair of fields by their commutator. If the arguments are not vectors or ``None``s an error is raised. """ if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None for a in vector_fields): raise ValueError('The arguments supplied to Differential should be vector fields or Nones.') k = len(vector_fields) if k == 1: if vector_fields[0]: return vector_fields[0].rcall(self._form_field) return self else: # For higher form it is more complicated: # Invariant formula: # https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula # df(v1, ... vn) = +/- vi(f(v1..no i..vn)) # +/- f([vi,vj],v1..no i, no j..vn) f = self._form_field v = vector_fields ret = 0 for i in range(k): t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:])) ret += (-1)**i*t for j in range(i + 1, k): c = Commutator(v[i], v[j]) if c: # TODO this is ugly - the Commutator can be Zero and # this causes the next line to fail t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:]) ret += (-1)**(i + j)*t return ret class TensorProduct(Expr): """Tensor product of forms. The tensor product permits the creation of multilinear functionals (i.e. higher order tensors) out of lower order fields (e.g. 1-forms and vector fields). However, the higher tensors thus created lack the interesting features provided by the other type of product, the wedge product, namely they are not antisymmetric and hence are not form fields. Examples ======== Use the predefined R2 manifold, setup some boilerplate. >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import TensorProduct >>> TensorProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y) 1 >>> TensorProduct(R2.dx, R2.dy)(R2.e_y, R2.e_x) 0 >>> TensorProduct(R2.dx, R2.x*R2.dy)(R2.x*R2.e_x, R2.e_y) x**2 >>> TensorProduct(R2.e_x, R2.e_y)(R2.x**2, R2.y**2) 4*x*y >>> TensorProduct(R2.e_y, R2.dx)(R2.y) dx You can nest tensor products. 
>>> tp1 = TensorProduct(R2.dx, R2.dy) >>> TensorProduct(tp1, R2.dx)(R2.e_x, R2.e_y, R2.e_x) 1 You can make partial contraction for instance when 'raising an index'. Putting ``None`` in the second argument of ``rcall`` means that the respective position in the tensor product is left as it is. >>> TP = TensorProduct >>> metric = TP(R2.dx, R2.dx) + 3*TP(R2.dy, R2.dy) >>> metric.rcall(R2.e_y, None) 3*dy Or automatically pad the args with ``None`` without specifying them. >>> metric.rcall(R2.e_y) 3*dy """ def __new__(cls, *args): scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0]) multifields = [m for m in args if covariant_order(m) + contravariant_order(m)] if multifields: if len(multifields) == 1: return scalar*multifields[0] return scalar*super(TensorProduct, cls).__new__(cls, *multifields) else: return scalar def __init__(self, *args): super(TensorProduct, self).__init__() self._args = args def __call__(self, *fields): """Apply on a list of fields. If the number of input fields supplied is not equal to the order of the tensor product field, the list of arguments is padded with ``None``'s. The list of arguments is divided in sublists depending on the order of the forms inside the tensor product. The sublists are provided as arguments to these forms and the resulting expressions are given to the constructor of ``TensorProduct``. """ tot_order = covariant_order(self) + contravariant_order(self) tot_args = len(fields) if tot_args != tot_order: fields = list(fields) + [None]*(tot_order - tot_args) orders = [covariant_order(f) + contravariant_order(f) for f in self._args] indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)] fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])] multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)] return TensorProduct(*multipliers) class WedgeProduct(TensorProduct): """Wedge product of forms. In the context of integration only completely antisymmetric forms make sense. The wedge product permits the creation of such forms. Examples ======== Use the predefined R2 manifold, setup some boilerplate. >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import WedgeProduct >>> WedgeProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y) 1 >>> WedgeProduct(R2.dx, R2.dy)(R2.e_y, R2.e_x) -1 >>> WedgeProduct(R2.dx, R2.x*R2.dy)(R2.x*R2.e_x, R2.e_y) x**2 >>> WedgeProduct(R2.e_x,R2.e_y)(R2.y,None) -e_x You can nest wedge products. >>> wp1 = WedgeProduct(R2.dx, R2.dy) >>> WedgeProduct(wp1, R2.dx)(R2.e_x, R2.e_y, R2.e_x) 0 """ # TODO the calculation of signatures is slow # TODO you do not need all these permutations (neither the prefactor) def __call__(self, *fields): """Apply on a list of vector_fields. The expression is rewritten internally in terms of tensor products and evaluated.""" orders = (covariant_order(e) + contravariant_order(e) for e in self.args) mul = 1/Mul(*(factorial(o) for o in orders)) perms = permutations(fields) perms_par = (Permutation( p).signature() for p in permutations(list(range(len(fields))))) tensor_prod = TensorProduct(*self.args) return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)]) class LieDerivative(Expr): """Lie derivative with respect to a vector field. The transport operator that defines the Lie derivative is the pushforward of the field to be derived along the integral curve of the field with respect to which one derives. 
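    Depending on the type of the derived field the result takes different
    forms: the Lie derivative of a scalar field is simply the directional
    derivative given by the vector field, the Lie derivative of a vector field
    is the commutator of the two fields, and for form fields (and other
    covariant tensors) the instance is left unevaluated.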
Examples ======== >>> from sympy.diffgeom import (LieDerivative, TensorProduct) >>> from sympy.diffgeom.rn import R2 >>> LieDerivative(R2.e_x, R2.y) 0 >>> LieDerivative(R2.e_x, R2.x) 1 >>> LieDerivative(R2.e_x, R2.e_x) 0 The Lie derivative of a tensor field by another tensor field is equal to their commutator: >>> LieDerivative(R2.e_x, R2.e_r) Commutator(e_x, e_r) >>> LieDerivative(R2.e_x + R2.e_y, R2.x) 1 >>> tp = TensorProduct(R2.dx, R2.dy) >>> LieDerivative(R2.e_x, tp) LieDerivative(e_x, TensorProduct(dx, dy)) >>> LieDerivative(R2.e_x, tp) LieDerivative(e_x, TensorProduct(dx, dy)) """ def __new__(cls, v_field, expr): expr_form_ord = covariant_order(expr) if contravariant_order(v_field) != 1 or covariant_order(v_field): raise ValueError('Lie derivatives are defined only with respect to' ' vector fields. The supplied argument was not a ' 'vector field.') if expr_form_ord > 0: return super(LieDerivative, cls).__new__(cls, v_field, expr) if expr.atoms(BaseVectorField): return Commutator(v_field, expr) else: return v_field.rcall(expr) def __init__(self, v_field, expr): super(LieDerivative, self).__init__() self._v_field = v_field self._expr = expr self._args = (self._v_field, self._expr) def __call__(self, *args): v = self._v_field expr = self._expr lead_term = v(expr(*args)) rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:]) for i in range(len(args))]) return lead_term - rest class BaseCovarDerivativeOp(Expr): """Covariant derivative operator with respect to a base vector. Examples ======== >>> from sympy.diffgeom.rn import R2, R2_r >>> from sympy.diffgeom import BaseCovarDerivativeOp >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct >>> TP = TensorProduct >>> ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) >>> ch [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch) >>> cvd(R2.x) 1 >>> cvd(R2.x*R2.e_x) e_x """ def __init__(self, coord_sys, index, christoffel): super(BaseCovarDerivativeOp, self).__init__() self._coord_sys = coord_sys self._index = index self._christoffel = christoffel self._args = self._coord_sys, self._index, self._christoffel def __call__(self, field): """Apply on a scalar field. The action of a vector field on a scalar field is a directional differentiation. If the argument is not a scalar field the behaviour is undefined. 
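        The evaluation mirrors the one in ``BaseVectorField``: the base vector
        fields appearing in the argument are temporarily replaced by dummy
        functions of the coordinate with respect to which the derivative is
        taken, the corresponding base vector field is applied, and the
        resulting derivatives of the dummy functions are replaced by the
        connection terms built from the supplied Christoffel symbols (the
        derivative of a base vector ``e_j`` in the direction of ``e_i``
        contributes ``christoffel[k, i, j]*e_k`` summed over ``k``).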
""" if covariant_order(field) != 0: raise NotImplementedError() field = vectors_in_basis(field, self._coord_sys) wrt_vector = self._coord_sys.base_vector(self._index) wrt_scalar = self._coord_sys.coord_function(self._index) vectors = list(field.atoms(BaseVectorField)) # First step: replace all vectors with something susceptible to # derivation and do the derivation # TODO: you need a real dummy function for the next line d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i, b in enumerate(vectors)] d_result = field.subs(list(zip(vectors, d_funcs))) d_result = wrt_vector(d_result) # Second step: backsubstitute the vectors in d_result = d_result.subs(list(zip(d_funcs, vectors))) # Third step: evaluate the derivatives of the vectors derivs = [] for v in vectors: d = Add(*[(self._christoffel[k, wrt_vector._index, v._index] *v._coord_sys.base_vector(k)) for k in range(v._coord_sys.dim)]) derivs.append(d) to_subs = [wrt_vector(d) for d in d_funcs] # XXX: This substitution can fail when there are Dummy symbols and the # cache is disabled: https://github.com/sympy/sympy/issues/17794 result = d_result.subs(list(zip(to_subs, derivs))) # Remove the dummies result = result.subs(list(zip(d_funcs, vectors))) return result.doit() class CovarDerivativeOp(Expr): """Covariant derivative operator. Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import CovarDerivativeOp >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct >>> TP = TensorProduct >>> ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) >>> ch [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> cvd = CovarDerivativeOp(R2.x*R2.e_x, ch) >>> cvd(R2.x) x >>> cvd(R2.x*R2.e_x) x*e_x """ def __init__(self, wrt, christoffel): super(CovarDerivativeOp, self).__init__() if len(set(v._coord_sys for v in wrt.atoms(BaseVectorField))) > 1: raise NotImplementedError() if contravariant_order(wrt) != 1 or covariant_order(wrt): raise ValueError('Covariant derivatives are defined only with ' 'respect to vector fields. The supplied argument ' 'was not a vector field.') self._wrt = wrt self._christoffel = christoffel self._args = self._wrt, self._christoffel def __call__(self, field): vectors = list(self._wrt.atoms(BaseVectorField)) base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel) for v in vectors] return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field) def _latex(self, printer, *args): return r'\mathbb{\nabla}_{%s}' % printer._print(self._wrt) ############################################################################### # Integral curves on vector fields ############################################################################### def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False): r"""Return the series expansion for an integral curve of the field. Integral curve is a function `\gamma` taking a parameter in `R` to a point in the manifold. It verifies the equation: `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` where the given ``vector_field`` is denoted as `V`. This holds for any value `t` for the parameter and any scalar field `f`. This equation can also be decomposed of a basis of coordinate functions `V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i` This function returns a series expansion of `\gamma(t)` in terms of the coordinate system ``coord_sys``. 
The equations and expansions are necessarily done in coordinate-system-dependent way as there is no other way to represent movement between points on the manifold (i.e. there is no such thing as a difference of points for a general manifold). See Also ======== intcurve_diffequ Parameters ========== vector_field the vector field for which an integral curve will be given param the argument of the function `\gamma` from R to the curve start_point the point which corresponds to `\gamma(0)` n the order to which to expand coord_sys the coordinate system in which to expand coeffs (default False) - if True return a list of elements of the expansion Examples ======== Use the predefined R2 manifold: >>> from sympy.abc import t, x, y >>> from sympy.diffgeom.rn import R2, R2_p, R2_r >>> from sympy.diffgeom import intcurve_series Specify a starting point and a vector field: >>> start_point = R2_r.point([x, y]) >>> vector_field = R2_r.e_x Calculate the series: >>> intcurve_series(vector_field, t, start_point, n=3) Matrix([ [t + x], [ y]]) Or get the elements of the expansion in a list: >>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True) >>> series[0] Matrix([ [x], [y]]) >>> series[1] Matrix([ [t], [0]]) >>> series[2] Matrix([ [0], [0]]) The series in the polar coordinate system: >>> series = intcurve_series(vector_field, t, start_point, ... n=3, coord_sys=R2_p, coeffs=True) >>> series[0] Matrix([ [sqrt(x**2 + y**2)], [ atan2(y, x)]]) >>> series[1] Matrix([ [t*x/sqrt(x**2 + y**2)], [ -t*y/(x**2 + y**2)]]) >>> series[2] Matrix([ [t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2], [ t**2*x*y/(x**2 + y**2)**2]]) """ if contravariant_order(vector_field) != 1 or covariant_order(vector_field): raise ValueError('The supplied field was not a vector field.') def iter_vfield(scalar_field, i): """Return ``vector_field`` called `i` times on ``scalar_field``.""" return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field) def taylor_terms_per_coord(coord_function): """Return the series for one of the coordinates.""" return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i) for i in range(n)] coord_sys = coord_sys if coord_sys else start_point._coord_sys coord_functions = coord_sys.coord_functions() taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions] if coeffs: return [Matrix(t) for t in zip(*taylor_terms)] else: return Matrix([sum(c) for c in taylor_terms]) def intcurve_diffequ(vector_field, param, start_point, coord_sys=None): r"""Return the differential equation for an integral curve of the field. Integral curve is a function `\gamma` taking a parameter in `R` to a point in the manifold. It verifies the equation: `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` where the given ``vector_field`` is denoted as `V`. This holds for any value `t` for the parameter and any scalar field `f`. This function returns the differential equation of `\gamma(t)` in terms of the coordinate system ``coord_sys``. The equations and expansions are necessarily done in coordinate-system-dependent way as there is no other way to represent movement between points on the manifold (i.e. there is no such thing as a difference of points for a general manifold). 
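    The returned equations are the expressions
    `\frac{d}{dt}f_i\big(\gamma(t)\big) - V(f_i)\big(\gamma(t)\big)` and the
    returned initial conditions are the values `f_i(0)` minus the
    corresponding coordinates of ``start_point``; both lists are meant to be
    equated to zero.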
See Also ======== intcurve_series Parameters ========== vector_field the vector field for which an integral curve will be given param the argument of the function `\gamma` from R to the curve start_point the point which corresponds to `\gamma(0)` coord_sys the coordinate system in which to give the equations Returns ======= a tuple of (equations, initial conditions) Examples ======== Use the predefined R2 manifold: >>> from sympy.abc import t >>> from sympy.diffgeom.rn import R2, R2_p, R2_r >>> from sympy.diffgeom import intcurve_diffequ Specify a starting point and a vector field: >>> start_point = R2_r.point([0, 1]) >>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y Get the equation: >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point) >>> equations [f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)] >>> init_cond [f_0(0), f_1(0) - 1] The series in the polar coordinate system: >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p) >>> equations [Derivative(f_0(t), t), Derivative(f_1(t), t) - 1] >>> init_cond [f_0(0) - 1, f_1(0) - pi/2] """ if contravariant_order(vector_field) != 1 or covariant_order(vector_field): raise ValueError('The supplied field was not a vector field.') coord_sys = coord_sys if coord_sys else start_point._coord_sys gammas = [Function('f_%d' % i)(param) for i in range( start_point._coord_sys.dim)] arbitrary_p = Point(coord_sys, gammas) coord_functions = coord_sys.coord_functions() equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p)) for cf in coord_functions] init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point)) for cf in coord_functions] return equations, init_cond ############################################################################### # Helpers ############################################################################### def dummyfy(args, exprs): # TODO Is this a good idea? d_args = Matrix([s.as_dummy() for s in args]) reps = dict(zip(args, d_args)) d_exprs = Matrix([sympify(expr).subs(reps) for expr in exprs]) return d_args, d_exprs ############################################################################### # Helpers ############################################################################### def contravariant_order(expr, _strict=False): """Return the contravariant order of an expression. Examples ======== >>> from sympy.diffgeom import contravariant_order >>> from sympy.diffgeom.rn import R2 >>> from sympy.abc import a >>> contravariant_order(a) 0 >>> contravariant_order(a*R2.x + 2) 0 >>> contravariant_order(a*R2.x*R2.e_y + R2.e_x) 1 """ # TODO move some of this to class methods. 
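    # The order is determined recursively:
    #   * Add  -- every term must have the same contravariant order;
    #   * Mul  -- at most one factor may have nonzero order (products of
    #     vector fields are rejected);
    #   * Pow  -- powers of fields are rejected, the result is order 0;
    #   * BaseVectorField -- order 1;
    #   * TensorProduct   -- the sum of the orders of its arguments.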
# TODO rewrite using the .as_blah_blah methods if isinstance(expr, Add): orders = [contravariant_order(e) for e in expr.args] if len(set(orders)) != 1: raise ValueError('Misformed expression containing contravariant fields of varying order.') return orders[0] elif isinstance(expr, Mul): orders = [contravariant_order(e) for e in expr.args] not_zero = [o for o in orders if o != 0] if len(not_zero) > 1: raise ValueError('Misformed expression containing multiplication between vectors.') return 0 if not not_zero else not_zero[0] elif isinstance(expr, Pow): if covariant_order(expr.base) or covariant_order(expr.exp): raise ValueError( 'Misformed expression containing a power of a vector.') return 0 elif isinstance(expr, BaseVectorField): return 1 elif isinstance(expr, TensorProduct): return sum(contravariant_order(a) for a in expr.args) elif not _strict or expr.atoms(BaseScalarField): return 0 else: # If it does not contain anything related to the diffgeom module and it is _strict return -1 def covariant_order(expr, _strict=False): """Return the covariant order of an expression. Examples ======== >>> from sympy.diffgeom import covariant_order >>> from sympy.diffgeom.rn import R2 >>> from sympy.abc import a >>> covariant_order(a) 0 >>> covariant_order(a*R2.x + 2) 0 >>> covariant_order(a*R2.x*R2.dy + R2.dx) 1 """ # TODO move some of this to class methods. # TODO rewrite using the .as_blah_blah methods if isinstance(expr, Add): orders = [covariant_order(e) for e in expr.args] if len(set(orders)) != 1: raise ValueError('Misformed expression containing form fields of varying order.') return orders[0] elif isinstance(expr, Mul): orders = [covariant_order(e) for e in expr.args] not_zero = [o for o in orders if o != 0] if len(not_zero) > 1: raise ValueError('Misformed expression containing multiplication between forms.') return 0 if not not_zero else not_zero[0] elif isinstance(expr, Pow): if covariant_order(expr.base) or covariant_order(expr.exp): raise ValueError( 'Misformed expression containing a power of a form.') return 0 elif isinstance(expr, Differential): return covariant_order(*expr.args) + 1 elif isinstance(expr, TensorProduct): return sum(covariant_order(a) for a in expr.args) elif not _strict or expr.atoms(BaseScalarField): return 0 else: # If it does not contain anything related to the diffgeom module and it is _strict return -1 ############################################################################### # Coordinate transformation functions ############################################################################### def vectors_in_basis(expr, to_sys): """Transform all base vectors in base vectors of a specified coord basis. While the new base vectors are in the new coordinate system basis, any coefficients are kept in the old system. 
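    Each base vector of the original coordinate system is rewritten using the
    chain rule as a linear combination of the new base vectors, with
    coefficients given by the corresponding column of the Jacobian of the
    coordinate transformation (expressed in the original coordinates).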
Examples ======== >>> from sympy.diffgeom import vectors_in_basis >>> from sympy.diffgeom.rn import R2_r, R2_p >>> vectors_in_basis(R2_r.e_x, R2_p) x*e_r/sqrt(x**2 + y**2) - y*e_theta/(x**2 + y**2) >>> vectors_in_basis(R2_p.e_r, R2_r) sin(theta)*e_y + cos(theta)*e_x """ vectors = list(expr.atoms(BaseVectorField)) new_vectors = [] for v in vectors: cs = v._coord_sys jac = cs.jacobian(to_sys, cs.coord_functions()) new = (jac.T*Matrix(to_sys.base_vectors()))[v._index] new_vectors.append(new) return expr.subs(list(zip(vectors, new_vectors))) ############################################################################### # Coordinate-dependent functions ############################################################################### def twoform_to_matrix(expr): """Return the matrix representing the twoform. For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`, where `e_i` is the i-th base vector field for the coordinate system in which the expression of `w` is given. Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import twoform_to_matrix, TensorProduct >>> TP = TensorProduct >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) Matrix([ [1, 0], [0, 1]]) >>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) Matrix([ [x, 0], [0, 1]]) >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2) Matrix([ [ 1, 0], [-1/2, 1]]) """ if covariant_order(expr) != 2 or contravariant_order(expr): raise ValueError('The input expression is not a two-form.') coord_sys = expr.atoms(CoordSystem) if len(coord_sys) != 1: raise ValueError('The input expression concerns more than one ' 'coordinate systems, hence there is no unambiguous ' 'way to choose a coordinate system for the matrix.') coord_sys = coord_sys.pop() vectors = coord_sys.base_vectors() expr = expr.expand() matrix_content = [[expr.rcall(v1, v2) for v1 in vectors] for v2 in vectors] return Matrix(matrix_content) def metric_to_Christoffel_1st(expr): """Return the nested list of Christoffel symbols for the given metric. This returns the Christoffel symbol of first kind that represents the Levi-Civita connection for the given metric. Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct >>> TP = TensorProduct >>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]] """ matrix = twoform_to_matrix(expr) if not matrix.is_symmetric(): raise ValueError( 'The two-form representing the metric is not symmetric.') coord_sys = expr.atoms(CoordSystem).pop() deriv_matrices = [matrix.applyfunc(lambda a: d(a)) for d in coord_sys.base_vectors()] indices = list(range(coord_sys.dim)) christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2 for k in indices] for j in indices] for i in indices] return ImmutableDenseNDimArray(christoffel) def metric_to_Christoffel_2nd(expr): """Return the nested list of Christoffel symbols for the given metric. This returns the Christoffel symbol of second kind that represents the Levi-Civita connection for the given metric. 
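    The symbols of the second kind are obtained from the symbols of the first
    kind (see ``metric_to_Christoffel_1st``) by raising the first index with
    the inverse metric: ``Gamma2[i, j, k]`` is the sum over ``l`` of
    ``g_inv[i, l]*Gamma1[l, j, k]``.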
Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct >>> TP = TensorProduct >>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]] """ ch_1st = metric_to_Christoffel_1st(expr) coord_sys = expr.atoms(CoordSystem).pop() indices = list(range(coord_sys.dim)) # XXX workaround, inverting a matrix does not work if it contains non # symbols #matrix = twoform_to_matrix(expr).inv() matrix = twoform_to_matrix(expr) s_fields = set() for e in matrix: s_fields.update(e.atoms(BaseScalarField)) s_fields = list(s_fields) dums = coord_sys._dummies matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields))) # XXX end of workaround christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices]) for k in indices] for j in indices] for i in indices] return ImmutableDenseNDimArray(christoffel) def metric_to_Riemann_components(expr): """Return the components of the Riemann tensor expressed in a given basis. Given a metric it calculates the components of the Riemann tensor in the canonical basis of the coordinate system in which the metric expression is given. Examples ======== >>> from sympy import exp >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct >>> TP = TensorProduct >>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]] >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ R2.r**2*TP(R2.dtheta, R2.dtheta) >>> non_trivial_metric r**2*TensorProduct(dtheta, dtheta) + exp(2*r)*TensorProduct(dr, dr) >>> riemann = metric_to_Riemann_components(non_trivial_metric) >>> riemann[0, :, :, :] [[[0, 0], [0, 0]], [[0, r*exp(-2*r)], [-r*exp(-2*r), 0]]] >>> riemann[1, :, :, :] [[[0, -1/r], [1/r, 0]], [[0, 0], [0, 0]]] """ ch_2nd = metric_to_Christoffel_2nd(expr) coord_sys = expr.atoms(CoordSystem).pop() indices = list(range(coord_sys.dim)) deriv_ch = [[[[d(ch_2nd[i, j, k]) for d in coord_sys.base_vectors()] for k in indices] for j in indices] for i in indices] riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu] for nu in indices] for mu in indices] for sig in indices] for rho in indices] riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices]) for nu in indices] for mu in indices] for sig in indices] for rho in indices] riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu] for nu in indices] for mu in indices] for sig in indices] for rho in indices] return ImmutableDenseNDimArray(riemann) def metric_to_Ricci_components(expr): """Return the components of the Ricci tensor expressed in a given basis. Given a metric it calculates the components of the Ricci tensor in the canonical basis of the coordinate system in which the metric expression is given. 
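    The components are obtained by contracting the Riemann tensor returned by
    ``metric_to_Riemann_components``: ``ricci[i, j]`` is the sum of
    ``riemann[k, i, k, j]`` over ``k``.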
Examples ======== >>> from sympy import exp >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct >>> TP = TensorProduct >>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[0, 0], [0, 0]] >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ R2.r**2*TP(R2.dtheta, R2.dtheta) >>> non_trivial_metric r**2*TensorProduct(dtheta, dtheta) + exp(2*r)*TensorProduct(dr, dr) >>> metric_to_Ricci_components(non_trivial_metric) [[1/r, 0], [0, r*exp(-2*r)]] """ riemann = metric_to_Riemann_components(expr) coord_sys = expr.atoms(CoordSystem).pop() indices = list(range(coord_sys.dim)) ricci = [[Add(*[riemann[k, i, k, j] for k in indices]) for j in indices] for i in indices] return ImmutableDenseNDimArray(ricci)
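

# ----------------------------------------------------------------------------
# Illustrative usage -- a minimal sketch, not part of the public API.  It
# assumes the predefined ``R2`` manifold from ``sympy.diffgeom.rn`` and simply
# ties together the helpers defined above: build a metric as a sum of tensor
# products of one-forms, then derive the Christoffel symbols and the Ricci
# tensor from it (compare with the doctests of the individual functions).
if __name__ == "__main__":  # pragma: no cover
    from sympy import exp
    from sympy.diffgeom.rn import R2

    TP = TensorProduct
    # A non-flat metric expressed in the polar coordinates of R2.
    metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + R2.r**2*TP(R2.dtheta, R2.dtheta)
    print(metric_to_Christoffel_2nd(metric))
    print(metric_to_Ricci_components(metric))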
590bec00e4b08fff1e97e759ba4ffcbf5710b11de89a2fd7f4907f48de5fb9b5
""" AST nodes specific to C++. """ from sympy.codegen.ast import Attribute, String, Token, Type, none class using(Token): """ Represents a 'using' statement in C++ """ __slots__ = ('type', 'alias') defaults = {'alias': none} _construct_type = Type _construct_alias = String constexpr = Attribute('constexpr')
fd42865731cf146684e52c3615b1b3c124ec7145445a1d96cb4300acdf38a210
""" AST nodes specific to the C family of languages """ from sympy.codegen.ast import Attribute, Declaration, Node, String, Token, Type, none, FunctionCall from sympy.core.basic import Basic from sympy.core.containers import Tuple from sympy.core.sympify import sympify void = Type('void') restrict = Attribute('restrict') # guarantees no pointer aliasing volatile = Attribute('volatile') static = Attribute('static') def alignof(arg): """ Generate of FunctionCall instance for calling 'alignof' """ return FunctionCall('alignof', [String(arg) if isinstance(arg, str) else arg]) def sizeof(arg): """ Generate of FunctionCall instance for calling 'sizeof' Examples ======== >>> from sympy.codegen.ast import real >>> from sympy.codegen.cnodes import sizeof >>> from sympy.printing.ccode import ccode >>> ccode(sizeof(real)) 'sizeof(double)' """ return FunctionCall('sizeof', [String(arg) if isinstance(arg, str) else arg]) class CommaOperator(Basic): """ Represents the comma operator in C """ def __new__(cls, *args): return Basic.__new__(cls, *[sympify(arg) for arg in args]) class Label(String): """ Label for use with e.g. goto statement. Examples ======== >>> from sympy.codegen.cnodes import Label >>> from sympy.printing.ccode import ccode >>> print(ccode(Label('foo'))) foo: """ class goto(Token): """ Represents goto in C """ __slots__ = ('label',) _construct_label = Label class PreDecrement(Basic): """ Represents the pre-decrement operator Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cnodes import PreDecrement >>> from sympy.printing.ccode import ccode >>> ccode(PreDecrement(x)) '--(x)' """ nargs = 1 class PostDecrement(Basic): """ Represents the post-decrement operator """ nargs = 1 class PreIncrement(Basic): """ Represents the pre-increment operator """ nargs = 1 class PostIncrement(Basic): """ Represents the post-increment operator """ nargs = 1 class struct(Node): """ Represents a struct in C """ __slots__ = ('name', 'declarations') defaults = {'name': none} _construct_name = String @classmethod def _construct_declarations(cls, args): return Tuple(*[Declaration(arg) for arg in args]) class union(struct): """ Represents a union in C """
ec14dfa0e58d891286f736a91f47ce85761ba7cef280aa1705fd2a2ef2d8073d
""" AST nodes specific to Fortran. The functions defined in this module allows the user to express functions such as ``dsign`` as a SymPy function for symbolic manipulation. """ from sympy.codegen.ast import ( Attribute, CodeBlock, FunctionCall, Node, none, String, Token, _mk_Tuple, Variable ) from sympy.core.basic import Basic from sympy.core.containers import Tuple from sympy.core.expr import Expr from sympy.core.function import Function from sympy.core.numbers import Float, Integer from sympy.core.sympify import sympify from sympy.logic import true, false from sympy.utilities.iterables import iterable pure = Attribute('pure') elemental = Attribute('elemental') # (all elemental procedures are also pure) intent_in = Attribute('intent_in') intent_out = Attribute('intent_out') intent_inout = Attribute('intent_inout') allocatable = Attribute('allocatable') class Program(Token): """ Represents a 'program' block in Fortran Examples ======== >>> from sympy.codegen.ast import Print >>> from sympy.codegen.fnodes import Program >>> prog = Program('myprogram', [Print([42])]) >>> from sympy.printing import fcode >>> print(fcode(prog, source_format='free')) program myprogram print *, 42 end program """ __slots__ = ('name', 'body') _construct_name = String _construct_body = staticmethod(lambda body: CodeBlock(*body)) class use_rename(Token): """ Represents a renaming in a use statement in Fortran Examples ======== >>> from sympy.codegen.fnodes import use_rename, use >>> from sympy.printing import fcode >>> ren = use_rename("thingy", "convolution2d") >>> print(fcode(ren, source_format='free')) thingy => convolution2d >>> full = use('signallib', only=['snr', ren]) >>> print(fcode(full, source_format='free')) use signallib, only: snr, thingy => convolution2d """ __slots__ = ('local', 'original') _construct_local = String _construct_original = String def _name(arg): if hasattr(arg, 'name'): return arg.name else: return String(arg) class use(Token): """ Represents a use statement in Fortran Examples ======== >>> from sympy.codegen.fnodes import use >>> from sympy.printing import fcode >>> fcode(use('signallib'), source_format='free') 'use signallib' >>> fcode(use('signallib', [('metric', 'snr')]), source_format='free') 'use signallib, metric => snr' >>> fcode(use('signallib', only=['snr', 'convolution2d']), source_format='free') 'use signallib, only: snr, convolution2d' """ __slots__ = ('namespace', 'rename', 'only') defaults = {'rename': none, 'only': none} _construct_namespace = staticmethod(_name) _construct_rename = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else use_rename(*arg) for arg in args])) _construct_only = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else _name(arg) for arg in args])) class Module(Token): """ Represents a module in Fortran Examples ======== >>> from sympy.codegen.fnodes import Module >>> from sympy.printing import fcode >>> print(fcode(Module('signallib', ['implicit none'], []), source_format='free')) module signallib implicit none <BLANKLINE> contains <BLANKLINE> <BLANKLINE> end module """ __slots__ = ('name', 'declarations', 'definitions') defaults = {'declarations': Tuple()} _construct_name = String _construct_declarations = staticmethod(lambda arg: CodeBlock(*arg)) _construct_definitions = staticmethod(lambda arg: CodeBlock(*arg)) class Subroutine(Node): """ Represents a subroutine in Fortran Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import Print >>> from sympy.codegen.fnodes import 
Subroutine >>> from sympy.printing import fcode >>> x, y = symbols('x y', real=True) >>> sub = Subroutine('mysub', [x, y], [Print([x**2 + y**2, x*y])]) >>> print(fcode(sub, source_format='free', standard=2003)) subroutine mysub(x, y) real*8 :: x real*8 :: y print *, x**2 + y**2, x*y end subroutine """ __slots__ = ('name', 'parameters', 'body', 'attrs') _construct_name = String _construct_parameters = staticmethod(lambda params: Tuple(*map(Variable.deduced, params))) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) class SubroutineCall(Token): """ Represents a call to a subroutine in Fortran Examples ======== >>> from sympy.codegen.fnodes import SubroutineCall >>> from sympy.printing import fcode >>> fcode(SubroutineCall('mysub', 'x y'.split())) ' call mysub(x, y)' """ __slots__ = ('name', 'subroutine_args') _construct_name = staticmethod(_name) _construct_subroutine_args = staticmethod(_mk_Tuple) class Do(Token): """ Represents a Do loop in in Fortran Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import aug_assign, Print >>> from sympy.codegen.fnodes import Do >>> from sympy.printing import fcode >>> i, n = symbols('i n', integer=True) >>> r = symbols('r', real=True) >>> body = [aug_assign(r, '+', 1/i), Print([i, r])] >>> do1 = Do(body, i, 1, n) >>> print(fcode(do1, source_format='free')) do i = 1, n r = r + 1d0/i print *, i, r end do >>> do2 = Do(body, i, 1, n, 2) >>> print(fcode(do2, source_format='free')) do i = 1, n, 2 r = r + 1d0/i print *, i, r end do """ __slots__ = ('body', 'counter', 'first', 'last', 'step', 'concurrent') defaults = {'step': Integer(1), 'concurrent': false} _construct_body = staticmethod(lambda body: CodeBlock(*body)) _construct_counter = staticmethod(sympify) _construct_first = staticmethod(sympify) _construct_last = staticmethod(sympify) _construct_step = staticmethod(sympify) _construct_concurrent = staticmethod(lambda arg: true if arg else false) class ArrayConstructor(Token): """ Represents an array constructor Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import ArrayConstructor >>> ac = ArrayConstructor([1, 2, 3]) >>> fcode(ac, standard=95, source_format='free') '(/1, 2, 3/)' >>> fcode(ac, standard=2003, source_format='free') '[1, 2, 3]' """ __slots__ = ('elements',) _construct_elements = staticmethod(_mk_Tuple) class ImpliedDoLoop(Token): """ Represents an implied do loop in Fortran Examples ======== >>> from sympy import Symbol, fcode >>> from sympy.codegen.fnodes import ImpliedDoLoop, ArrayConstructor >>> i = Symbol('i', integer=True) >>> idl = ImpliedDoLoop(i**3, i, -3, 3, 2) # -27, -1, 1, 27 >>> ac = ArrayConstructor([-28, idl, 28]) # -28, -27, -1, 1, 27, 28 >>> fcode(ac, standard=2003, source_format='free') '[-28, (i**3, i = -3, 3, 2), 28]' """ __slots__ = ('expr', 'counter', 'first', 'last', 'step') defaults = {'step': Integer(1)} _construct_expr = staticmethod(sympify) _construct_counter = staticmethod(sympify) _construct_first = staticmethod(sympify) _construct_last = staticmethod(sympify) _construct_step = staticmethod(sympify) class Extent(Basic): """ Represents a dimension extent. 
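    A two-argument ``Extent`` represents an explicit ``lower:upper`` range,
    while an ``Extent`` constructed without arguments (or with ``':'`` or
    ``None``) represents an assumed-shape extent.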
Examples ======== >>> from sympy.codegen.fnodes import Extent >>> e = Extent(-3, 3) # -3, -2, -1, 0, 1, 2, 3 >>> from sympy.printing import fcode >>> fcode(e, source_format='free') '-3:3' >>> from sympy.codegen.ast import Variable, real >>> from sympy.codegen.fnodes import dimension, intent_out >>> dim = dimension(e, e) >>> arr = Variable('x', real, attrs=[dim, intent_out]) >>> fcode(arr.as_Declaration(), source_format='free', standard=2003) 'real*8, dimension(-3:3, -3:3), intent(out) :: x' """ def __new__(cls, *args): if len(args) == 2: low, high = args return Basic.__new__(cls, sympify(low), sympify(high)) elif len(args) == 0 or (len(args) == 1 and args[0] in (':', None)): return Basic.__new__(cls) # assumed shape else: raise ValueError("Expected 0 or 2 args (or one argument == None or ':')") def _sympystr(self, printer): if len(self.args) == 0: return ':' return '%d:%d' % self.args assumed_extent = Extent() # or Extent(':'), Extent(None) def dimension(*args): """ Creates a 'dimension' Attribute with (up to 7) extents. Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import dimension, intent_in >>> dim = dimension('2', ':') # 2 rows, runtime determined number of columns >>> from sympy.codegen.ast import Variable, integer >>> arr = Variable('a', integer, attrs=[dim, intent_in]) >>> fcode(arr.as_Declaration(), source_format='free', standard=2003) 'integer*4, dimension(2, :), intent(in) :: a' """ if len(args) > 7: raise ValueError("Fortran only supports up to 7 dimensional arrays") parameters = [] for arg in args: if isinstance(arg, Extent): parameters.append(arg) elif isinstance(arg, str): if arg == ':': parameters.append(Extent()) else: parameters.append(String(arg)) elif iterable(arg): parameters.append(Extent(*arg)) else: parameters.append(sympify(arg)) if len(args) == 0: raise ValueError("Need at least one dimension") return Attribute('dimension', parameters) assumed_size = dimension('*') def array(symbol, dim, intent=None, **kwargs): """ Convenience function for creating a Variable instance for a Fortran array Parameters ========== symbol : symbol dim : Attribute or iterable If dim is an ``Attribute`` it need to have the name 'dimension'. 
If it is not an ``Attribute``, then it is passsed to :func:`dimension` as ``*dim`` intent : str One of: 'in', 'out', 'inout' or None \\*\\*kwargs: Keyword arguments for ``Variable`` ('type' & 'value') Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.ast import integer, real >>> from sympy.codegen.fnodes import array >>> arr = array('a', '*', 'in', type=integer) >>> print(fcode(arr.as_Declaration(), source_format='free', standard=2003)) integer*4, dimension(*), intent(in) :: a >>> x = array('x', [3, ':', ':'], intent='out', type=real) >>> print(fcode(x.as_Declaration(value=1), source_format='free', standard=2003)) real*8, dimension(3, :, :), intent(out) :: x = 1 """ if isinstance(dim, Attribute): if str(dim.name) != 'dimension': raise ValueError("Got an unexpected Attribute argument as dim: %s" % str(dim)) else: dim = dimension(*dim) attrs = list(kwargs.pop('attrs', [])) + [dim] if intent is not None: if intent not in (intent_in, intent_out, intent_inout): intent = {'in': intent_in, 'out': intent_out, 'inout': intent_inout}[intent] attrs.append(intent) value = kwargs.pop('value', None) type_ = kwargs.pop('type', None) if type_ is None: return Variable.deduced(symbol, value=value, attrs=attrs) else: return Variable(symbol, type_, value=value, attrs=attrs) def _printable(arg): return String(arg) if isinstance(arg, str) else sympify(arg) def allocated(array): """ Creates an AST node for a function call to Fortran's "allocated(...)" Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import allocated >>> alloc = allocated('x') >>> fcode(alloc, source_format='free') 'allocated(x)' """ return FunctionCall('allocated', [_printable(array)]) def lbound(array, dim=None, kind=None): """ Creates an AST node for a function call to Fortran's "lbound(...)" Parameters ========== array : Symbol or String dim : expr kind : expr Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import lbound >>> lb = lbound('arr', dim=2) >>> fcode(lb, source_format='free') 'lbound(arr, 2)' """ return FunctionCall( 'lbound', [_printable(array)] + ([_printable(dim)] if dim else []) + ([_printable(kind)] if kind else []) ) def ubound(array, dim=None, kind=None): return FunctionCall( 'ubound', [_printable(array)] + ([_printable(dim)] if dim else []) + ([_printable(kind)] if kind else []) ) def shape(source, kind=None): """ Creates an AST node for a function call to Fortran's "shape(...)" Parameters ========== source : Symbol or String kind : expr Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import shape >>> shp = shape('x') >>> fcode(shp, source_format='free') 'shape(x)' """ return FunctionCall( 'shape', [_printable(source)] + ([_printable(kind)] if kind else []) ) def size(array, dim=None, kind=None): """ Creates an AST node for a function call to Fortran's "size(...)" Examples ======== >>> from sympy import Symbol >>> from sympy.printing import fcode >>> from sympy.codegen.ast import FunctionDefinition, real, Return, Variable >>> from sympy.codegen.fnodes import array, sum_, size >>> a = Symbol('a', real=True) >>> body = [Return((sum_(a**2)/size(a))**.5)] >>> arr = array(a, dim=[':'], intent='in') >>> fd = FunctionDefinition(real, 'rms', [arr], body) >>> print(fcode(fd, source_format='free', standard=2003)) real*8 function rms(a) real*8, dimension(:), intent(in) :: a rms = sqrt(sum(a**2)*1d0/size(a)) end function """ return FunctionCall( 'size', [_printable(array)] + ([_printable(dim)] if dim 
else []) + ([_printable(kind)] if kind else []) ) def reshape(source, shape, pad=None, order=None): """ Creates an AST node for a function call to Fortran's "reshape(...)" Parameters ========== source : Symbol or String shape : ArrayExpr """ return FunctionCall( 'reshape', [_printable(source), _printable(shape)] + ([_printable(pad)] if pad else []) + ([_printable(order)] if pad else []) ) def bind_C(name=None): """ Creates an Attribute ``bind_C`` with a name Parameters ========== name : str Examples ======== >>> from sympy import Symbol >>> from sympy.printing import fcode >>> from sympy.codegen.ast import FunctionDefinition, real, Return, Variable >>> from sympy.codegen.fnodes import array, sum_, size, bind_C >>> a = Symbol('a', real=True) >>> s = Symbol('s', integer=True) >>> arr = array(a, dim=[s], intent='in') >>> body = [Return((sum_(a**2)/s)**.5)] >>> fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')]) >>> print(fcode(fd, source_format='free', standard=2003)) real*8 function rms(a, s) bind(C, name="rms") real*8, dimension(s), intent(in) :: a integer*4 :: s rms = sqrt(sum(a**2)/s) end function """ return Attribute('bind_C', [String(name)] if name else []) class GoTo(Token): """ Represents a goto statement in Fortran Examples ======== >>> from sympy.codegen.fnodes import GoTo >>> go = GoTo([10, 20, 30], 'i') >>> from sympy.printing import fcode >>> fcode(go, source_format='free') 'go to (10, 20, 30), i' """ __slots__ = ('labels', 'expr') defaults = {'expr': none} _construct_labels = staticmethod(_mk_Tuple) _construct_expr = staticmethod(sympify) class FortranReturn(Token): """ AST node explicitly mapped to a fortran "return". Because a return statement in fortran is different from C, and in order to aid reuse of our codegen ASTs the ordinary ``.codegen.ast.Return`` is interpreted as assignment to the result variable of the function. If one for some reason needs to generate a fortran RETURN statement, this node should be used. Examples ======== >>> from sympy.codegen.fnodes import FortranReturn >>> from sympy.printing import fcode >>> fcode(FortranReturn('x')) ' return x' """ __slots__ = ('return_value',) defaults = {'return_value': none} _construct_return_value = staticmethod(sympify) class FFunction(Function): _required_standard = 77 def _fcode(self, printer): name = self.__class__.__name__ if printer._settings['standard'] < self._required_standard: raise NotImplementedError("%s requires Fortran %d or newer" % (name, self._required_standard)) return '{0}({1})'.format(name, ', '.join(map(printer._print, self.args))) class F95Function(FFunction): _required_standard = 95 class isign(FFunction): """ Fortran sign intrinsic for integer arguments. """ nargs = 2 class dsign(FFunction): """ Fortran sign intrinsic for double precision arguments. """ nargs = 2 class cmplx(FFunction): """ Fortran complex conversion function. """ nargs = 2 # may be extended to (2, 3) at a later point class kind(FFunction): """ Fortran kind function. 
""" nargs = 1 class merge(F95Function): """ Fortran merge function """ nargs = 3 class _literal(Float): _token = None # type: str _decimals = None # type: int def _fcode(self, printer, *args, **kwargs): mantissa, sgnd_ex = ('%.{0}e'.format(self._decimals) % self).split('e') mantissa = mantissa.strip('0').rstrip('.') ex_sgn, ex_num = sgnd_ex[0], sgnd_ex[1:].lstrip('0') ex_sgn = '' if ex_sgn == '+' else ex_sgn return (mantissa or '0') + self._token + ex_sgn + (ex_num or '0') class literal_sp(_literal): """ Fortran single precision real literal """ _token = 'e' _decimals = 9 class literal_dp(_literal): """ Fortran double precision real literal """ _token = 'd' _decimals = 17 class sum_(Token, Expr): __slots__ = ('array', 'dim', 'mask') defaults = {'dim': none, 'mask': none} _construct_array = staticmethod(sympify) _construct_dim = staticmethod(sympify) class product_(Token, Expr): __slots__ = ('array', 'dim', 'mask') defaults = {'dim': none, 'mask': none} _construct_array = staticmethod(sympify) _construct_dim = staticmethod(sympify)
6feb4036b61a3bb042e77f08949b8a46f785ebfd98397d8beee3d0254e0447f7
""" Types used to represent a full function/module as an Abstract Syntax Tree. Most types are small, and are merely used as tokens in the AST. A tree diagram has been included below to illustrate the relationships between the AST types. AST Type Tree ------------- :: *Basic* |--->AssignmentBase | |--->Assignment | |--->AugmentedAssignment | |--->AddAugmentedAssignment | |--->SubAugmentedAssignment | |--->MulAugmentedAssignment | |--->DivAugmentedAssignment | |--->ModAugmentedAssignment | |--->CodeBlock | | |--->Token | |--->Attribute | |--->For | |--->String | | |--->QuotedString | | |--->Comment | |--->Type | | |--->IntBaseType | | | |--->_SizedIntType | | | |--->SignedIntType | | | |--->UnsignedIntType | | |--->FloatBaseType | | |--->FloatType | | |--->ComplexBaseType | | |--->ComplexType | |--->Node | | |--->Variable | | | |---> Pointer | | |--->FunctionPrototype | | |--->FunctionDefinition | |--->Element | |--->Declaration | |--->While | |--->Scope | |--->Stream | |--->Print | |--->FunctionCall | |--->BreakToken | |--->ContinueToken | |--->NoneToken | |--->Statement |--->Return Predefined types ---------------- A number of ``Type`` instances are provided in the ``sympy.codegen.ast`` module for convenience. Perhaps the two most common ones for code-generation (of numeric codes) are ``float32`` and ``float64`` (known as single and double precision respectively). There are also precision generic versions of Types (for which the codeprinters selects the underlying data type at time of printing): ``real``, ``integer``, ``complex_``, ``bool_``. The other ``Type`` instances defined are: - ``intc``: Integer type used by C's "int". - ``intp``: Integer type used by C's "unsigned". - ``int8``, ``int16``, ``int32``, ``int64``: n-bit integers. - ``uint8``, ``uint16``, ``uint32``, ``uint64``: n-bit unsigned integers. - ``float80``: known as "extended precision" on modern x86/amd64 hardware. - ``complex64``: Complex number represented by two ``float32`` numbers - ``complex128``: Complex number represented by two ``float64`` numbers Using the nodes --------------- It is possible to construct simple algorithms using the AST nodes. Let's construct a loop applying Newton's method:: >>> from sympy import symbols, cos >>> from sympy.codegen.ast import While, Assignment, aug_assign, Print >>> t, dx, x = symbols('tol delta val') >>> expr = cos(x) - x**3 >>> whl = While(abs(dx) > t, [ ... Assignment(dx, -expr/expr.diff(x)), ... aug_assign(x, '+', dx), ... Print([x]) ... ]) >>> from sympy.printing import pycode >>> py_str = pycode(whl) >>> print(py_str) while (abs(delta) > tol): delta = (val**3 - math.cos(val))/(-3*val**2 - math.sin(val)) val += delta print(val) >>> import math >>> tol, val, delta = 1e-5, 0.5, float('inf') >>> exec(py_str) 1.1121416371 0.909672693737 0.867263818209 0.865477135298 0.865474033111 >>> print('%3.1g' % (math.cos(val) - val**3)) -3e-11 If we want to generate Fortran code for the same while loop we simple call ``fcode``:: >>> from sympy.printing.fcode import fcode >>> print(fcode(whl, standard=2003, source_format='free')) do while (abs(delta) > tol) delta = (val**3 - cos(val))/(-3*val**2 - sin(val)) val = val + delta print *, val end do There is a function constructing a loop (or a complete function) like this in :mod:`sympy.codegen.algorithms`. 
""" from __future__ import print_function, division from typing import Any, Dict, List from collections import defaultdict from sympy.core import Symbol, Tuple, Dummy from sympy.core.basic import Basic from sympy.core.expr import Expr from sympy.core.numbers import Float, Integer, oo from sympy.core.relational import Lt, Le, Ge, Gt from sympy.core.sympify import _sympify, sympify, SympifyError from sympy.utilities.iterables import iterable def _mk_Tuple(args): """ Create a Sympy Tuple object from an iterable, converting Python strings to AST strings. Parameters ========== args: iterable Arguments to :class:`sympy.Tuple`. Returns ======= sympy.Tuple """ args = [String(arg) if isinstance(arg, str) else arg for arg in args] return Tuple(*args) class Token(Basic): """ Base class for the AST types. Defining fields are set in ``__slots__``. Attributes (defined in __slots__) are only allowed to contain instances of Basic (unless atomic, see ``String``). The arguments to ``__new__()`` correspond to the attributes in the order defined in ``__slots__`. The ``defaults`` class attribute is a dictionary mapping attribute names to their default values. Subclasses should not need to override the ``__new__()`` method. They may define a class or static method named ``_construct_<attr>`` for each attribute to process the value passed to ``__new__()``. Attributes listed in the class attribute ``not_in_args`` are not passed to :class:`~.Basic`. """ __slots__ = () defaults = {} # type: Dict[str, Any] not_in_args = [] # type: List[str] indented_args = ['body'] @property def is_Atom(self): return len(self.__slots__) == 0 @classmethod def _get_constructor(cls, attr): """ Get the constructor function for an attribute by name. """ return getattr(cls, '_construct_%s' % attr, lambda x: x) @classmethod def _construct(cls, attr, arg): """ Construct an attribute value from argument passed to ``__new__()``. 
""" # arg may be ``NoneToken()``, so comparation is done using == instead of ``is`` operator if arg == None: return cls.defaults.get(attr, none) else: if isinstance(arg, Dummy): # sympy's replace uses Dummy instances return arg else: return cls._get_constructor(attr)(arg) def __new__(cls, *args, **kwargs): # Pass through existing instances when given as sole argument if len(args) == 1 and not kwargs and isinstance(args[0], cls): return args[0] if len(args) > len(cls.__slots__): raise ValueError("Too many arguments (%d), expected at most %d" % (len(args), len(cls.__slots__))) attrvals = [] # Process positional arguments for attrname, argval in zip(cls.__slots__, args): if attrname in kwargs: raise TypeError('Got multiple values for attribute %r' % attrname) attrvals.append(cls._construct(attrname, argval)) # Process keyword arguments for attrname in cls.__slots__[len(args):]: if attrname in kwargs: argval = kwargs.pop(attrname) elif attrname in cls.defaults: argval = cls.defaults[attrname] else: raise TypeError('No value for %r given and attribute has no default' % attrname) attrvals.append(cls._construct(attrname, argval)) if kwargs: raise ValueError("Unknown keyword arguments: %s" % ' '.join(kwargs)) # Parent constructor basic_args = [ val for attr, val in zip(cls.__slots__, attrvals) if attr not in cls.not_in_args ] obj = Basic.__new__(cls, *basic_args) # Set attributes for attr, arg in zip(cls.__slots__, attrvals): setattr(obj, attr, arg) return obj def __eq__(self, other): if not isinstance(other, self.__class__): return False for attr in self.__slots__: if getattr(self, attr) != getattr(other, attr): return False return True def _hashable_content(self): return tuple([getattr(self, attr) for attr in self.__slots__]) def __hash__(self): return super(Token, self).__hash__() def _joiner(self, k, indent_level): return (',\n' + ' '*indent_level) if k in self.indented_args else ', ' def _indented(self, printer, k, v, *args, **kwargs): il = printer._context['indent_level'] def _print(arg): if isinstance(arg, Token): return printer._print(arg, *args, joiner=self._joiner(k, il), **kwargs) else: return printer._print(arg, *args, **kwargs) if isinstance(v, Tuple): joined = self._joiner(k, il).join([_print(arg) for arg in v.args]) if k in self.indented_args: return '(\n' + ' '*il + joined + ',\n' + ' '*(il - 4) + ')' else: return ('({0},)' if len(v.args) == 1 else '({0})').format(joined) else: return _print(v) def _sympyrepr(self, printer, *args, **kwargs): from sympy.printing.printer import printer_context exclude = kwargs.get('exclude', ()) values = [getattr(self, k) for k in self.__slots__] indent_level = printer._context.get('indent_level', 0) joiner = kwargs.pop('joiner', ', ') arg_reprs = [] for i, (attr, value) in enumerate(zip(self.__slots__, values)): if attr in exclude: continue # Skip attributes which have the default value if attr in self.defaults and value == self.defaults[attr]: continue ilvl = indent_level + 4 if attr in self.indented_args else 0 with printer_context(printer, indent_level=ilvl): indented = self._indented(printer, attr, value, *args, **kwargs) arg_reprs.append(('{1}' if i == 0 else '{0}={1}').format(attr, indented.lstrip())) return "{0}({1})".format(self.__class__.__name__, joiner.join(arg_reprs)) _sympystr = _sympyrepr def __repr__(self): # sympy.core.Basic.__repr__ uses sstr from sympy.printing import srepr return srepr(self) def kwargs(self, exclude=(), apply=None): """ Get instance's attributes as dict of keyword arguments. 
Parameters ========== exclude : collection of str Collection of keywords to exclude. apply : callable, optional Function to apply to all values. """ kwargs = {k: getattr(self, k) for k in self.__slots__ if k not in exclude} if apply is not None: return {k: apply(v) for k, v in kwargs.items()} else: return kwargs class BreakToken(Token): """ Represents 'break' in C/Python ('exit' in Fortran). Use the premade instance ``break_`` or instantiate manually. Examples ======== >>> from sympy.printing import ccode, fcode >>> from sympy.codegen.ast import break_ >>> ccode(break_) 'break' >>> fcode(break_, source_format='free') 'exit' """ break_ = BreakToken() class ContinueToken(Token): """ Represents 'continue' in C/Python ('cycle' in Fortran) Use the premade instance ``continue_`` or instantiate manually. Examples ======== >>> from sympy.printing import ccode, fcode >>> from sympy.codegen.ast import continue_ >>> ccode(continue_) 'continue' >>> fcode(continue_, source_format='free') 'cycle' """ continue_ = ContinueToken() class NoneToken(Token): """ The AST equivalence of Python's NoneType The corresponding instance of Python's ``None`` is ``none``. Examples ======== >>> from sympy.codegen.ast import none, Variable >>> from sympy.printing.pycode import pycode >>> print(pycode(Variable('x').as_Declaration(value=none))) x = None """ def __eq__(self, other): return other is None or isinstance(other, NoneToken) def _hashable_content(self): return () def __hash__(self): return super(NoneToken, self).__hash__() none = NoneToken() class AssignmentBase(Basic): """ Abstract base class for Assignment and AugmentedAssignment. Attributes: =========== op : str Symbol for assignment operator, e.g. "=", "+=", etc. """ def __new__(cls, lhs, rhs): lhs = _sympify(lhs) rhs = _sympify(rhs) cls._check_args(lhs, rhs) return super(AssignmentBase, cls).__new__(cls, lhs, rhs) @property def lhs(self): return self.args[0] @property def rhs(self): return self.args[1] @classmethod def _check_args(cls, lhs, rhs): """ Check arguments to __new__ and raise exception if any problems found. Derived classes may wish to override this. """ from sympy.matrices.expressions.matexpr import ( MatrixElement, MatrixSymbol) from sympy.tensor.indexed import Indexed # Tuple of things that can be on the lhs of an assignment assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable) if not isinstance(lhs, assignable): raise TypeError("Cannot assign to lhs of type %s." % type(lhs)) # Indexed types implement shape, but don't define it until later. This # causes issues in assignment validation. For now, matrices are defined # as anything with a shape that is not an Indexed lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed) rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed) # If lhs and rhs have same structure, then this assignment is ok if lhs_is_mat: if not rhs_is_mat: raise ValueError("Cannot assign a scalar to a matrix.") elif lhs.shape != rhs.shape: raise ValueError("Dimensions of lhs and rhs don't align.") elif rhs_is_mat and not lhs_is_mat: raise ValueError("Cannot assign a matrix to a scalar.") class Assignment(AssignmentBase): """ Represents variable assignment for code generation. Parameters ========== lhs : Expr Sympy object representing the lhs of the expression. These should be singular objects, such as one would use in writing code. Notable types include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that subclass these types are also supported. 
rhs : Expr Sympy object representing the rhs of the expression. This can be any type, provided its shape corresponds to that of the lhs. For example, a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as the dimensions will not align. Examples ======== >>> from sympy import symbols, MatrixSymbol, Matrix >>> from sympy.codegen.ast import Assignment >>> x, y, z = symbols('x, y, z') >>> Assignment(x, y) Assignment(x, y) >>> Assignment(x, 0) Assignment(x, 0) >>> A = MatrixSymbol('A', 1, 3) >>> mat = Matrix([x, y, z]).T >>> Assignment(A, mat) Assignment(A, Matrix([[x, y, z]])) >>> Assignment(A[0, 1], x) Assignment(A[0, 1], x) """ op = ':=' class AugmentedAssignment(AssignmentBase): """ Base class for augmented assignments. Attributes: =========== binop : str Symbol for binary operation being applied in the assignment, such as "+", "*", etc. """ binop = None # type: str @property def op(self): return self.binop + '=' class AddAugmentedAssignment(AugmentedAssignment): binop = '+' class SubAugmentedAssignment(AugmentedAssignment): binop = '-' class MulAugmentedAssignment(AugmentedAssignment): binop = '*' class DivAugmentedAssignment(AugmentedAssignment): binop = '/' class ModAugmentedAssignment(AugmentedAssignment): binop = '%' # Mapping from binary op strings to AugmentedAssignment subclasses augassign_classes = { cls.binop: cls for cls in [ AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment, DivAugmentedAssignment, ModAugmentedAssignment ] } def aug_assign(lhs, op, rhs): """ Create 'lhs op= rhs'. Represents augmented variable assignment for code generation. This is a convenience function. You can also use the AugmentedAssignment classes directly, like AddAugmentedAssignment(x, y). Parameters ========== lhs : Expr Sympy object representing the lhs of the expression. These should be singular objects, such as one would use in writing code. Notable types include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that subclass these types are also supported. op : str Operator (+, -, /, \\*, %). rhs : Expr Sympy object representing the rhs of the expression. This can be any type, provided its shape corresponds to that of the lhs. For example, a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as the dimensions will not align. Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import aug_assign >>> x, y = symbols('x, y') >>> aug_assign(x, '+', y) AddAugmentedAssignment(x, y) """ if op not in augassign_classes: raise ValueError("Unrecognized operator %s" % op) return augassign_classes[op](lhs, rhs) class CodeBlock(Basic): """ Represents a block of code For now only assignments are supported. This restriction will be lifted in the future. Useful attributes on this object are: ``left_hand_sides``: Tuple of left-hand sides of assignments, in order. ``left_hand_sides``: Tuple of right-hand sides of assignments, in order. ``free_symbols``: Free symbols of the expressions in the right-hand sides which do not appear in the left-hand side of an assignment. Useful methods on this object are: ``topological_sort``: Class method. Return a CodeBlock with assignments sorted so that variables are assigned before they are used. ``cse``: Return a new CodeBlock with common subexpressions eliminated and pulled out as assignments. 
Examples ======== >>> from sympy import symbols, ccode >>> from sympy.codegen.ast import CodeBlock, Assignment >>> x, y = symbols('x y') >>> c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1)) >>> print(ccode(c)) x = 1; y = x + 1; """ def __new__(cls, *args): left_hand_sides = [] right_hand_sides = [] for i in args: if isinstance(i, Assignment): lhs, rhs = i.args left_hand_sides.append(lhs) right_hand_sides.append(rhs) obj = Basic.__new__(cls, *args) obj.left_hand_sides = Tuple(*left_hand_sides) obj.right_hand_sides = Tuple(*right_hand_sides) return obj def __iter__(self): return iter(self.args) def _sympyrepr(self, printer, *args, **kwargs): il = printer._context.get('indent_level', 0) joiner = ',\n' + ' '*il joined = joiner.join(map(printer._print, self.args)) return ('{0}(\n'.format(' '*(il-4) + self.__class__.__name__,) + ' '*il + joined + '\n' + ' '*(il - 4) + ')') _sympystr = _sympyrepr @property def free_symbols(self): return super(CodeBlock, self).free_symbols - set(self.left_hand_sides) @classmethod def topological_sort(cls, assignments): """ Return a CodeBlock with topologically sorted assignments so that variables are assigned before they are used. The existing order of assignments is preserved as much as possible. This function assumes that variables are assigned to only once. This is a class constructor so that the default constructor for CodeBlock can error when variables are used before they are assigned. Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import CodeBlock, Assignment >>> x, y, z = symbols('x y z') >>> assignments = [ ... Assignment(x, y + z), ... Assignment(y, z + 1), ... Assignment(z, 2), ... ] >>> CodeBlock.topological_sort(assignments) CodeBlock( Assignment(z, 2), Assignment(y, z + 1), Assignment(x, y + z) ) """ from sympy.utilities.iterables import topological_sort if not all(isinstance(i, Assignment) for i in assignments): # Will support more things later raise NotImplementedError("CodeBlock.topological_sort only supports Assignments") if any(isinstance(i, AugmentedAssignment) for i in assignments): raise NotImplementedError("CodeBlock.topological_sort doesn't yet work with AugmentedAssignments") # Create a graph where the nodes are assignments and there is a directed edge # between nodes that use a variable and nodes that assign that # variable, like # [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)] # If we then topologically sort these nodes, they will be in # assignment order, like # x := 1 # y := x + 1 # z := y + z # A = The nodes # # enumerate keeps nodes in the same order they are already in if # possible. It will also allow us to handle duplicate assignments to # the same variable when those are implemented. A = list(enumerate(assignments)) # var_map = {variable: [nodes for which this variable is assigned to]} # like {x: [(1, x := y + z), (4, x := 2 * w)], ...} var_map = defaultdict(list) for node in A: i, a = node var_map[a.lhs].append(node) # E = Edges in the graph E = [] for dst_node in A: i, a = dst_node for s in a.rhs.free_symbols: for src_node in var_map[s]: E.append((src_node, dst_node)) ordered_assignments = topological_sort([A, E]) # De-enumerate the result return cls(*[a for i, a in ordered_assignments]) def cse(self, symbols=None, optimizations=None, postprocess=None, order='canonical'): """ Return a new code block with common subexpressions eliminated See the docstring of :func:`sympy.simplify.cse_main.cse` for more information. 
Examples ======== >>> from sympy import symbols, sin >>> from sympy.codegen.ast import CodeBlock, Assignment >>> x, y, z = symbols('x y z') >>> c = CodeBlock( ... Assignment(x, 1), ... Assignment(y, sin(x) + 1), ... Assignment(z, sin(x) - 1), ... ) ... >>> c.cse() CodeBlock( Assignment(x, 1), Assignment(x0, sin(x)), Assignment(y, x0 + 1), Assignment(z, x0 - 1) ) """ from sympy.simplify.cse_main import cse from sympy.utilities.iterables import numbered_symbols, filter_symbols # Check that the CodeBlock only contains assignments to unique variables if not all(isinstance(i, Assignment) for i in self.args): # Will support more things later raise NotImplementedError("CodeBlock.cse only supports Assignments") if any(isinstance(i, AugmentedAssignment) for i in self.args): raise NotImplementedError("CodeBlock.cse doesn't yet work with AugmentedAssignments") for i, lhs in enumerate(self.left_hand_sides): if lhs in self.left_hand_sides[:i]: raise NotImplementedError("Duplicate assignments to the same " "variable are not yet supported (%s)" % lhs) # Ensure new symbols for subexpressions do not conflict with existing existing_symbols = self.atoms(Symbol) if symbols is None: symbols = numbered_symbols() symbols = filter_symbols(symbols, existing_symbols) replacements, reduced_exprs = cse(list(self.right_hand_sides), symbols=symbols, optimizations=optimizations, postprocess=postprocess, order=order) new_block = [Assignment(var, expr) for var, expr in zip(self.left_hand_sides, reduced_exprs)] new_assignments = [Assignment(var, expr) for var, expr in replacements] return self.topological_sort(new_assignments + new_block) class For(Token): """Represents a 'for-loop' in the code. Expressions are of the form: "for target in iter: body..." Parameters ========== target : symbol iter : iterable body : CodeBlock or iterable ! When passed an iterable it is used to instantiate a CodeBlock. Examples ======== >>> from sympy import symbols, Range >>> from sympy.codegen.ast import aug_assign, For >>> x, i, j, k = symbols('x i j k') >>> for_i = For(i, Range(10), [aug_assign(x, '+', i*j*k)]) >>> for_i # doctest: -NORMALIZE_WHITESPACE For(i, iterable=Range(0, 10, 1), body=CodeBlock( AddAugmentedAssignment(x, i*j*k) )) >>> for_ji = For(j, Range(7), [for_i]) >>> for_ji # doctest: -NORMALIZE_WHITESPACE For(j, iterable=Range(0, 7, 1), body=CodeBlock( For(i, iterable=Range(0, 10, 1), body=CodeBlock( AddAugmentedAssignment(x, i*j*k) )) )) >>> for_kji =For(k, Range(5), [for_ji]) >>> for_kji # doctest: -NORMALIZE_WHITESPACE For(k, iterable=Range(0, 5, 1), body=CodeBlock( For(j, iterable=Range(0, 7, 1), body=CodeBlock( For(i, iterable=Range(0, 10, 1), body=CodeBlock( AddAugmentedAssignment(x, i*j*k) )) )) )) """ __slots__ = ('target', 'iterable', 'body') _construct_target = staticmethod(_sympify) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) @classmethod def _construct_iterable(cls, itr): if not iterable(itr): raise TypeError("iterable must be an iterable") if isinstance(itr, list): # _sympify errors on lists because they are mutable itr = tuple(itr) return _sympify(itr) class String(Token): """ SymPy object representing a string. Atomic object which is not an expression (as opposed to Symbol). 
Parameters ========== text : str Examples ======== >>> from sympy.codegen.ast import String >>> f = String('foo') >>> f foo >>> str(f) 'foo' >>> f.text 'foo' >>> print(repr(f)) String('foo') """ __slots__ = ('text',) not_in_args = ['text'] is_Atom = True @classmethod def _construct_text(cls, text): if not isinstance(text, str): raise TypeError("Argument text is not a string type.") return text def _sympystr(self, printer, *args, **kwargs): return self.text class QuotedString(String): """ Represents a string which should be printed with quotes. """ class Comment(String): """ Represents a comment. """ class Node(Token): """ Subclass of Token, carrying the attribute 'attrs' (Tuple) Examples ======== >>> from sympy.codegen.ast import Node, value_const, pointer_const >>> n1 = Node([value_const]) >>> n1.attr_params('value_const') # get the parameters of attribute (by name) () >>> from sympy.codegen.fnodes import dimension >>> n2 = Node([value_const, dimension(5, 3)]) >>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance) () >>> n2.attr_params('dimension') # get the parameters of attribute (by name) (5, 3) >>> n2.attr_params(pointer_const) is None True """ __slots__ = ('attrs',) defaults = {'attrs': Tuple()} # type: Dict[str, Any] _construct_attrs = staticmethod(_mk_Tuple) def attr_params(self, looking_for): """ Returns the parameters of the Attribute with name ``looking_for`` in self.attrs """ for attr in self.attrs: if str(attr.name) == str(looking_for): return attr.parameters class Type(Token): """ Represents a type. The naming is a super-set of NumPy naming. Type has a classmethod ``from_expr`` which offer type deduction. It also has a method ``cast_check`` which casts the argument to its type, possibly raising an exception if rounding error is not within tolerances, or if the value is not representable by the underlying data type (e.g. unsigned integers). Parameters ========== name : str Name of the type, e.g. ``object``, ``int16``, ``float16`` (where the latter two would use the ``Type`` sub-classes ``IntType`` and ``FloatType`` respectively). If a ``Type`` instance is given, the said instance is returned. Examples ======== >>> from sympy.codegen.ast import Type >>> t = Type.from_expr(42) >>> t integer >>> print(repr(t)) IntBaseType(String('integer')) >>> from sympy.codegen.ast import uint8 >>> uint8.cast_check(-1) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Minimum value for data type bigger than new value. >>> from sympy.codegen.ast import float32 >>> v6 = 0.123456 >>> float32.cast_check(v6) 0.123456 >>> v10 = 12345.67894 >>> float32.cast_check(v10) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Casting gives a significantly different value. >>> boost_mp50 = Type('boost::multiprecision::cpp_dec_float_50') >>> from sympy import Symbol >>> from sympy.printing.cxxcode import cxxcode >>> from sympy.codegen.ast import Declaration, Variable >>> cxxcode(Declaration(Variable('x', type=boost_mp50))) 'boost::multiprecision::cpp_dec_float_50 x' References ========== .. [1] https://docs.scipy.org/doc/numpy/user/basics.types.html """ __slots__ = ('name',) _construct_name = String def _sympystr(self, printer, *args, **kwargs): return str(self.name) @classmethod def from_expr(cls, expr): """ Deduces type from an expression or a ``Symbol``. Parameters ========== expr : number or SymPy object The type will be deduced from type or properties. 
Examples ======== >>> from sympy.codegen.ast import Type, integer, complex_ >>> Type.from_expr(2) == integer True >>> from sympy import Symbol >>> Type.from_expr(Symbol('z', complex=True)) == complex_ True >>> Type.from_expr(sum) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Could not deduce type from expr. Raises ====== ValueError when type deduction fails. """ if isinstance(expr, (float, Float)): return real if isinstance(expr, (int, Integer)) or getattr(expr, 'is_integer', False): return integer if getattr(expr, 'is_real', False): return real if isinstance(expr, complex) or getattr(expr, 'is_complex', False): return complex_ if isinstance(expr, bool) or getattr(expr, 'is_Relational', False): return bool_ else: raise ValueError("Could not deduce type from expr.") def _check(self, value): pass def cast_check(self, value, rtol=None, atol=0, limits=None, precision_targets=None): """ Casts a value to the data type of the instance. Parameters ========== value : number rtol : floating point number Relative tolerance. (will be deduced if not given). atol : floating point number Absolute tolerance (in addition to ``rtol``). limits : dict Values given by ``limits.h``, x86/IEEE754 defaults if not given. type_aliases : dict Maps substitutions for Type, e.g. {integer: int64, real: float32} Examples ======== >>> from sympy.codegen.ast import Type, integer, float32, int8 >>> integer.cast_check(3.0) == 3 True >>> float32.cast_check(1e-40) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Minimum value for data type bigger than new value. >>> int8.cast_check(256) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Maximum value for data type smaller than new value. >>> v10 = 12345.67894 >>> float32.cast_check(v10) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Casting gives a significantly different value. >>> from sympy.codegen.ast import float64 >>> float64.cast_check(v10) 12345.67894 >>> from sympy import Float >>> v18 = Float('0.123456789012345646') >>> float64.cast_check(v18) Traceback (most recent call last): ... ValueError: Casting gives a significantly different value. >>> from sympy.codegen.ast import float80 >>> float80.cast_check(v18) 0.123456789012345649 """ val = sympify(value) ten = Integer(10) exp10 = getattr(self, 'decimal_dig', None) if rtol is None: rtol = 1e-15 if exp10 is None else 2.0*ten**(-exp10) def tol(num): return atol + rtol*abs(num) new_val = self.cast_nocheck(value) self._check(new_val) delta = new_val - val if abs(delta) > tol(val): # rounding, e.g. int(3.5) != 3.5 raise ValueError("Casting gives a significantly different value.") return new_val class IntBaseType(Type): """ Integer base type, contains no size information. """ __slots__ = ('name',) cast_nocheck = lambda self, i: Integer(int(i)) class _SizedIntType(IntBaseType): __slots__ = ('name', 'nbits',) _construct_nbits = Integer def _check(self, value): if value < self.min: raise ValueError("Value is too small: %d < %d" % (value, self.min)) if value > self.max: raise ValueError("Value is too big: %d > %d" % (value, self.max)) class SignedIntType(_SizedIntType): """ Represents a signed integer type. """ @property def min(self): return -2**(self.nbits-1) @property def max(self): return 2**(self.nbits-1) - 1 class UnsignedIntType(_SizedIntType): """ Represents an unsigned integer type. 
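Examples
========

The bounds follow directly from ``nbits`` (shown with the predefined
``uint8`` instance defined further down in this module):

>>> from sympy.codegen.ast import uint8
>>> uint8.min, uint8.max
(0, 255)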
""" @property def min(self): return 0 @property def max(self): return 2**self.nbits - 1 two = Integer(2) class FloatBaseType(Type): """ Represents a floating point number type. """ cast_nocheck = Float class FloatType(FloatBaseType): """ Represents a floating point type with fixed bit width. Base 2 & one sign bit is assumed. Parameters ========== name : str Name of the type. nbits : integer Number of bits used (storage). nmant : integer Number of bits used to represent the mantissa. nexp : integer Number of bits used to represent the mantissa. Examples ======== >>> from sympy import S, Float >>> from sympy.codegen.ast import FloatType >>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5) >>> half_precision.max 65504 >>> half_precision.tiny == S(2)**-14 True >>> half_precision.eps == S(2)**-10 True >>> half_precision.dig == 3 True >>> half_precision.decimal_dig == 5 True >>> half_precision.cast_check(1.0) 1.0 >>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Maximum value for data type smaller than new value. """ __slots__ = ('name', 'nbits', 'nmant', 'nexp',) _construct_nbits = _construct_nmant = _construct_nexp = Integer @property def max_exponent(self): """ The largest positive number n, such that 2**(n - 1) is a representable finite value. """ # cf. C++'s ``std::numeric_limits::max_exponent`` return two**(self.nexp - 1) @property def min_exponent(self): """ The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """ # cf. C++'s ``std::numeric_limits::min_exponent`` return 3 - self.max_exponent @property def max(self): """ Maximum value representable. """ return (1 - two**-(self.nmant+1))*two**self.max_exponent @property def tiny(self): """ The minimum positive normalized value. """ # See C macros: FLT_MIN, DBL_MIN, LDBL_MIN # or C++'s ``std::numeric_limits::min`` # or numpy.finfo(dtype).tiny return two**(self.min_exponent - 1) @property def eps(self): """ Difference between 1.0 and the next representable value. """ return two**(-self.nmant) @property def dig(self): """ Number of decimal digits that are guaranteed to be preserved in text. When converting text -> float -> text, you are guaranteed that at least ``dig`` number of digits are preserved with respect to rounding or overflow. """ from sympy.functions import floor, log return floor(self.nmant * log(2)/log(10)) @property def decimal_dig(self): """ Number of digits needed to store & load without loss. Number of decimal digits needed to guarantee that two consecutive conversions (float -> text -> float) to be idempotent. This is useful when one do not want to loose precision due to rounding errors when storing a floating point value as text. """ from sympy.functions import ceiling, log return ceiling((self.nmant + 1) * log(2)/log(10) + 1) def cast_nocheck(self, value): """ Casts without checking if out of bounds or subnormal. 
""" if value == oo: # float(oo) or oo return float(oo) elif value == -oo: # float(-oo) or -oo return float(-oo) return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig) def _check(self, value): if value < -self.max: raise ValueError("Value is too small: %d < %d" % (value, -self.max)) if value > self.max: raise ValueError("Value is too big: %d > %d" % (value, self.max)) if abs(value) < self.tiny: raise ValueError("Smallest (absolute) value for data type bigger than new value.") class ComplexBaseType(FloatBaseType): def cast_nocheck(self, value): """ Casts without checking if out of bounds or subnormal. """ from sympy.functions import re, im return ( super(ComplexBaseType, self).cast_nocheck(re(value)) + super(ComplexBaseType, self).cast_nocheck(im(value))*1j ) def _check(self, value): from sympy.functions import re, im super(ComplexBaseType, self)._check(re(value)) super(ComplexBaseType, self)._check(im(value)) class ComplexType(ComplexBaseType, FloatType): """ Represents a complex floating point number. """ # NumPy types: intc = IntBaseType('intc') intp = IntBaseType('intp') int8 = SignedIntType('int8', 8) int16 = SignedIntType('int16', 16) int32 = SignedIntType('int32', 32) int64 = SignedIntType('int64', 64) uint8 = UnsignedIntType('uint8', 8) uint16 = UnsignedIntType('uint16', 16) uint32 = UnsignedIntType('uint32', 32) uint64 = UnsignedIntType('uint64', 64) float16 = FloatType('float16', 16, nexp=5, nmant=10) # IEEE 754 binary16, Half precision float32 = FloatType('float32', 32, nexp=8, nmant=23) # IEEE 754 binary32, Single precision float64 = FloatType('float64', 64, nexp=11, nmant=52) # IEEE 754 binary64, Double precision float80 = FloatType('float80', 80, nexp=15, nmant=63) # x86 extended precision (1 integer part bit), "long double" float128 = FloatType('float128', 128, nexp=15, nmant=112) # IEEE 754 binary128, Quadruple precision float256 = FloatType('float256', 256, nexp=19, nmant=236) # IEEE 754 binary256, Octuple precision complex64 = ComplexType('complex64', nbits=64, **float32.kwargs(exclude=('name', 'nbits'))) complex128 = ComplexType('complex128', nbits=128, **float64.kwargs(exclude=('name', 'nbits'))) # Generic types (precision may be chosen by code printers): untyped = Type('untyped') real = FloatBaseType('real') integer = IntBaseType('integer') complex_ = ComplexBaseType('complex') bool_ = Type('bool') class Attribute(Token): """ Attribute (possibly parametrized) For use with :class:`sympy.codegen.ast.Node` (which takes instances of ``Attribute`` as ``attrs``). Parameters ========== name : str parameters : Tuple Examples ======== >>> from sympy.codegen.ast import Attribute >>> volatile = Attribute('volatile') >>> volatile volatile >>> print(repr(volatile)) Attribute(String('volatile')) >>> a = Attribute('foo', [1, 2, 3]) >>> a foo(1, 2, 3) >>> a.parameters == (1, 2, 3) True """ __slots__ = ('name', 'parameters') defaults = {'parameters': Tuple()} _construct_name = String _construct_parameters = staticmethod(_mk_Tuple) def _sympystr(self, printer, *args, **kwargs): result = str(self.name) if self.parameters: result += '(%s)' % ', '.join(map(lambda arg: printer._print( arg, *args, **kwargs), self.parameters)) return result value_const = Attribute('value_const') pointer_const = Attribute('pointer_const') class Variable(Node): """ Represents a variable Parameters ========== symbol : Symbol type : Type (optional) Type of the variable. attrs : iterable of Attribute instances Will be stored as a Tuple. 
Examples ======== >>> from sympy import Symbol >>> from sympy.codegen.ast import Variable, float32, integer >>> x = Symbol('x') >>> v = Variable(x, type=float32) >>> v.attrs () >>> v == Variable('x') False >>> v == Variable('x', type=float32) True >>> v Variable(x, type=float32) One may also construct a ``Variable`` instance with the type deduced from assumptions about the symbol using the ``deduced`` classmethod: >>> i = Symbol('i', integer=True) >>> v = Variable.deduced(i) >>> v.type == integer True >>> v == Variable('i') False >>> from sympy.codegen.ast import value_const >>> value_const in v.attrs False >>> w = Variable('w', attrs=[value_const]) >>> w Variable(w, attrs=(value_const,)) >>> value_const in w.attrs True >>> w.as_Declaration(value=42) Declaration(Variable(w, value=42, attrs=(value_const,))) """ __slots__ = ('symbol', 'type', 'value') + Node.__slots__ defaults = Node.defaults.copy() defaults.update({'type': untyped, 'value': none}) _construct_symbol = staticmethod(sympify) _construct_value = staticmethod(sympify) @classmethod def deduced(cls, symbol, value=None, attrs=Tuple(), cast_check=True): """ Alt. constructor with type deduction from ``Type.from_expr``. Deduces type primarily from ``symbol``, secondarily from ``value``. Parameters ========== symbol : Symbol value : expr (optional) value of the variable. attrs : iterable of Attribute instances cast_check : bool Whether to apply ``Type.cast_check`` on ``value``. Examples ======== >>> from sympy import Symbol >>> from sympy.codegen.ast import Variable, complex_ >>> n = Symbol('n', integer=True) >>> str(Variable.deduced(n).type) 'integer' >>> x = Symbol('x', real=True) >>> v = Variable.deduced(x) >>> v.type real >>> z = Symbol('z', complex=True) >>> Variable.deduced(z).type == complex_ True """ if isinstance(symbol, Variable): return symbol try: type_ = Type.from_expr(symbol) except ValueError: type_ = Type.from_expr(value) if value is not None and cast_check: value = type_.cast_check(value) return cls(symbol, type=type_, value=value, attrs=attrs) def as_Declaration(self, **kwargs): """ Convenience method for creating a Declaration instance. If the variable of the Declaration need to wrap a modified variable keyword arguments may be passed (overriding e.g. the ``value`` of the Variable instance). Examples ======== >>> from sympy.codegen.ast import Variable, NoneToken >>> x = Variable('x') >>> decl1 = x.as_Declaration() >>> # value is special NoneToken() which must be tested with == operator >>> decl1.variable.value is None # won't work False >>> decl1.variable.value == None # not PEP-8 compliant True >>> decl1.variable.value == NoneToken() # OK True >>> decl2 = x.as_Declaration(value=42.0) >>> decl2.variable.value == 42 True """ kw = self.kwargs() kw.update(kwargs) return Declaration(self.func(**kw)) def _relation(self, rhs, op): try: rhs = _sympify(rhs) except SympifyError: raise TypeError("Invalid comparison %s < %s" % (self, rhs)) return op(self, rhs, evaluate=False) __lt__ = lambda self, other: self._relation(other, Lt) __le__ = lambda self, other: self._relation(other, Le) __ge__ = lambda self, other: self._relation(other, Ge) __gt__ = lambda self, other: self._relation(other, Gt) class Pointer(Variable): """ Represents a pointer. See ``Variable``. 
Examples ======== Can create instances of ``Element``: >>> from sympy import Symbol >>> from sympy.codegen.ast import Pointer >>> i = Symbol('i', integer=True) >>> p = Pointer('x') >>> p[i+1] Element(x, indices=(i + 1,)) """ def __getitem__(self, key): try: return Element(self.symbol, key) except TypeError: return Element(self.symbol, (key,)) class Element(Token): """ Element in (a possibly N-dimensional) array. Examples ======== >>> from sympy.codegen.ast import Element >>> elem = Element('x', 'ijk') >>> elem.symbol.name == 'x' True >>> elem.indices (i, j, k) >>> from sympy import ccode >>> ccode(elem) 'x[i][j][k]' >>> ccode(Element('x', 'ijk', strides='lmn', offset='o')) 'x[i*l + j*m + k*n + o]' """ __slots__ = ('symbol', 'indices', 'strides', 'offset') defaults = {'strides': none, 'offset': none} _construct_symbol = staticmethod(sympify) _construct_indices = staticmethod(lambda arg: Tuple(*arg)) _construct_strides = staticmethod(lambda arg: Tuple(*arg)) _construct_offset = staticmethod(sympify) class Declaration(Token): """ Represents a variable declaration Parameters ========== variable : Variable Examples ======== >>> from sympy import Symbol >>> from sympy.codegen.ast import Declaration, Type, Variable, NoneToken, integer, untyped >>> z = Declaration('z') >>> z.variable.type == untyped True >>> # value is special NoneToken() which must be tested with == operator >>> z.variable.value is None # won't work False >>> z.variable.value == None # not PEP-8 compliant True >>> z.variable.value == NoneToken() # OK True """ __slots__ = ('variable',) _construct_variable = Variable class While(Token): """ Represents a 'for-loop' in the code. Expressions are of the form: "while condition: body..." Parameters ========== condition : expression convertible to Boolean body : CodeBlock or iterable When passed an iterable it is used to instantiate a CodeBlock. Examples ======== >>> from sympy import symbols, Gt, Abs >>> from sympy.codegen import aug_assign, Assignment, While >>> x, dx = symbols('x dx') >>> expr = 1 - x**2 >>> whl = While(Gt(Abs(dx), 1e-9), [ ... Assignment(dx, -expr/expr.diff(x)), ... aug_assign(x, '+', dx) ... ]) """ __slots__ = ('condition', 'body') _construct_condition = staticmethod(lambda cond: _sympify(cond)) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) class Scope(Token): """ Represents a scope in the code. Parameters ========== body : CodeBlock or iterable When passed an iterable it is used to instantiate a CodeBlock. """ __slots__ = ('body',) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) class Stream(Token): """ Represents a stream. There are two predefined Stream instances ``stdout`` & ``stderr``. Parameters ========== name : str Examples ======== >>> from sympy import Symbol >>> from sympy.printing.pycode import pycode >>> from sympy.codegen.ast import Print, stderr, QuotedString >>> print(pycode(Print(['x'], file=stderr))) print(x, file=sys.stderr) >>> x = Symbol('x') >>> print(pycode(Print([QuotedString('x')], file=stderr))) # print literally "x" print("x", file=sys.stderr) """ __slots__ = ('name',) _construct_name = String stdout = Stream('stdout') stderr = Stream('stderr') class Print(Token): """ Represents print command in the code. 
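Both ``format_string`` and ``file`` are optional and default to ``none``;
when a ``Stream`` such as ``stderr`` is supplied, code printers emit the
corresponding redirection (see the ``Stream`` examples above).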
Parameters ========== formatstring : str *args : Basic instances (or convertible to such through sympify) Examples ======== >>> from sympy.codegen.ast import Print >>> from sympy.printing.pycode import pycode >>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g"))) print("coordinate: %12.5g %12.5g" % (x, y)) """ __slots__ = ('print_args', 'format_string', 'file') defaults = {'format_string': none, 'file': none} _construct_print_args = staticmethod(_mk_Tuple) _construct_format_string = QuotedString _construct_file = Stream class FunctionPrototype(Node): """ Represents a function prototype Allows the user to generate forward declaration in e.g. C/C++. Parameters ========== return_type : Type name : str parameters: iterable of Variable instances attrs : iterable of Attribute instances Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import real, FunctionPrototype >>> from sympy.printing.ccode import ccode >>> x, y = symbols('x y', real=True) >>> fp = FunctionPrototype(real, 'foo', [x, y]) >>> ccode(fp) 'double foo(double x, double y)' """ __slots__ = ('return_type', 'name', 'parameters', 'attrs') _construct_return_type = Type _construct_name = String @staticmethod def _construct_parameters(args): def _var(arg): if isinstance(arg, Declaration): return arg.variable elif isinstance(arg, Variable): return arg else: return Variable.deduced(arg) return Tuple(*map(_var, args)) @classmethod def from_FunctionDefinition(cls, func_def): if not isinstance(func_def, FunctionDefinition): raise TypeError("func_def is not an instance of FunctionDefiniton") return cls(**func_def.kwargs(exclude=('body',))) class FunctionDefinition(FunctionPrototype): """ Represents a function definition in the code. Parameters ========== return_type : Type name : str parameters: iterable of Variable instances body : CodeBlock or iterable attrs : iterable of Attribute instances Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import real, FunctionPrototype >>> from sympy.printing.ccode import ccode >>> x, y = symbols('x y', real=True) >>> fp = FunctionPrototype(real, 'foo', [x, y]) >>> ccode(fp) 'double foo(double x, double y)' >>> from sympy.codegen.ast import FunctionDefinition, Return >>> body = [Return(x*y)] >>> fd = FunctionDefinition.from_FunctionPrototype(fp, body) >>> print(ccode(fd)) double foo(double x, double y){ return x*y; } """ __slots__ = FunctionPrototype.__slots__[:-1] + ('body', 'attrs') @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) @classmethod def from_FunctionPrototype(cls, func_proto, body): if not isinstance(func_proto, FunctionPrototype): raise TypeError("func_proto is not an instance of FunctionPrototype") return cls(body=body, **func_proto.kwargs()) class Return(Basic): """ Represents a return command in the code. """ class FunctionCall(Token, Expr): """ Represents a call to a function in the code. Parameters ========== name : str function_args : Tuple Examples ======== >>> from sympy.codegen.ast import FunctionCall >>> from sympy.printing.pycode import pycode >>> fcall = FunctionCall('foo', 'bar baz'.split()) >>> print(pycode(fcall)) foo(bar, baz) """ __slots__ = ('name', 'function_args') _construct_name = String _construct_function_args = staticmethod(lambda args: Tuple(*args))
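# Putting the pieces together: a short sketch mirroring the doctest shown in
# ``FunctionDefinition`` above (the function name and symbols are illustrative
# only):
#
#     >>> from sympy import symbols
#     >>> from sympy.codegen.ast import FunctionDefinition, Return, real
#     >>> from sympy.printing.ccode import ccode
#     >>> x, y = symbols('x y', real=True)
#     >>> fd = FunctionDefinition(real, 'foo', [x, y], body=[Return(x*y)])
#     >>> print(ccode(fd))
#     double foo(double x, double y){
#        return x*y;
#     }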