"""Various algorithms for helping identifying numbers and sequences.""" from sympy.utilities import public from sympy.core import Function, Symbol from sympy.core.numbers import Zero from sympy import (sympify, floor, lcm, denom, Integer, Rational, exp, integrate, symbols, Product, product) from sympy.polys.polyfuncs import rational_interpolate as rinterp @public def find_simple_recurrence_vector(l): """ This function is used internally by other functions from the sympy.concrete.guess module. While most users may want to rather use the function find_simple_recurrence when looking for recurrence relations among rational numbers, the current function may still be useful when some post-processing has to be done. Explanation =========== The function returns a vector of length n when a recurrence relation of order n is detected in the sequence of rational numbers v. If the returned vector has a length 1, then the returned value is always the list [0], which means that no relation has been found. While the functions is intended to be used with rational numbers, it should work for other kinds of real numbers except for some cases involving quadratic numbers; for that reason it should be used with some caution when the argument is not a list of rational numbers. Examples ======== >>> from sympy.concrete.guess import find_simple_recurrence_vector >>> from sympy import fibonacci >>> find_simple_recurrence_vector([fibonacci(k) for k in range(12)]) [1, -1, -1] See Also ======== See the function sympy.concrete.guess.find_simple_recurrence which is more user-friendly. """ q1 = [0] q2 = [Integer(1)] b, z = 0, len(l) >> 1 while len(q2) <= z: while l[b]==0: b += 1 if b == len(l): c = 1 for x in q2: c = lcm(c, denom(x)) if q2[0]*c < 0: c = -c for k in range(len(q2)): q2[k] = int(q2[k]*c) return q2 a = Integer(1)/l[b] m = [a] for k in range(b+1, len(l)): m.append(-sum(l[j+1]*m[b-j-1] for j in range(b, k))*a) l, m = m, [0] * max(len(q2), b+len(q1)) for k in range(len(q2)): m[k] = a*q2[k] for k in range(b, b+len(q1)): m[k] += q1[k-b] while m[-1]==0: m.pop() # because trailing zeros can occur q1, q2, b = q2, m, 1 return [0] @public def find_simple_recurrence(v, A=Function('a'), N=Symbol('n')): """ Detects and returns a recurrence relation from a sequence of several integer (or rational) terms. The name of the function in the returned expression is 'a' by default; the main variable is 'n' by default. The smallest index in the returned expression is always n (and never n-1, n-2, etc.). Examples ======== >>> from sympy.concrete.guess import find_simple_recurrence >>> from sympy import fibonacci >>> find_simple_recurrence([fibonacci(k) for k in range(12)]) -a(n) - a(n + 1) + a(n + 2) >>> from sympy import Function, Symbol >>> a = [1, 1, 1] >>> for k in range(15): a.append(5*a[-1]-3*a[-2]+8*a[-3]) >>> find_simple_recurrence(a, A=Function('f'), N=Symbol('i')) -8*f(i) + 3*f(i + 1) - 5*f(i + 2) + f(i + 3) """ p = find_simple_recurrence_vector(v) n = len(p) if n <= 1: return Zero() rel = Zero() for k in range(n): rel += A(N+n-1-k)*p[k] return rel @public def rationalize(x, maxcoeff=10000): """ Helps identifying a rational number from a float (or mpmath.mpf) value by using a continued fraction. The algorithm stops as soon as a large partial quotient is detected (greater than 10000 by default). 
Examples ======== >>> from sympy.concrete.guess import rationalize >>> from mpmath import cos, pi >>> rationalize(cos(pi/3)) 1/2 >>> from mpmath import mpf >>> rationalize(mpf("0.333333333333333")) 1/3 While the function is rather intended to help 'identifying' rational values, it may be used in some cases for approximating real numbers. (Though other functions may be more relevant in that case.) >>> rationalize(pi, maxcoeff = 250) 355/113 See Also ======== Several other methods can approximate a real number as a rational, like: * fractions.Fraction.from_decimal * fractions.Fraction.from_float * mpmath.identify * mpmath.pslq by using the following syntax: mpmath.pslq([x, 1]) * mpmath.findpoly by using the following syntax: mpmath.findpoly(x, 1) * sympy.simplify.nsimplify (which is a more general function) The main difference between the current function and all these variants is that control focuses on magnitude of partial quotients here rather than on global precision of the approximation. If the real is "known to be" a rational number, the current function should be able to detect it correctly with the default settings even when denominator is great (unless its expansion contains unusually big partial quotients) which may occur when studying sequences of increasing numbers. If the user cares more on getting simple fractions, other methods may be more convenient. """ p0, p1 = 0, 1 q0, q1 = 1, 0 a = floor(x) while a < maxcoeff or q1==0: p = a*p1 + p0 q = a*q1 + q0 p0, p1 = p1, p q0, q1 = q1, q if x==a: break x = 1/(x-a) a = floor(x) return sympify(p) / q @public def guess_generating_function_rational(v, X=Symbol('x')): """ Tries to "guess" a rational generating function for a sequence of rational numbers v. Examples ======== >>> from sympy.concrete.guess import guess_generating_function_rational >>> from sympy import fibonacci >>> l = [fibonacci(k) for k in range(5,15)] >>> guess_generating_function_rational(l) (3*x + 5)/(-x**2 - x + 1) See Also ======== sympy.series.approximants mpmath.pade """ # a) compute the denominator as q q = find_simple_recurrence_vector(v) n = len(q) if n <= 1: return None # b) compute the numerator as p p = [sum(v[i-k]*q[k] for k in range(min(i+1, n))) for i in range(len(v)>>1)] return (sum(p[k]*X**k for k in range(len(p))) / sum(q[k]*X**k for k in range(n))) @public def guess_generating_function(v, X=Symbol('x'), types=['all'], maxsqrtn=2): """ Tries to "guess" a generating function for a sequence of rational numbers v. Only a few patterns are implemented yet. Explanation =========== The function returns a dictionary where keys are the name of a given type of generating function. Six types are currently implemented: type | formal definition -------+---------------------------------------------------------------- ogf | f(x) = Sum( a_k * x^k , k: 0..infinity ) egf | f(x) = Sum( a_k * x^k / k! , k: 0..infinity ) lgf | f(x) = Sum( (-1)^(k+1) a_k * x^k / k , k: 1..infinity ) | (with initial index being hold as 1 rather than 0) hlgf | f(x) = Sum( a_k * x^k / k , k: 1..infinity ) | (with initial index being hold as 1 rather than 0) lgdogf | f(x) = derivate( log(Sum( a_k * x^k, k: 0..infinity )), x) lgdegf | f(x) = derivate( log(Sum( a_k * x^k / k!, k: 0..infinity )), x) In order to spare time, the user can select only some types of generating functions (default being ['all']). While forgetting to use a list in the case of a single type may seem to work most of the time as in: types='ogf' this (convenient) syntax may lead to unexpected extra results in some cases. 
Discarding a type when calling the function does not mean that the type will not be present in the returned dictionary; it only means that no extra computation will be performed for that type, but the function may still add it in the result when it can be easily converted from another type. Two generating functions (lgdogf and lgdegf) are not even computed if the initial term of the sequence is 0; it may be useful in that case to try again after having removed the leading zeros. Examples ======== >>> from sympy.concrete.guess import guess_generating_function as ggf >>> ggf([k+1 for k in range(12)], types=['ogf', 'lgf', 'hlgf']) {'hlgf': 1/(1 - x), 'lgf': 1/(x + 1), 'ogf': 1/(x**2 - 2*x + 1)} >>> from sympy import sympify >>> l = sympify("[3/2, 11/2, 0, -121/2, -363/2, 121]") >>> ggf(l) {'ogf': (x + 3/2)/(11*x**2 - 3*x + 1)} >>> from sympy import fibonacci >>> ggf([fibonacci(k) for k in range(5, 15)], types=['ogf']) {'ogf': (3*x + 5)/(-x**2 - x + 1)} >>> from sympy import factorial >>> ggf([factorial(k) for k in range(12)], types=['ogf', 'egf', 'lgf']) {'egf': 1/(1 - x)} >>> ggf([k+1 for k in range(12)], types=['egf']) {'egf': (x + 1)*exp(x), 'lgdegf': (x + 2)/(x + 1)} N-th root of a rational function can also be detected (below is an example coming from the sequence A108626 from http://oeis.org). The greatest n-th root to be tested is specified as maxsqrtn (default 2). >>> ggf([1, 2, 5, 14, 41, 124, 383, 1200, 3799, 12122, 38919])['ogf'] sqrt(1/(x**4 + 2*x**2 - 4*x + 1)) References ========== .. [1] "Concrete Mathematics", R.L. Graham, D.E. Knuth, O. Patashnik .. [2] https://oeis.org/wiki/Generating_functions """ # List of all types of all g.f. known by the algorithm if 'all' in types: types = ['ogf', 'egf', 'lgf', 'hlgf', 'lgdogf', 'lgdegf'] result = {} # Ordinary Generating Function (ogf) if 'ogf' in types: # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(v))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*v[i] for i in range(n+1)) for n in range(len(v))] g = guess_generating_function_rational(t, X=X) if g: result['ogf'] = g**Rational(1, d+1) break # Exponential Generating Function (egf) if 'egf' in types: # Transform sequence (division by factorial) w, f = [], Integer(1) for i, k in enumerate(v): f *= i if i else 1 w.append(k/f) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['egf'] = g**Rational(1, d+1) break # Logarithmic Generating Function (lgf) if 'lgf' in types: # Transform sequence (multiplication by (-1)^(n+1) / n) w, f = [], Integer(-1) for i, k in enumerate(v): f = -f w.append(f*k/Integer(i+1)) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['lgf'] = g**Rational(1, d+1) break # Hyperbolic logarithmic Generating Function (hlgf) if 'hlgf' in types: # Transform sequence (division by n+1) w = [] for i, k in enumerate(v): w.append(k/Integer(i+1)) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['hlgf'] 
= g**Rational(1, d+1) break # Logarithmic derivative of ordinary generating Function (lgdogf) if v[0] != 0 and ('lgdogf' in types or ('ogf' in types and 'ogf' not in result)): # Transform sequence by computing f'(x)/f(x) # because log(f(x)) = integrate( f'(x)/f(x) ) a, w = sympify(v[0]), [] for n in range(len(v)-1): w.append( (v[n+1]*(n+1) - sum(w[-i-1]*v[i+1] for i in range(n)))/a) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['lgdogf'] = g**Rational(1, d+1) if 'ogf' not in result: result['ogf'] = exp(integrate(result['lgdogf'], X)) break # Logarithmic derivative of exponential generating Function (lgdegf) if v[0] != 0 and ('lgdegf' in types or ('egf' in types and 'egf' not in result)): # Transform sequence / step 1 (division by factorial) z, f = [], Integer(1) for i, k in enumerate(v): f *= i if i else 1 z.append(k/f) # Transform sequence / step 2 by computing f'(x)/f(x) # because log(f(x)) = integrate( f'(x)/f(x) ) a, w = z[0], [] for n in range(len(z)-1): w.append( (z[n+1]*(n+1) - sum(w[-i-1]*z[i+1] for i in range(n)))/a) # Perform some convolutions of the sequence with itself t = [1 if k==0 else 0 for k in range(len(w))] for d in range(max(1, maxsqrtn)): t = [sum(t[n-i]*w[i] for i in range(n+1)) for n in range(len(w))] g = guess_generating_function_rational(t, X=X) if g: result['lgdegf'] = g**Rational(1, d+1) if 'egf' not in result: result['egf'] = exp(integrate(result['lgdegf'], X)) break return result @public def guess(l, all=False, evaluate=True, niter=2, variables=None): """ This function is adapted from the Rate.m package for Mathematica written by Christian Krattenthaler. It tries to guess a formula from a given sequence of rational numbers. Explanation =========== In order to speed up the process, the 'all' variable is set to False by default, stopping the computation as some results are returned during an iteration; the variable can be set to True if more iterations are needed (other formulas may be found; however they may be equivalent to the first ones). Another option is the 'evaluate' variable (default is True); setting it to False will leave the involved products unevaluated. By default, the number of iterations is set to 2 but a greater value (up to len(l)-1) can be specified with the optional 'niter' variable. More and more convoluted results are found when the order of the iteration gets higher: * first iteration returns polynomial or rational functions; * second iteration returns products of rising factorials and their inverses; * third iteration returns products of products of rising factorials and their inverses; * etc. The returned formulas contain symbols i0, i1, i2, ... where the main variables is i0 (and auxiliary variables are i1, i2, ...). A list of other symbols can be provided in the 'variables' option; the length of the least should be the value of 'niter' (more is acceptable but only the first symbols will be used); in this case, the main variable will be the first symbol in the list. 
    Examples
    ========

    >>> from sympy.concrete.guess import guess
    >>> guess([1,2,6,24,120], evaluate=False)
    [Product(i1 + 1, (i1, 1, i0 - 1))]

    >>> from sympy import symbols
    >>> r = guess([1,2,7,42,429,7436,218348,10850216], niter=4)
    >>> i0 = symbols("i0")
    >>> [r[0].subs(i0,n).doit() for n in range(1,10)]
    [1, 2, 7, 42, 429, 7436, 218348, 10850216, 911835460]

    """
    if any(a==0 for a in l[:-1]):
        return []
    N = len(l)
    niter = min(N-1, niter)
    myprod = product if evaluate else Product
    g = []
    res = []
    if variables is None:
        symb = symbols('i:'+str(niter))
    else:
        symb = variables
    for k, s in enumerate(symb):
        g.append(l)
        n, r = len(l), []
        for i in range(n-2-1, -1, -1):
            ri = rinterp(enumerate(g[k][:-1], start=1), i, X=s)
            if ((denom(ri).subs({s:n}) != 0)
                    and (ri.subs({s:n}) - g[k][-1] == 0)
                    and ri not in r):
                r.append(ri)
        if r:
            for i in range(k-1, -1, -1):
                r = list(map(lambda v: g[i][0]
                             * myprod(v, (symb[i+1], 1, symb[i]-1)), r))
            if not all:
                return r
            res += r
        l = [Rational(l[i+1], l[i]) for i in range(N-k-1)]
    return res
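
# --- Illustration (editor's sketch, not part of guess.py) -------------------
# A minimal usage check of the helpers documented above, assuming only the
# public SymPy API.  It verifies (1) that the vector returned by
# find_simple_recurrence_vector encodes the relation
#     p[0]*v[n+m] + p[1]*v[n+m-1] + ... + p[m]*v[n] == 0,
# which is how find_simple_recurrence assembles its A(N+n-1-k)*p[k] terms,
# and (2) that the rational generating function guessed for the docstring's
# shifted Fibonacci example reproduces the sequence when expanded as a
# Taylor series.
from sympy import fibonacci, Symbol
from sympy.concrete.guess import (find_simple_recurrence_vector,
                                  guess_generating_function_rational)

x = Symbol('x')
seq = [fibonacci(k) for k in range(5, 15)]

p = find_simple_recurrence_vector(seq)      # [1, -1, -1] for Fibonacci
order = len(p) - 1
assert all(sum(p[k]*seq[n + order - k] for k in range(len(p))) == 0
           for n in range(len(seq) - order))

gf = guess_generating_function_rational(seq, X=x)  # (3*x + 5)/(-x**2 - x + 1)
expansion = gf.series(x, 0, len(seq)).removeO()
assert [expansion.coeff(x, k) for k in range(len(seq))] == seq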
from sympy.calculus.singularities import is_decreasing from sympy.calculus.util import AccumulationBounds from sympy.concrete.expr_with_limits import AddWithLimits from sympy.concrete.expr_with_intlimits import ExprWithIntLimits from sympy.concrete.gosper import gosper_sum from sympy.core.add import Add from sympy.core.function import Derivative from sympy.core.mul import Mul from sympy.core.relational import Eq from sympy.core.singleton import S from sympy.core.symbol import Dummy, Wild, Symbol from sympy.functions.special.zeta_functions import zeta from sympy.functions.elementary.piecewise import Piecewise from sympy.logic.boolalg import And from sympy.polys import apart, PolynomialError, together from sympy.series.limitseq import limit_seq from sympy.series.order import O from sympy.sets.sets import FiniteSet from sympy.simplify import denom from sympy.simplify.combsimp import combsimp from sympy.simplify.powsimp import powsimp from sympy.solvers import solve from sympy.solvers.solveset import solveset import itertools class Sum(AddWithLimits, ExprWithIntLimits): r""" Represents unevaluated summation. Explanation =========== ``Sum`` represents a finite or infinite series, with the first argument being the general form of terms in the series, and the second argument being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking all integer values from ``start`` through ``end``. In accordance with long-standing mathematical convention, the end term is included in the summation. Finite sums =========== For finite sums (and sums with symbolic limits assumed to be finite) we follow the summation convention described by Karr [1], especially definition 3 of section 1.4. The sum: .. math:: \sum_{m \leq i < n} f(i) has *the obvious meaning* for `m < n`, namely: .. math:: \sum_{m \leq i < n} f(i) = f(m) + f(m+1) + \ldots + f(n-2) + f(n-1) with the upper limit value `f(n)` excluded. The sum over an empty set is zero if and only if `m = n`: .. math:: \sum_{m \leq i < n} f(i) = 0 \quad \mathrm{for} \quad m = n Finally, for all other sums over empty sets we assume the following definition: .. math:: \sum_{m \leq i < n} f(i) = - \sum_{n \leq i < m} f(i) \quad \mathrm{for} \quad m > n It is important to note that Karr defines all sums with the upper limit being exclusive. This is in contrast to the usual mathematical notation, but does not affect the summation convention. Indeed we have: .. math:: \sum_{m \leq i < n} f(i) = \sum_{i = m}^{n - 1} f(i) where the difference in notation is intentional to emphasize the meaning, with limits typeset on the top being inclusive. Examples ======== >>> from sympy.abc import i, k, m, n, x >>> from sympy import Sum, factorial, oo, IndexedBase, Function >>> Sum(k, (k, 1, m)) Sum(k, (k, 1, m)) >>> Sum(k, (k, 1, m)).doit() m**2/2 + m/2 >>> Sum(k**2, (k, 1, m)) Sum(k**2, (k, 1, m)) >>> Sum(k**2, (k, 1, m)).doit() m**3/3 + m**2/2 + m/6 >>> Sum(x**k, (k, 0, oo)) Sum(x**k, (k, 0, oo)) >>> Sum(x**k, (k, 0, oo)).doit() Piecewise((1/(1 - x), Abs(x) < 1), (Sum(x**k, (k, 0, oo)), True)) >>> Sum(x**k/factorial(k), (k, 0, oo)).doit() exp(x) Here are examples to do summation with symbolic indices. 
You can use either Function of IndexedBase classes: >>> f = Function('f') >>> Sum(f(n), (n, 0, 3)).doit() f(0) + f(1) + f(2) + f(3) >>> Sum(f(n), (n, 0, oo)).doit() Sum(f(n), (n, 0, oo)) >>> f = IndexedBase('f') >>> Sum(f[n]**2, (n, 0, 3)).doit() f[0]**2 + f[1]**2 + f[2]**2 + f[3]**2 An example showing that the symbolic result of a summation is still valid for seemingly nonsensical values of the limits. Then the Karr convention allows us to give a perfectly valid interpretation to those sums by interchanging the limits according to the above rules: >>> S = Sum(i, (i, 1, n)).doit() >>> S n**2/2 + n/2 >>> S.subs(n, -4) 6 >>> Sum(i, (i, 1, -4)).doit() 6 >>> Sum(-i, (i, -3, 0)).doit() 6 An explicit example of the Karr summation convention: >>> S1 = Sum(i**2, (i, m, m+n-1)).doit() >>> S1 m**2*n + m*n**2 - m*n + n**3/3 - n**2/2 + n/6 >>> S2 = Sum(i**2, (i, m+n, m-1)).doit() >>> S2 -m**2*n - m*n**2 + m*n - n**3/3 + n**2/2 - n/6 >>> S1 + S2 0 >>> S3 = Sum(i, (i, m, m-1)).doit() >>> S3 0 See Also ======== summation Product, sympy.concrete.products.product References ========== .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM, Volume 28 Issue 2, April 1981, Pages 305-350 http://dl.acm.org/citation.cfm?doid=322248.322255 .. [2] https://en.wikipedia.org/wiki/Summation#Capital-sigma_notation .. [3] https://en.wikipedia.org/wiki/Empty_sum """ __slots__ = ('is_commutative',) def __new__(cls, function, *symbols, **assumptions): obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions) if not hasattr(obj, 'limits'): return obj if any(len(l) != 3 or None in l for l in obj.limits): raise ValueError('Sum requires values for lower and upper bounds.') return obj def _eval_is_zero(self): # a Sum is only zero if its function is zero or if all terms # cancel out. This only answers whether the summand is zero; if # not then None is returned since we don't analyze whether all # terms cancel out. 
if self.function.is_zero or self.has_empty_sequence: return True def _eval_is_extended_real(self): if self.has_empty_sequence: return True return self.function.is_extended_real def _eval_is_positive(self): if self.has_finite_limits and self.has_reversed_limits is False: return self.function.is_positive def _eval_is_negative(self): if self.has_finite_limits and self.has_reversed_limits is False: return self.function.is_negative def _eval_is_finite(self): if self.has_finite_limits and self.function.is_finite: return True def doit(self, **hints): if hints.get('deep', True): f = self.function.doit(**hints) else: f = self.function # first make sure any definite limits have summation # variables with matching assumptions reps = {} for xab in self.limits: d = _dummy_with_inherited_properties_concrete(xab) if d: reps[xab[0]] = d if reps: undo = {v: k for k, v in reps.items()} did = self.xreplace(reps).doit(**hints) if type(did) is tuple: # when separate=True did = tuple([i.xreplace(undo) for i in did]) elif did is not None: did = did.xreplace(undo) else: did = self return did if self.function.is_Matrix: expanded = self.expand() if self != expanded: return expanded.doit() return _eval_matrix_sum(self) for n, limit in enumerate(self.limits): i, a, b = limit dif = b - a if dif == -1: # Any summation over an empty set is zero return S.Zero if dif.is_integer and dif.is_negative: a, b = b + 1, a - 1 f = -f newf = eval_sum(f, (i, a, b)) if newf is None: if f == self.function: zeta_function = self.eval_zeta_function(f, (i, a, b)) if zeta_function is not None: return zeta_function return self else: return self.func(f, *self.limits[n:]) f = newf if hints.get('deep', True): # eval_sum could return partially unevaluated # result with Piecewise. In this case we won't # doit() recursively. if not isinstance(f, Piecewise): return f.doit(**hints) return f def eval_zeta_function(self, f, limits): """ Check whether the function matches with the zeta function. If it matches, then return a `Piecewise` expression because zeta function does not converge unless `s > 1` and `q > 0` """ i, a, b = limits w, y, z = Wild('w', exclude=[i]), Wild('y', exclude=[i]), Wild('z', exclude=[i]) result = f.match((w * i + y) ** (-z)) if result is not None and b is S.Infinity: coeff = 1 / result[w] ** result[z] s = result[z] q = result[y] / result[w] + a return Piecewise((coeff * zeta(s, q), And(q > 0, s > 1)), (self, True)) def _eval_derivative(self, x): """ Differentiate wrt x as long as x is not in the free symbols of any of the upper or lower limits. Explanation =========== Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a` since the value of the sum is discontinuous in `a`. In a case involving a limit variable, the unevaluated derivative is returned. 
""" # diff already confirmed that x is in the free symbols of self, but we # don't want to differentiate wrt any free symbol in the upper or lower # limits # XXX remove this test for free_symbols when the default _eval_derivative is in if isinstance(x, Symbol) and x not in self.free_symbols: return S.Zero # get limits and the function f, limits = self.function, list(self.limits) limit = limits.pop(-1) if limits: # f is the argument to a Sum f = self.func(f, *limits) _, a, b = limit if x in a.free_symbols or x in b.free_symbols: return None df = Derivative(f, x, evaluate=True) rv = self.func(df, limit) return rv def _eval_difference_delta(self, n, step): k, _, upper = self.args[-1] new_upper = upper.subs(n, n + step) if len(self.args) == 2: f = self.args[0] else: f = self.func(*self.args[:-1]) return Sum(f, (k, upper + 1, new_upper)).doit() def _eval_simplify(self, **kwargs): from sympy.simplify.simplify import factor_sum, sum_combine from sympy.core.function import expand from sympy.core.mul import Mul # split the function into adds terms = Add.make_args(expand(self.function)) s_t = [] # Sum Terms o_t = [] # Other Terms for term in terms: if term.has(Sum): # if there is an embedded sum here # it is of the form x * (Sum(whatever)) # hence we make a Mul out of it, and simplify all interior sum terms subterms = Mul.make_args(expand(term)) out_terms = [] for subterm in subterms: # go through each term if isinstance(subterm, Sum): # if it's a sum, simplify it out_terms.append(subterm._eval_simplify()) else: # otherwise, add it as is out_terms.append(subterm) # turn it back into a Mul s_t.append(Mul(*out_terms)) else: o_t.append(term) # next try to combine any interior sums for further simplification result = Add(sum_combine(s_t), *o_t) return factor_sum(result, limits=self.limits) def is_convergent(self): r""" Checks for the convergence of a Sum. Explanation =========== We divide the study of convergence of infinite sums and products in two parts. First Part: One part is the question whether all the terms are well defined, i.e., they are finite in a sum and also non-zero in a product. Zero is the analogy of (minus) infinity in products as :math:`e^{-\infty} = 0`. Second Part: The second part is the question of convergence after infinities, and zeros in products, have been omitted assuming that their number is finite. This means that we only consider the tail of the sum or product, starting from some point after which all terms are well defined. For example, in a sum of the form: .. math:: \sum_{1 \leq i < \infty} \frac{1}{n^2 + an + b} where a and b are numbers. The routine will return true, even if there are infinities in the term sequence (at most two). An analogous product would be: .. math:: \prod_{1 \leq i < \infty} e^{\frac{1}{n^2 + an + b}} This is how convergence is interpreted. It is concerned with what happens at the limit. Finding the bad terms is another independent matter. Note: It is responsibility of user to see that the sum or product is well defined. There are various tests employed to check the convergence like divergence test, root test, integral test, alternating series test, comparison tests, Dirichlet tests. It returns true if Sum is convergent and false if divergent and NotImplementedError if it can not be checked. References ========== .. 
[1] https://en.wikipedia.org/wiki/Convergence_tests Examples ======== >>> from sympy import factorial, S, Sum, Symbol, oo >>> n = Symbol('n', integer=True) >>> Sum(n/(n - 1), (n, 4, 7)).is_convergent() True >>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent() False >>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent() False >>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent() True See Also ======== Sum.is_absolutely_convergent() sympy.concrete.products.Product.is_convergent() """ from sympy import Interval, Integral, log, symbols, simplify p, q, r = symbols('p q r', cls=Wild) sym = self.limits[0][0] lower_limit = self.limits[0][1] upper_limit = self.limits[0][2] sequence_term = self.function.simplify() if len(sequence_term.free_symbols) > 1: raise NotImplementedError("convergence checking for more than one symbol " "containing series is not handled") if lower_limit.is_finite and upper_limit.is_finite: return S.true # transform sym -> -sym and swap the upper_limit = S.Infinity # and lower_limit = - upper_limit if lower_limit is S.NegativeInfinity: if upper_limit is S.Infinity: return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \ Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent() sequence_term = simplify(sequence_term.xreplace({sym: -sym})) lower_limit = -upper_limit upper_limit = S.Infinity sym_ = Dummy(sym.name, integer=True, positive=True) sequence_term = sequence_term.xreplace({sym: sym_}) sym = sym_ interval = Interval(lower_limit, upper_limit) # Piecewise function handle if sequence_term.is_Piecewise: for func, cond in sequence_term.args: # see if it represents something going to oo if cond == True or cond.as_set().sup is S.Infinity: s = Sum(func, (sym, lower_limit, upper_limit)) return s.is_convergent() return S.true ### -------- Divergence test ----------- ### try: lim_val = limit_seq(sequence_term, sym) if lim_val is not None and lim_val.is_zero is False: return S.false except NotImplementedError: pass try: lim_val_abs = limit_seq(abs(sequence_term), sym) if lim_val_abs is not None and lim_val_abs.is_zero is False: return S.false except NotImplementedError: pass order = O(sequence_term, (sym, S.Infinity)) ### --------- p-series test (1/n**p) ---------- ### p_series_test = order.expr.match(sym**p) if p_series_test is not None: if p_series_test[p] < -1: return S.true if p_series_test[p] >= -1: return S.false ### ------------- comparison test ------------- ### # 1/(n**p*log(n)**q*log(log(n))**r) comparison n_log_test = order.expr.match(1/(sym**p*log(sym)**q*log(log(sym))**r)) if n_log_test is not None: if (n_log_test[p] > 1 or (n_log_test[p] == 1 and n_log_test[q] > 1) or (n_log_test[p] == n_log_test[q] == 1 and n_log_test[r] > 1)): return S.true return S.false ### ------------- Limit comparison test -----------### # (1/n) comparison try: lim_comp = limit_seq(sym*sequence_term, sym) if lim_comp is not None and lim_comp.is_number and lim_comp > 0: return S.false except NotImplementedError: pass ### ----------- ratio test ---------------- ### next_sequence_term = sequence_term.xreplace({sym: sym + 1}) ratio = combsimp(powsimp(next_sequence_term/sequence_term)) try: lim_ratio = limit_seq(ratio, sym) if lim_ratio is not None and lim_ratio.is_number: if abs(lim_ratio) > 1: return S.false if abs(lim_ratio) < 1: return S.true except NotImplementedError: lim_ratio = None ### ---------- Raabe's test -------------- ### if lim_ratio == 1: # ratio test inconclusive test_val = sym*(sequence_term/ sequence_term.subs(sym, sym + 1) - 1) test_val = test_val.gammasimp() try: 
lim_val = limit_seq(test_val, sym) if lim_val is not None and lim_val.is_number: if lim_val > 1: return S.true if lim_val < 1: return S.false except NotImplementedError: pass ### ----------- root test ---------------- ### # lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity) try: lim_evaluated = limit_seq(abs(sequence_term)**(1/sym), sym) if lim_evaluated is not None and lim_evaluated.is_number: if lim_evaluated < 1: return S.true if lim_evaluated > 1: return S.false except NotImplementedError: pass ### ------------- alternating series test ----------- ### dict_val = sequence_term.match((-1)**(sym + p)*q) if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval): return S.true ### ------------- integral test -------------- ### check_interval = None maxima = solveset(sequence_term.diff(sym), sym, interval) if not maxima: check_interval = interval elif isinstance(maxima, FiniteSet) and maxima.sup.is_number: check_interval = Interval(maxima.sup, interval.sup) if (check_interval is not None and (is_decreasing(sequence_term, check_interval) or is_decreasing(-sequence_term, check_interval))): integral_val = Integral( sequence_term, (sym, lower_limit, upper_limit)) try: integral_val_evaluated = integral_val.doit() if integral_val_evaluated.is_number: return S(integral_val_evaluated.is_finite) except NotImplementedError: pass ### ----- Dirichlet and bounded times convergent tests ----- ### # TODO # # Dirichlet_test # https://en.wikipedia.org/wiki/Dirichlet%27s_test # # Bounded times convergent test # It is based on comparison theorems for series. # In particular, if the general term of a series can # be written as a product of two terms a_n and b_n # and if a_n is bounded and if Sum(b_n) is absolutely # convergent, then the original series Sum(a_n * b_n) # is absolutely convergent and so convergent. # # The following code can grows like 2**n where n is the # number of args in order.expr # Possibly combined with the potentially slow checks # inside the loop, could make this test extremely slow # for larger summation expressions. if order.expr.is_Mul: args = order.expr.args argset = set(args) ### -------------- Dirichlet tests -------------- ### m = Dummy('m', integer=True) def _dirichlet_test(g_n): try: ing_val = limit_seq(Sum(g_n, (sym, interval.inf, m)).doit(), m) if ing_val is not None and ing_val.is_finite: return S.true except NotImplementedError: pass ### -------- bounded times convergent test ---------### def _bounded_convergent_test(g1_n, g2_n): try: lim_val = limit_seq(g1_n, sym) if lim_val is not None and (lim_val.is_finite or ( isinstance(lim_val, AccumulationBounds) and (lim_val.max - lim_val.min).is_finite)): if Sum(g2_n, (sym, lower_limit, upper_limit)).is_absolutely_convergent(): return S.true except NotImplementedError: pass for n in range(1, len(argset)): for a_tuple in itertools.combinations(args, n): b_set = argset - set(a_tuple) a_n = Mul(*a_tuple) b_n = Mul(*b_set) if is_decreasing(a_n, interval): dirich = _dirichlet_test(b_n) if dirich is not None: return dirich bc_test = _bounded_convergent_test(a_n, b_n) if bc_test is not None: return bc_test _sym = self.limits[0][0] sequence_term = sequence_term.xreplace({sym: _sym}) raise NotImplementedError("The algorithm to find the Sum convergence of %s " "is not yet implemented" % (sequence_term)) def is_absolutely_convergent(self): """ Checks for the absolute convergence of an infinite series. Same as checking convergence of absolute value of sequence_term of an infinite series. References ========== .. 
[1] https://en.wikipedia.org/wiki/Absolute_convergence Examples ======== >>> from sympy import Sum, Symbol, oo >>> n = Symbol('n', integer=True) >>> Sum((-1)**n, (n, 1, oo)).is_absolutely_convergent() False >>> Sum((-1)**n/n**2, (n, 1, oo)).is_absolutely_convergent() True See Also ======== Sum.is_convergent() """ return Sum(abs(self.function), self.limits).is_convergent() def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True): """ Return an Euler-Maclaurin approximation of self, where m is the number of leading terms to sum directly and n is the number of terms in the tail. With m = n = 0, this is simply the corresponding integral plus a first-order endpoint correction. Returns (s, e) where s is the Euler-Maclaurin approximation and e is the estimated error (taken to be the magnitude of the first omitted term in the tail): >>> from sympy.abc import k, a, b >>> from sympy import Sum >>> Sum(1/k, (k, 2, 5)).doit().evalf() 1.28333333333333 >>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin() >>> s -log(2) + 7/20 + log(5) >>> from sympy import sstr >>> print(sstr((s.evalf(), e.evalf()), full_prec=True)) (1.26629073187415, 0.0175000000000000) The endpoints may be symbolic: >>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin() >>> s -log(a) + log(b) + 1/(2*b) + 1/(2*a) >>> e Abs(1/(12*b**2) - 1/(12*a**2)) If the function is a polynomial of degree at most 2n+1, the Euler-Maclaurin formula becomes exact (and e = 0 is returned): >>> Sum(k, (k, 2, b)).euler_maclaurin() (b**2/2 + b/2 - 1, 0) >>> Sum(k, (k, 2, b)).doit() b**2/2 + b/2 - 1 With a nonzero eps specified, the summation is ended as soon as the remainder term is less than the epsilon. """ from sympy.functions import bernoulli, factorial from sympy.integrals import Integral m = int(m) n = int(n) f = self.function if len(self.limits) != 1: raise ValueError("More than 1 limit") i, a, b = self.limits[0] if (a > b) == True: if a - b == 1: return S.Zero, S.Zero a, b = b + 1, a - 1 f = -f s = S.Zero if m: if b.is_Integer and a.is_Integer: m = min(m, b - a + 1) if not eps or f.is_polynomial(i): for k in range(m): s += f.subs(i, a + k) else: term = f.subs(i, a) if term: test = abs(term.evalf(3)) < eps if test == True: return s, abs(term) elif not (test == False): # a symbolic Relational class, can't go further return term, S.Zero s += term for k in range(1, m): term = f.subs(i, a + k) if abs(term.evalf(3)) < eps and term != 0: return s, abs(term) s += term if b - a + 1 == m: return s, S.Zero a += m x = Dummy('x') I = Integral(f.subs(i, x), (x, a, b)) if eval_integral: I = I.doit() s += I def fpoint(expr): if b is S.Infinity: return expr.subs(i, a), 0 return expr.subs(i, a), expr.subs(i, b) fa, fb = fpoint(f) iterm = (fa + fb)/2 g = f.diff(i) for k in range(1, n + 2): ga, gb = fpoint(g) term = bernoulli(2*k)/factorial(2*k)*(gb - ga) if (eps and term and abs(term.evalf(3)) < eps) or (k > n): break s += term g = g.diff(i, 2, simplify=False) return s + iterm, abs(term) def reverse_order(self, *indices): """ Reverse the order of a limit in a Sum. Explanation =========== ``reverse_order(self, *indices)`` reverses some limits in the expression ``self`` which can be either a ``Sum`` or a ``Product``. The selectors in the argument ``indices`` specify some indices whose limits get reversed. These selectors are either variable names or numerical indices counted starting from the inner-most limit tuple. 
Examples ======== >>> from sympy import Sum >>> from sympy.abc import x, y, a, b, c, d >>> Sum(x, (x, 0, 3)).reverse_order(x) Sum(-x, (x, 4, -1)) >>> Sum(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(x, y) Sum(x*y, (x, 6, 0), (y, 7, -1)) >>> Sum(x, (x, a, b)).reverse_order(x) Sum(-x, (x, b + 1, a - 1)) >>> Sum(x, (x, a, b)).reverse_order(0) Sum(-x, (x, b + 1, a - 1)) While one should prefer variable names when specifying which limits to reverse, the index counting notation comes in handy in case there are several symbols with the same name. >>> S = Sum(x**2, (x, a, b), (x, c, d)) >>> S Sum(x**2, (x, a, b), (x, c, d)) >>> S0 = S.reverse_order(0) >>> S0 Sum(-x**2, (x, b + 1, a - 1), (x, c, d)) >>> S1 = S0.reverse_order(1) >>> S1 Sum(x**2, (x, b + 1, a - 1), (x, d + 1, c - 1)) Of course we can mix both notations: >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1) Sum(x*y, (x, b + 1, a - 1), (y, 6, 1)) >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x) Sum(x*y, (x, b + 1, a - 1), (y, 6, 1)) See Also ======== sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index, reorder_limit, sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder References ========== .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM, Volume 28 Issue 2, April 1981, Pages 305-350 http://dl.acm.org/citation.cfm?doid=322248.322255 """ l_indices = list(indices) for i, indx in enumerate(l_indices): if not isinstance(indx, int): l_indices[i] = self.index(indx) e = 1 limits = [] for i, limit in enumerate(self.limits): l = limit if i in l_indices: e = -e l = (limit[0], limit[2] + 1, limit[1] - 1) limits.append(l) return Sum(e * self.function, *limits) def summation(f, *symbols, **kwargs): r""" Compute the summation of f with respect to symbols. Explanation =========== The notation for symbols is similar to the notation used in Integral. summation(f, (i, a, b)) computes the sum of f with respect to i from a to b, i.e., :: b ____ \ ` summation(f, (i, a, b)) = ) f /___, i = a If it cannot compute the sum, it returns an unevaluated Sum object. Repeated sums can be computed by introducing additional symbols tuples:: Examples ======== >>> from sympy import summation, oo, symbols, log >>> i, n, m = symbols('i n m', integer=True) >>> summation(2*i - 1, (i, 1, n)) n**2 >>> summation(1/2**i, (i, 0, oo)) 2 >>> summation(1/log(n)**n, (n, 2, oo)) Sum(log(n)**(-n), (n, 2, oo)) >>> summation(i, (i, 0, n), (n, 0, m)) m**3/6 + m**2/2 + m/3 >>> from sympy.abc import x >>> from sympy import factorial >>> summation(x**n/factorial(n), (n, 0, oo)) exp(x) See Also ======== Sum Product, sympy.concrete.products.product """ return Sum(f, *symbols, **kwargs).doit(deep=False) def telescopic_direct(L, R, n, limits): """ Returns the direct summation of the terms of a telescopic sum Explanation =========== L is the term with lower index R is the term with higher index n difference between the indexes of L and R Examples ======== >>> from sympy.concrete.summations import telescopic_direct >>> from sympy.abc import k, a, b >>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b)) -1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a """ (i, a, b) = limits s = 0 for m in range(n): s += L.subs(i, a + m) + R.subs(i, b - m) return s def telescopic(L, R, limits): ''' Tries to perform the summation using the telescopic property. Return None if not possible. ''' (i, a, b) = limits if L.is_Add or R.is_Add: return None # We want to solve(L.subs(i, i + m) + R, m) # First we try a simple match since this does things that # solve doesn't do, e.g. 
solve(f(k+m)-f(k), m) fails k = Wild("k") sol = (-R).match(L.subs(i, i + k)) s = None if sol and k in sol: s = sol[k] if not (s.is_Integer and L.subs(i, i + s) == -R): # sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x})) s = None # But there are things that match doesn't do that solve # can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1 if s is None: m = Dummy('m') try: sol = solve(L.subs(i, i + m) + R, m) or [] except NotImplementedError: return None sol = [si for si in sol if si.is_Integer and (L.subs(i, i + si) + R).expand().is_zero] if len(sol) != 1: return None s = sol[0] if s < 0: return telescopic_direct(R, L, abs(s), (i, a, b)) elif s > 0: return telescopic_direct(L, R, s, (i, a, b)) def eval_sum(f, limits): from sympy.concrete.delta import deltasummation, _has_simple_delta from sympy.functions import KroneckerDelta (i, a, b) = limits if f.is_zero: return S.Zero if i not in f.free_symbols: return f*(b - a + 1) if a == b: return f.subs(i, a) if isinstance(f, Piecewise): if not any(i in arg.args[1].free_symbols for arg in f.args): # Piecewise conditions do not depend on the dummy summation variable, # therefore we can fold: Sum(Piecewise((e, c), ...), limits) # --> Piecewise((Sum(e, limits), c), ...) newargs = [] for arg in f.args: newexpr = eval_sum(arg.expr, limits) if newexpr is None: return None newargs.append((newexpr, arg.cond)) return f.func(*newargs) if f.has(KroneckerDelta): f = f.replace( lambda x: isinstance(x, Sum), lambda x: x.factor() ) if _has_simple_delta(f, limits[0]): return deltasummation(f, limits) dif = b - a definite = dif.is_Integer # Doing it directly may be faster if there are very few terms. if definite and (dif < 100): return eval_sum_direct(f, (i, a, b)) if isinstance(f, Piecewise): return None # Try to do it symbolically. Even when the number of terms is known, # this can save time when b-a is big. # We should try to transform to partial fractions value = eval_sum_symbolic(f.expand(), (i, a, b)) if value is not None: return value # Do it directly if definite: return eval_sum_direct(f, (i, a, b)) def eval_sum_direct(expr, limits): """ Evaluate expression directly, but perform some simple checks first to possibly result in a smaller expression and faster execution. 
""" from sympy.core import Add (i, a, b) = limits dif = b - a # Linearity if expr.is_Mul: # Try factor out everything not including i without_i, with_i = expr.as_independent(i) if without_i != 1: s = eval_sum_direct(with_i, (i, a, b)) if s: r = without_i*s if r is not S.NaN: return r else: # Try term by term L, R = expr.as_two_terms() if not L.has(i): sR = eval_sum_direct(R, (i, a, b)) if sR: return L*sR if not R.has(i): sL = eval_sum_direct(L, (i, a, b)) if sL: return sL*R try: expr = apart(expr, i) # see if it becomes an Add except PolynomialError: pass if expr.is_Add: # Try factor out everything not including i without_i, with_i = expr.as_independent(i) if without_i != 0: s = eval_sum_direct(with_i, (i, a, b)) if s: r = without_i*(dif + 1) + s if r is not S.NaN: return r else: # Try term by term L, R = expr.as_two_terms() lsum = eval_sum_direct(L, (i, a, b)) rsum = eval_sum_direct(R, (i, a, b)) if None not in (lsum, rsum): r = lsum + rsum if r is not S.NaN: return r return Add(*[expr.subs(i, a + j) for j in range(dif + 1)]) def eval_sum_symbolic(f, limits): from sympy.functions import harmonic, bernoulli f_orig = f (i, a, b) = limits if not f.has(i): return f*(b - a + 1) # Linearity if f.is_Mul: # Try factor out everything not including i without_i, with_i = f.as_independent(i) if without_i != 1: s = eval_sum_symbolic(with_i, (i, a, b)) if s: r = without_i*s if r is not S.NaN: return r else: # Try term by term L, R = f.as_two_terms() if not L.has(i): sR = eval_sum_symbolic(R, (i, a, b)) if sR: return L*sR if not R.has(i): sL = eval_sum_symbolic(L, (i, a, b)) if sL: return sL*R try: f = apart(f, i) # see if it becomes an Add except PolynomialError: pass if f.is_Add: L, R = f.as_two_terms() lrsum = telescopic(L, R, (i, a, b)) if lrsum: return lrsum # Try factor out everything not including i without_i, with_i = f.as_independent(i) if without_i != 0: s = eval_sum_symbolic(with_i, (i, a, b)) if s: r = without_i*(b - a + 1) + s if r is not S.NaN: return r else: # Try term by term lsum = eval_sum_symbolic(L, (i, a, b)) rsum = eval_sum_symbolic(R, (i, a, b)) if None not in (lsum, rsum): r = lsum + rsum if r is not S.NaN: return r # Polynomial terms with Faulhaber's formula n = Wild('n') result = f.match(i**n) if result is not None: n = result[n] if n.is_Integer: if n >= 0: if (b is S.Infinity and not a is S.NegativeInfinity) or \ (a is S.NegativeInfinity and not b is S.Infinity): return S.Infinity return ((bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)).expand() elif a.is_Integer and a >= 1: if n == -1: return harmonic(b) - harmonic(a - 1) else: return harmonic(b, abs(n)) - harmonic(a - 1, abs(n)) if not (a.has(S.Infinity, S.NegativeInfinity) or b.has(S.Infinity, S.NegativeInfinity)): # Geometric terms c1 = Wild('c1', exclude=[i]) c2 = Wild('c2', exclude=[i]) c3 = Wild('c3', exclude=[i]) wexp = Wild('wexp') # Here we first attempt powsimp on f for easier matching with the # exponential pattern, and attempt expansion on the exponent for easier # matching with the linear pattern. 
e = f.powsimp().match(c1 ** wexp) if e is not None: e_exp = e.pop(wexp).expand().match(c2*i + c3) if e_exp is not None: e.update(e_exp) if e is not None: p = (c1**c3).subs(e) q = (c1**c2).subs(e) r = p*(q**a - q**(b + 1))/(1 - q) l = p*(b - a + 1) return Piecewise((l, Eq(q, S.One)), (r, True)) r = gosper_sum(f, (i, a, b)) if isinstance(r, (Mul,Add)): from sympy import ordered, Tuple non_limit = r.free_symbols - Tuple(*limits[1:]).free_symbols den = denom(together(r)) den_sym = non_limit & den.free_symbols args = [] for v in ordered(den_sym): try: s = solve(den, v) m = Eq(v, s[0]) if s else S.false if m != False: args.append((Sum(f_orig.subs(*m.args), limits).doit(), m)) break except NotImplementedError: continue args.append((r, True)) return Piecewise(*args) if not r in (None, S.NaN): return r h = eval_sum_hyper(f_orig, (i, a, b)) if h is not None: return h factored = f_orig.factor() if factored != f_orig: return eval_sum_symbolic(factored, (i, a, b)) def _eval_sum_hyper(f, i, a): """ Returns (res, cond). Sums from a to oo. """ from sympy.functions import hyper from sympy.simplify import hyperexpand, hypersimp, fraction, simplify from sympy.polys.polytools import Poly, factor from sympy.core.numbers import Float if a != 0: return _eval_sum_hyper(f.subs(i, i + a), i, 0) if f.subs(i, 0) == 0: if simplify(f.subs(i, Dummy('i', integer=True, positive=True))) == 0: return S.Zero, True return _eval_sum_hyper(f.subs(i, i + 1), i, 0) hs = hypersimp(f, i) if hs is None: return None if isinstance(hs, Float): from sympy.simplify.simplify import nsimplify hs = nsimplify(hs) numer, denom = fraction(factor(hs)) top, topl = numer.as_coeff_mul(i) bot, botl = denom.as_coeff_mul(i) ab = [top, bot] factors = [topl, botl] params = [[], []] for k in range(2): for fac in factors[k]: mul = 1 if fac.is_Pow: mul = fac.exp fac = fac.base if not mul.is_Integer: return None p = Poly(fac, i) if p.degree() != 1: return None m, n = p.all_coeffs() ab[k] *= m**mul params[k] += [n/m]*mul # Add "1" to numerator parameters, to account for implicit n! in # hypergeometric series. 
ap = params[0] + [1] bq = params[1] x = ab[0]/ab[1] h = hyper(ap, bq, x) f = combsimp(f) return f.subs(i, 0)*hyperexpand(h), h.convergence_statement def eval_sum_hyper(f, i_a_b): from sympy.logic.boolalg import And i, a, b = i_a_b if (b - a).is_Integer: # We are never going to do better than doing the sum in the obvious way return None old_sum = Sum(f, (i, a, b)) if b != S.Infinity: if a is S.NegativeInfinity: res = _eval_sum_hyper(f.subs(i, -i), i, -b) if res is not None: return Piecewise(res, (old_sum, True)) else: res1 = _eval_sum_hyper(f, i, a) res2 = _eval_sum_hyper(f, i, b + 1) if res1 is None or res2 is None: return None (res1, cond1), (res2, cond2) = res1, res2 cond = And(cond1, cond2) if cond == False: return None return Piecewise((res1 - res2, cond), (old_sum, True)) if a is S.NegativeInfinity: res1 = _eval_sum_hyper(f.subs(i, -i), i, 1) res2 = _eval_sum_hyper(f, i, 0) if res1 is None or res2 is None: return None res1, cond1 = res1 res2, cond2 = res2 cond = And(cond1, cond2) if cond == False or cond.as_set() == S.EmptySet: return None return Piecewise((res1 + res2, cond), (old_sum, True)) # Now b == oo, a != -oo res = _eval_sum_hyper(f, i, a) if res is not None: r, c = res if c == False: if r.is_number: f = f.subs(i, Dummy('i', integer=True, positive=True) + a) if f.is_positive or f.is_zero: return S.Infinity elif f.is_negative: return S.NegativeInfinity return None return Piecewise(res, (old_sum, True)) def _eval_matrix_sum(expression): f = expression.function for n, limit in enumerate(expression.limits): i, a, b = limit dif = b - a if dif.is_Integer: if (dif < 0) == True: a, b = b + 1, a - 1 f = -f newf = eval_sum_direct(f, (i, a, b)) if newf is not None: return newf.doit() def _dummy_with_inherited_properties_concrete(limits): """ Return a Dummy symbol that inherits as many assumptions as possible from the provided symbol and limits. If the symbol already has all True assumption shared by the limits then return None. """ x, a, b = limits l = [a, b] assumptions_to_consider = ['extended_nonnegative', 'nonnegative', 'extended_nonpositive', 'nonpositive', 'extended_positive', 'positive', 'extended_negative', 'negative', 'integer', 'rational', 'finite', 'zero', 'real', 'extended_real'] assumptions_to_keep = {} assumptions_to_add = {} for assum in assumptions_to_consider: assum_true = x._assumptions.get(assum, None) if assum_true: assumptions_to_keep[assum] = True elif all([getattr(i, 'is_' + assum) for i in l]): assumptions_to_add[assum] = True if assumptions_to_add: assumptions_to_keep.update(assumptions_to_add) return Dummy('d', **assumptions_to_keep)
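
# --- Illustration (editor's sketch, not part of summations.py) --------------
# A small self-contained check of behaviour documented above, assuming only
# the public SymPy API: the Karr convention for reversed limits, the
# is_convergent() entry point, and euler_maclaurin().
from sympy import Sum, Symbol, oo

i = Symbol('i', integer=True)
n = Symbol('n', integer=True)

# Karr convention: the closed form n**2/2 + n/2 stays valid for "reversed"
# limits, because Sum(f, (i, a, b)) == -Sum(f, (i, b + 1, a - 1)) for a > b.
closed = Sum(i, (i, 1, n)).doit()           # n**2/2 + n/2
assert closed.subs(n, -4) == 6
assert Sum(i, (i, 1, -4)).doit() == 6

# Convergence tests described in Sum.is_convergent():
assert Sum(1/i**2, (i, 1, oo)).is_convergent()     # p-series with p = 2 > 1
assert not Sum(1/i, (i, 1, oo)).is_convergent()    # harmonic series diverges

# euler_maclaurin() returns (approximation, error estimate); the estimate is
# the magnitude of the first omitted tail term, as described in its docstring.
s, e = Sum(1/i, (i, 2, 10)).euler_maclaurin(m=2, n=2)
print(s.evalf(), e.evalf(), Sum(1/i, (i, 2, 10)).doit().evalf())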
from sympy import Integer
from sympy.core import Symbol
from sympy.utilities import public


@public
def approximants(l, X=Symbol('x'), simplify=False):
    """
    Return a generator for consecutive Pade approximants for a series.
    It can also be used for computing the rational generating function
    of a series when possible, since the last approximant returned by
    the generator will be the generating function (if any).

    Explanation
    ===========

    The input list can contain more complex expressions than integer or
    rational numbers; symbols may also be involved in the computation.
    An example below shows how to compute the generating function of the
    whole Pascal triangle.

    The generator can be asked to apply the sympy.simplify function on each
    generated term, which will make the computation slower; however it may
    be useful when symbols are involved in the expressions.

    Examples
    ========

    >>> from sympy.series import approximants
    >>> from sympy import lucas, fibonacci, symbols, binomial
    >>> g = [lucas(k) for k in range(16)]
    >>> [e for e in approximants(g)]
    [2, -4/(x - 2), (5*x - 2)/(3*x - 1), (x - 2)/(x**2 + x - 1)]

    >>> h = [fibonacci(k) for k in range(16)]
    >>> [e for e in approximants(h)]
    [x, -x/(x - 1), (x**2 - x)/(2*x - 1), -x/(x**2 + x - 1)]

    >>> x, t = symbols("x,t")
    >>> p=[sum(binomial(k,i)*x**i for i in range(k+1)) for k in range(16)]
    >>> y = approximants(p, t)
    >>> for k in range(3): print(next(y))
    1
    (x + 1)/((-x - 1)*(t*(x + 1) + (x + 1)/(-x - 1)))
    nan

    >>> y = approximants(p, t, simplify=True)
    >>> for k in range(3): print(next(y))
    1
    -1/(t*(x + 1) - 1)
    nan

    See Also
    ========

    See function sympy.concrete.guess.guess_generating_function_rational and
    function mpmath.pade

    """
    p1, q1 = [Integer(1)], [Integer(0)]
    p2, q2 = [Integer(0)], [Integer(1)]
    while len(l):
        b = 0
        while l[b]==0:
            b += 1
            if b == len(l):
                return
        m = [Integer(1)/l[b]]
        for k in range(b+1, len(l)):
            s = 0
            for j in range(b, k):
                s -= l[j+1] * m[b-j-1]
            m.append(s/l[b])
        l = m
        a, l[0] = l[0], 0
        p = [0] * max(len(p2), b+len(p1))
        q = [0] * max(len(q2), b+len(q1))
        for k in range(len(p2)):
            p[k] = a*p2[k]
        for k in range(b, b+len(p1)):
            p[k] += p1[k-b]
        for k in range(len(q2)):
            q[k] = a*q2[k]
        for k in range(b, b+len(q1)):
            q[k] += q1[k-b]
        while p[-1]==0:
            p.pop()
        while q[-1]==0:
            q.pop()
        p1, p2 = p2, p
        q1, q2 = q2, q

        # yield result
        from sympy import denom, lcm, simplify as simp
        c = 1
        for x in p:
            c = lcm(c, denom(x))
        for x in q:
            c = lcm(c, denom(x))
        out = ( sum(c*e*X**k for k, e in enumerate(p))
                / sum(c*e*X**k for k, e in enumerate(q)) )
        if simplify:
            yield(simp(out))
        else:
            yield out
    return
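
# --- Illustration (editor's sketch, not part of approximants.py) ------------
# The docstring above notes that the last approximant yielded is the rational
# generating function of the sequence when one exists.  A hedged cross-check
# against sympy.concrete.guess.guess_generating_function_rational, assuming
# only the public SymPy API:
from sympy import fibonacci, Symbol, simplify
from sympy.series import approximants
from sympy.concrete.guess import guess_generating_function_rational

x = Symbol('x')
seq = [fibonacci(k) for k in range(16)]

last_pade = list(approximants(seq, X=x))[-1]        # -x/(x**2 + x - 1)
gf = guess_generating_function_rational(seq, X=x)
assert simplify(last_pade - gf) == 0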
""" Convergence acceleration / extrapolation methods for series and sequences. References: Carl M. Bender & Steven A. Orszag, "Advanced Mathematical Methods for Scientists and Engineers: Asymptotic Methods and Perturbation Theory", Springer 1999. (Shanks transformation: pp. 368-375, Richardson extrapolation: pp. 375-377.) """ from sympy import factorial, Integer, S def richardson(A, k, n, N): """ Calculate an approximation for lim k->oo A(k) using Richardson extrapolation with the terms A(n), A(n+1), ..., A(n+N+1). Choosing N ~= 2*n often gives good results. Examples ======== A simple example is to calculate exp(1) using the limit definition. This limit converges slowly; n = 100 only produces two accurate digits: >>> from sympy.abc import n >>> e = (1 + 1/n)**n >>> print(round(e.subs(n, 100).evalf(), 10)) 2.7048138294 Richardson extrapolation with 11 appropriately chosen terms gives a value that is accurate to the indicated precision: >>> from sympy import E >>> from sympy.series.acceleration import richardson >>> print(round(richardson(e, n, 10, 20).evalf(), 10)) 2.7182818285 >>> print(round(E.evalf(), 10)) 2.7182818285 Another useful application is to speed up convergence of series. Computing 100 terms of the zeta(2) series 1/k**2 yields only two accurate digits: >>> from sympy.abc import k, n >>> from sympy import Sum >>> A = Sum(k**-2, (k, 1, n)) >>> print(round(A.subs(n, 100).evalf(), 10)) 1.6349839002 Richardson extrapolation performs much better: >>> from sympy import pi >>> print(round(richardson(A, n, 10, 20).evalf(), 10)) 1.6449340668 >>> print(round(((pi**2)/6).evalf(), 10)) # Exact value 1.6449340668 """ s = S.Zero for j in range(0, N + 1): s += A.subs(k, Integer(n + j)).doit() * (n + j)**N * (-1)**(j + N) / \ (factorial(j) * factorial(N - j)) return s def shanks(A, k, n, m=1): """ Calculate an approximation for lim k->oo A(k) using the n-term Shanks transformation S(A)(n). With m > 1, calculate the m-fold recursive Shanks transformation S(S(...S(A)...))(n). The Shanks transformation is useful for summing Taylor series that converge slowly near a pole or singularity, e.g. for log(2): >>> from sympy.abc import k, n >>> from sympy import Sum, Integer >>> from sympy.series.acceleration import shanks >>> A = Sum(Integer(-1)**(k+1) / k, (k, 1, n)) >>> print(round(A.subs(n, 100).doit().evalf(), 10)) 0.6881721793 >>> print(round(shanks(A, n, 25).evalf(), 10)) 0.6931396564 >>> print(round(shanks(A, n, 25, 5).evalf(), 10)) 0.6931471806 The correct value is 0.6931471805599453094172321215. """ table = [A.subs(k, Integer(j)).doit() for j in range(n + m + 2)] table2 = table[:] for i in range(1, m + 1): for j in range(i, n + m + 1): x, y, z = table[j - 1], table[j], table[j + 1] table2[j] = (z*x - y**2) / (z + x - 2*y) table = table2[:] return table[n]
""" Limits ====== Implemented according to the PhD thesis http://www.cybertester.com/data/gruntz.pdf, which contains very thorough descriptions of the algorithm including many examples. We summarize here the gist of it. All functions are sorted according to how rapidly varying they are at infinity using the following rules. Any two functions f and g can be compared using the properties of L: L=lim log|f(x)| / log|g(x)| (for x -> oo) We define >, < ~ according to:: 1. f > g .... L=+-oo we say that: - f is greater than any power of g - f is more rapidly varying than g - f goes to infinity/zero faster than g 2. f < g .... L=0 we say that: - f is lower than any power of g 3. f ~ g .... L!=0, +-oo we say that: - both f and g are bounded from above and below by suitable integral powers of the other Examples ======== :: 2 < x < exp(x) < exp(x**2) < exp(exp(x)) 2 ~ 3 ~ -5 x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x)) f ~ 1/f So we can divide all the functions into comparability classes (x and x^2 belong to one class, exp(x) and exp(-x) belong to some other class). In principle, we could compare any two functions, but in our algorithm, we don't compare anything below the class 2~3~-5 (for example log(x) is below this), so we set 2~3~-5 as the lowest comparability class. Given the function f, we find the list of most rapidly varying (mrv set) subexpressions of it. This list belongs to the same comparability class. Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an element "w" (either from the list or a new one) from the same comparability class which goes to zero at infinity. In our example we set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it into f. Then we expand f into a series in w:: f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0 but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero, because w goes to zero faster than the ci and ei. So:: for e0>0, lim f = 0 for e0<0, lim f = +-oo (the sign depends on the sign of c0) for e0=0, lim f = lim c0 We need to recursively compute limits at several places of the algorithm, but as is shown in the PhD thesis, it always finishes. Important functions from the implementation: compare(a, b, x) compares "a" and "b" by computing the limit L. mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e" rewrite(e, Omega, x, wsym) rewrites "e" in terms of w leadterm(f, x) returns the lowest power term in the series of f mrv_leadterm(e, x) returns the lead term (c0, e0) for e limitinf(e, x) computes lim e (for x->oo) limit(e, z, z0) computes any limit by converting it to the case x->oo All the functions are really simple and straightforward except rewrite(), which is the most difficult/complex part of the algorithm. When the algorithm fails, the bugs are usually in the series expansion (i.e. in SymPy) or in rewrite. This code is almost exact rewrite of the Maple code inside the Gruntz thesis. Debugging --------- Because the gruntz algorithm is highly recursive, it's difficult to figure out what went wrong inside a debugger. Instead, turn on nice debug prints by defining the environment variable SYMPY_DEBUG. 
For example: [user@localhost]: SYMPY_DEBUG=True ./bin/isympy In [1]: limit(sin(x)/x, x, 0) limitinf(_x*sin(1/_x), _x) = 1 +-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0) | +-mrv(_x*sin(1/_x), _x) = set([_x]) | | +-mrv(_x, _x) = set([_x]) | | +-mrv(sin(1/_x), _x) = set([_x]) | | +-mrv(1/_x, _x) = set([_x]) | | +-mrv(_x, _x) = set([_x]) | +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0) | +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x) | +-sign(_x, _x) = 1 | +-mrv_leadterm(1, _x) = (1, 0) +-sign(0, _x) = 0 +-limitinf(1, _x) = 1 And check manually which line is wrong. Then go to the source code and debug this function to figure out the exact problem. """ from functools import reduce from sympy import cacheit from sympy.core import Basic, S, oo, I, Dummy, Wild, Mul from sympy.functions import log, exp from sympy.series.order import Order from sympy.simplify.powsimp import powsimp, powdenest from sympy.utilities.misc import debug_decorator as debug from sympy.utilities.timeutils import timethis timeit = timethis('gruntz') def compare(a, b, x): """Returns "<" if a<b, "=" for a == b, ">" for a>b""" # log(exp(...)) must always be simplified here for termination la, lb = log(a), log(b) if isinstance(a, Basic) and isinstance(a, exp): la = a.args[0] if isinstance(b, Basic) and isinstance(b, exp): lb = b.args[0] c = limitinf(la/lb, x) if c == 0: return "<" elif c.is_infinite: return ">" else: return "=" class SubsSet(dict): """ Stores (expr, dummy) pairs, and how to rewrite expr-s. Explanation =========== The gruntz algorithm needs to rewrite certain expressions in term of a new variable w. We cannot use subs, because it is just too smart for us. For example:: > Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))] > O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w] > e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p)) > e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1]) -1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p)) is really not what we want! So we do it the hard way and keep track of all the things we potentially want to substitute by dummy variables. Consider the expression:: exp(x - exp(-x)) + exp(x) + x. The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}. We introduce corresponding dummy variables d1, d2, d3 and rewrite:: d3 + d1 + x. This class first of all keeps track of the mapping expr->variable, i.e. will at this stage be a dictionary:: {exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}. [It turns out to be more convenient this way round.] But sometimes expressions in the mrv set have other expressions from the mrv set as subexpressions, and we need to keep track of that as well. In this case, d3 is really exp(x - d2), so rewrites at this stage is:: {d3: exp(x-d2)}. The function rewrite uses all this information to correctly rewrite our expression in terms of w. In this case w can be chosen to be exp(-x), i.e. d2. The correct rewriting then is:: exp(-w)/w + 1/w + x. 
""" def __init__(self): self.rewrites = {} def __repr__(self): return super().__repr__() + ', ' + self.rewrites.__repr__() def __getitem__(self, key): if not key in self: self[key] = Dummy() return dict.__getitem__(self, key) def do_subs(self, e): """Substitute the variables with expressions""" for expr, var in self.items(): e = e.xreplace({var: expr}) return e def meets(self, s2): """Tell whether or not self and s2 have non-empty intersection""" return set(self.keys()).intersection(list(s2.keys())) != set() def union(self, s2, exps=None): """Compute the union of self and s2, adjusting exps""" res = self.copy() tr = {} for expr, var in s2.items(): if expr in self: if exps: exps = exps.xreplace({var: res[expr]}) tr[var] = res[expr] else: res[expr] = var for var, rewr in s2.rewrites.items(): res.rewrites[var] = rewr.xreplace(tr) return res, exps def copy(self): """Create a shallow copy of SubsSet""" r = SubsSet() r.rewrites = self.rewrites.copy() for expr, var in self.items(): r[expr] = var return r @debug def mrv(e, x): """Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e', and e rewritten in terms of these""" e = powsimp(e, deep=True, combine='exp') if not isinstance(e, Basic): raise TypeError("e should be an instance of Basic") if not e.has(x): return SubsSet(), e elif e == x: s = SubsSet() return s, s[x] elif e.is_Mul or e.is_Add: i, d = e.as_independent(x) # throw away x-independent terms if d.func != e.func: s, expr = mrv(d, x) return s, e.func(i, expr) a, b = d.as_two_terms() s1, e1 = mrv(a, x) s2, e2 = mrv(b, x) return mrv_max1(s1, s2, e.func(i, e1, e2), x) elif e.is_Pow: e1 = S.One while e.is_Pow: b1 = e.base e1 *= e.exp e = b1 if b1 == 1: return SubsSet(), b1 if e1.has(x): base_lim = limitinf(b1, x) if base_lim is S.One: return mrv(exp(e1 * (b1 - 1)), x) return mrv(exp(e1 * log(b1)), x) else: s, expr = mrv(b1, x) return s, expr**e1 elif isinstance(e, log): s, expr = mrv(e.args[0], x) return s, log(expr) elif isinstance(e, exp): # We know from the theory of this algorithm that exp(log(...)) may always # be simplified here, and doing so is vital for termination. if isinstance(e.args[0], log): return mrv(e.args[0].args[0], x) # if a product has an infinite factor the result will be # infinite if there is no zero, otherwise NaN; here, we # consider the result infinite if any factor is infinite li = limitinf(e.args[0], x) if any(_.is_infinite for _ in Mul.make_args(li)): s1 = SubsSet() e1 = s1[e] s2, e2 = mrv(e.args[0], x) su = s1.union(s2)[0] su.rewrites[e1] = exp(e2) return mrv_max3(s1, e1, s2, exp(e2), su, e1, x) else: s, expr = mrv(e.args[0], x) return s, exp(expr) elif e.is_Function: l = [mrv(a, x) for a in e.args] l2 = [s for (s, _) in l if s != SubsSet()] if len(l2) != 1: # e.g. something like BesselJ(x, x) raise NotImplementedError("MRV set computation for functions in" " several variables not implemented.") s, ss = l2[0], SubsSet() args = [ss.do_subs(x[1]) for x in l] return s, e.func(*args) elif e.is_Derivative: raise NotImplementedError("MRV set computation for derviatives" " not implemented yet.") return mrv(e.args[0], x) raise NotImplementedError( "Don't know how to calculate the mrv of '%s'" % e) def mrv_max3(f, expsf, g, expsg, union, expsboth, x): """ Computes the maximum of two sets of expressions f and g, which are in the same comparability class, i.e. max() compares (two elements of) f and g and returns either (f, expsf) [if f is larger], (g, expsg) [if g is larger] or (union, expsboth) [if f, g are of the same class]. 
""" if not isinstance(f, SubsSet): raise TypeError("f should be an instance of SubsSet") if not isinstance(g, SubsSet): raise TypeError("g should be an instance of SubsSet") if f == SubsSet(): return g, expsg elif g == SubsSet(): return f, expsf elif f.meets(g): return union, expsboth c = compare(list(f.keys())[0], list(g.keys())[0], x) if c == ">": return f, expsf elif c == "<": return g, expsg else: if c != "=": raise ValueError("c should be =") return union, expsboth def mrv_max1(f, g, exps, x): """Computes the maximum of two sets of expressions f and g, which are in the same comparability class, i.e. mrv_max1() compares (two elements of) f and g and returns the set, which is in the higher comparability class of the union of both, if they have the same order of variation. Also returns exps, with the appropriate substitutions made. """ u, b = f.union(g, exps) return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps), u, b, x) @debug @cacheit @timeit def sign(e, x): """ Returns a sign of an expression e(x) for x->oo. :: e > 0 for x sufficiently large ... 1 e == 0 for x sufficiently large ... 0 e < 0 for x sufficiently large ... -1 The result of this function is currently undefined if e changes sign arbitrarily often for arbitrarily large x (e.g. sin(x)). Note that this returns zero only if e is *constantly* zero for x sufficiently large. [If e is constant, of course, this is just the same thing as the sign of e.] """ from sympy import sign as _sign if not isinstance(e, Basic): raise TypeError("e should be an instance of Basic") if e.is_positive: return 1 elif e.is_negative: return -1 elif e.is_zero: return 0 elif not e.has(x): return _sign(e) elif e == x: return 1 elif e.is_Mul: a, b = e.as_two_terms() sa = sign(a, x) if not sa: return 0 return sa * sign(b, x) elif isinstance(e, exp): return 1 elif e.is_Pow: s = sign(e.base, x) if s == 1: return 1 if e.exp.is_Integer: return s**e.exp elif isinstance(e, log): return sign(e.args[0] - 1, x) # if all else fails, do it the hard way c0, e0 = mrv_leadterm(e, x) return sign(c0, x) @debug @timeit @cacheit def limitinf(e, x, leadsimp=False): """Limit e(x) for x-> oo. Explanation =========== If ``leadsimp`` is True, an attempt is made to simplify the leading term of the series expansion of ``e``. That may succeed even if ``e`` cannot be simplified. """ # rewrite e in terms of tractable functions only if not e.has(x): return e # e is a constant if e.has(Order): e = e.expand().removeO() if not x.is_positive or x.is_integer: # We make sure that x.is_positive is True and x.is_integer is None # so we get all the correct mathematical behavior from the expression. # We need a fresh variable. 
p = Dummy('p', positive=True) e = e.subs(x, p) x = p e = e.rewrite('tractable', deep=True, limitvar=x) e = powdenest(e) c0, e0 = mrv_leadterm(e, x) sig = sign(e0, x) if sig == 1: return S.Zero # e0>0: lim f = 0 elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0) if c0.match(I*Wild("a", exclude=[I])): return c0*oo s = sign(c0, x) # the leading term shouldn't be 0: if s == 0: raise ValueError("Leading term should not be 0") return s*oo elif sig == 0: if leadsimp: c0 = c0.simplify() return limitinf(c0, x, leadsimp) # e0=0: lim f = lim c0 else: raise ValueError("{} could not be evaluated".format(sig)) def moveup2(s, x): r = SubsSet() for expr, var in s.items(): r[expr.xreplace({x: exp(x)})] = var for var, expr in s.rewrites.items(): r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)}) return r def moveup(l, x): return [e.xreplace({x: exp(x)}) for e in l] @debug @timeit def calculate_series(e, x, logx=None): """ Calculates at least one term of the series of ``e`` in ``x``. This is a place that fails most often, so it is in its own function. """ from sympy.polys import cancel for t in e.lseries(x, logx=logx): t = cancel(t) if t.has(exp) and t.has(log): t = powdenest(t) if t.simplify(): break return t @debug @timeit @cacheit def mrv_leadterm(e, x): """Returns (c0, e0) for e.""" Omega = SubsSet() if not e.has(x): return (e, S.Zero) if Omega == SubsSet(): Omega, exps = mrv(e, x) if not Omega: # e really does not depend on x after simplification return exps, S.Zero if x in Omega: # move the whole omega up (exponentiate each term): Omega_up = moveup2(Omega, x) e_up = moveup([e], x)[0] exps_up = moveup([exps], x)[0] # NOTE: there is no need to move this down! e = e_up Omega = Omega_up exps = exps_up # # The positive dummy, w, is used here so log(w*2) etc. will expand; # a unique dummy is needed in this algorithm # # For limits of complex functions, the algorithm would have to be # improved, or just find limits of Re and Im components separately. # w = Dummy("w", real=True, positive=True) f, logw = rewrite(exps, Omega, x, w) series = calculate_series(f, w, logx=logw) return series.leadterm(w) def build_expression_tree(Omega, rewrites): r""" Helper function for rewrite. We need to sort Omega (mrv set) so that we replace an expression before we replace any expression in terms of which it has to be rewritten:: e1 ---> e2 ---> e3 \ -> e4 Here we can do e1, e2, e3, e4 or e1, e2, e4, e3. To do this we assemble the nodes into a tree, and sort them by height. This function builds the tree, rewrites then sorts the nodes. """ class Node: def ht(self): return reduce(lambda x, y: x + y, [x.ht() for x in self.before], 1) nodes = {} for expr, v in Omega: n = Node() n.before = [] n.var = v n.expr = expr nodes[v] = n for _, v in Omega: if v in rewrites: n = nodes[v] r = rewrites[v] for _, v2 in Omega: if r.has(v2): n.before.append(nodes[v2]) return nodes @debug @timeit def rewrite(e, Omega, x, wsym): """e(x) ... the function Omega ... the mrv set wsym ... the symbol which is going to be used for w Returns the rewritten e in terms of w and log(w). See test_rewrite1() for examples and correct results. 
""" from sympy import ilcm if not isinstance(Omega, SubsSet): raise TypeError("Omega should be an instance of SubsSet") if len(Omega) == 0: raise ValueError("Length can not be 0") # all items in Omega must be exponentials for t in Omega.keys(): if not isinstance(t, exp): raise ValueError("Value should be exp") rewrites = Omega.rewrites Omega = list(Omega.items()) nodes = build_expression_tree(Omega, rewrites) Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True) # make sure we know the sign of each exp() term; after the loop, # g is going to be the "w" - the simplest one in the mrv set for g, _ in Omega: sig = sign(g.args[0], x) if sig != 1 and sig != -1: raise NotImplementedError('Result depends on the sign of %s' % sig) if sig == 1: wsym = 1/wsym # if g goes to oo, substitute 1/w # O2 is a list, which results by rewriting each item in Omega using "w" O2 = [] denominators = [] for f, var in Omega: c = limitinf(f.args[0]/g.args[0], x) if c.is_Rational: denominators.append(c.q) arg = f.args[0] if var in rewrites: if not isinstance(rewrites[var], exp): raise ValueError("Value should be exp") arg = rewrites[var].args[0] O2.append((var, exp((arg - c*g.args[0]).expand())*wsym**c)) # Remember that Omega contains subexpressions of "e". So now we find # them in "e" and substitute them for our rewriting, stored in O2 # the following powsimp is necessary to automatically combine exponentials, # so that the .xreplace() below succeeds: # TODO this should not be necessary f = powsimp(e, deep=True, combine='exp') for a, b in O2: f = f.xreplace({a: b}) for _, var in Omega: assert not f.has(var) # finally compute the logarithm of w (logw). logw = g.args[0] if sig == 1: logw = -logw # log(w)->log(1/w)=-log(w) # Some parts of sympy have difficulty computing series expansions with # non-integral exponents. The following heuristic improves the situation: exponent = reduce(ilcm, denominators, 1) f = f.subs({wsym: wsym**exponent}) logw /= exponent return f, logw def gruntz(e, z, z0, dir="+"): """ Compute the limit of e(z) at the point z0 using the Gruntz algorithm. Explanation =========== ``z0`` can be any expression, including oo and -oo. For ``dir="+"`` (default) it calculates the limit from the right (z->z0+) and for ``dir="-"`` the limit from the left (z->z0-). For infinite z0 (oo or -oo), the dir argument doesn't matter. This algorithm is fully described in the module docstring in the gruntz.py file. It relies heavily on the series expansion. Most frequently, gruntz() is only used if the faster limit() function (which uses heuristics) fails. """ if not z.is_symbol: raise NotImplementedError("Second argument must be a Symbol") # convert all limits to the limit z->oo; sign of z is handled in limitinf r = None if z0 == oo: e0 = e elif z0 == -oo: e0 = e.subs(z, -z) else: if str(dir) == "-": e0 = e.subs(z, z0 - 1/z) elif str(dir) == "+": e0 = e.subs(z, z0 + 1/z) else: raise NotImplementedError("dir must be '+' or '-'") try: r = limitinf(e0, z) except ValueError: r = limitinf(e0, z, leadsimp=True) # This is a bit of a heuristic for nice results... we always rewrite # tractable functions in terms of familiar intractable ones. # It might be nicer to rewrite the exactly to what they were initially, # but that would take some work to implement. return r.rewrite('intractable', deep=True)
from sympy.core.basic import Basic from sympy.core.cache import cacheit from sympy.core.compatibility import is_sequence, iterable, ordered from sympy.core.containers import Tuple from sympy.core.decorators import call_highest_priority from sympy.core.parameters import global_parameters from sympy.core.function import AppliedUndef from sympy.core.mul import Mul from sympy.core.numbers import Integer from sympy.core.relational import Eq from sympy.core.singleton import S, Singleton from sympy.core.symbol import Dummy, Symbol, Wild from sympy.core.sympify import sympify from sympy.polys import lcm, factor from sympy.sets.sets import Interval, Intersection from sympy.simplify import simplify from sympy.tensor.indexed import Idx from sympy.utilities.iterables import flatten from sympy import expand ############################################################################### # SEQUENCES # ############################################################################### class SeqBase(Basic): """Base class for sequences""" is_commutative = True _op_priority = 15 @staticmethod def _start_key(expr): """Return start (if possible) else S.Infinity. adapted from Set._infimum_key """ try: start = expr.start except (NotImplementedError, AttributeError, ValueError): start = S.Infinity return start def _intersect_interval(self, other): """Returns start and stop. Takes intersection over the two intervals. """ interval = Intersection(self.interval, other.interval) return interval.inf, interval.sup @property def gen(self): """Returns the generator for the sequence""" raise NotImplementedError("(%s).gen" % self) @property def interval(self): """The interval on which the sequence is defined""" raise NotImplementedError("(%s).interval" % self) @property def start(self): """The starting point of the sequence. This point is included""" raise NotImplementedError("(%s).start" % self) @property def stop(self): """The ending point of the sequence. This point is included""" raise NotImplementedError("(%s).stop" % self) @property def length(self): """Length of the sequence""" raise NotImplementedError("(%s).length" % self) @property def variables(self): """Returns a tuple of variables that are bounded""" return () @property def free_symbols(self): """ This method returns the symbols in the object, excluding those that take on a specific value (i.e. the dummy symbols). Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n, m >>> SeqFormula(m*n**2, (n, 0, 5)).free_symbols {m} """ return ({j for i in self.args for j in i.free_symbols .difference(self.variables)}) @cacheit def coeff(self, pt): """Returns the coefficient at point pt""" if pt < self.start or pt > self.stop: raise IndexError("Index %s out of bounds %s" % (pt, self.interval)) return self._eval_coeff(pt) def _eval_coeff(self, pt): raise NotImplementedError("The _eval_coeff method should be added to" "%s to return coefficient so it is available" "when coeff calls it." % self.func) def _ith_point(self, i): """Returns the i'th point of a sequence. Explanation =========== If start point is negative infinity, point is returned from the end. Assumes the first point to be indexed zero. 
Examples ========= >>> from sympy import oo >>> from sympy.series.sequences import SeqPer bounded >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(0) -10 >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(5) -5 End is at infinity >>> SeqPer((1, 2, 3), (0, oo))._ith_point(5) 5 Starts at negative infinity >>> SeqPer((1, 2, 3), (-oo, 0))._ith_point(5) -5 """ if self.start is S.NegativeInfinity: initial = self.stop else: initial = self.start if self.start is S.NegativeInfinity: step = -1 else: step = 1 return initial + i*step def _add(self, other): """ Should only be used internally. Explanation =========== self._add(other) returns a new, term-wise added sequence if self knows how to add with other, otherwise it returns ``None``. ``other`` should only be a sequence object. Used within :class:`SeqAdd` class. """ return None def _mul(self, other): """ Should only be used internally. Explanation =========== self._mul(other) returns a new, term-wise multiplied sequence if self knows how to multiply with other, otherwise it returns ``None``. ``other`` should only be a sequence object. Used within :class:`SeqMul` class. """ return None def coeff_mul(self, other): """ Should be used when ``other`` is not a sequence. Should be defined to define custom behaviour. Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2).coeff_mul(2) SeqFormula(2*n**2, (n, 0, oo)) Notes ===== '*' defines multiplication of sequences with sequences only. """ return Mul(self, other) def __add__(self, other): """Returns the term-wise addition of 'self' and 'other'. ``other`` should be a sequence. Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2) + SeqFormula(n**3) SeqFormula(n**3 + n**2, (n, 0, oo)) """ if not isinstance(other, SeqBase): raise TypeError('cannot add sequence and %s' % type(other)) return SeqAdd(self, other) @call_highest_priority('__add__') def __radd__(self, other): return self + other def __sub__(self, other): """Returns the term-wise subtraction of ``self`` and ``other``. ``other`` should be a sequence. Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2) - (SeqFormula(n)) SeqFormula(n**2 - n, (n, 0, oo)) """ if not isinstance(other, SeqBase): raise TypeError('cannot subtract sequence and %s' % type(other)) return SeqAdd(self, -other) @call_highest_priority('__sub__') def __rsub__(self, other): return (-self) + other def __neg__(self): """Negates the sequence. Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n >>> -SeqFormula(n**2) SeqFormula(-n**2, (n, 0, oo)) """ return self.coeff_mul(-1) def __mul__(self, other): """Returns the term-wise multiplication of 'self' and 'other'. ``other`` should be a sequence. For ``other`` not being a sequence see :func:`coeff_mul` method. 
Examples ======== >>> from sympy import SeqFormula >>> from sympy.abc import n >>> SeqFormula(n**2) * (SeqFormula(n)) SeqFormula(n**3, (n, 0, oo)) """ if not isinstance(other, SeqBase): raise TypeError('cannot multiply sequence and %s' % type(other)) return SeqMul(self, other) @call_highest_priority('__mul__') def __rmul__(self, other): return self * other def __iter__(self): for i in range(self.length): pt = self._ith_point(i) yield self.coeff(pt) def __getitem__(self, index): if isinstance(index, int): index = self._ith_point(index) return self.coeff(index) elif isinstance(index, slice): start, stop = index.start, index.stop if start is None: start = 0 if stop is None: stop = self.length return [self.coeff(self._ith_point(i)) for i in range(start, stop, index.step or 1)] def find_linear_recurrence(self,n,d=None,gfvar=None): r""" Finds the shortest linear recurrence that satisfies the first n terms of sequence of order `\leq` ``n/2`` if possible. If ``d`` is specified, find shortest linear recurrence of order `\leq` min(d, n/2) if possible. Returns list of coefficients ``[b(1), b(2), ...]`` corresponding to the recurrence relation ``x(n) = b(1)*x(n-1) + b(2)*x(n-2) + ...`` Returns ``[]`` if no recurrence is found. If gfvar is specified, also returns ordinary generating function as a function of gfvar. Examples ======== >>> from sympy import sequence, sqrt, oo, lucas >>> from sympy.abc import n, x, y >>> sequence(n**2).find_linear_recurrence(10, 2) [] >>> sequence(n**2).find_linear_recurrence(10) [3, -3, 1] >>> sequence(2**n).find_linear_recurrence(10) [2] >>> sequence(23*n**4+91*n**2).find_linear_recurrence(10) [5, -10, 10, -5, 1] >>> sequence(sqrt(5)*(((1 + sqrt(5))/2)**n - (-(1 + sqrt(5))/2)**(-n))/5).find_linear_recurrence(10) [1, 1] >>> sequence(x+y*(-2)**(-n), (n, 0, oo)).find_linear_recurrence(30) [1/2, 1/2] >>> sequence(3*5**n + 12).find_linear_recurrence(20,gfvar=x) ([6, -5], 3*(5 - 21*x)/((x - 1)*(5*x - 1))) >>> sequence(lucas(n)).find_linear_recurrence(15,gfvar=x) ([1, 1], (x - 2)/(x**2 + x - 1)) """ from sympy.matrices import Matrix x = [simplify(expand(t)) for t in self[:n]] lx = len(x) if d is None: r = lx//2 else: r = min(d,lx//2) coeffs = [] for l in range(1, r+1): l2 = 2*l mlist = [] for k in range(l): mlist.append(x[k:k+l]) m = Matrix(mlist) if m.det() != 0: y = simplify(m.LUsolve(Matrix(x[l:l2]))) if lx == l2: coeffs = flatten(y[::-1]) break mlist = [] for k in range(l,lx-l): mlist.append(x[k:k+l]) m = Matrix(mlist) if m*y == Matrix(x[l2:]): coeffs = flatten(y[::-1]) break if gfvar is None: return coeffs else: l = len(coeffs) if l == 0: return [], None else: n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l for i in range(l-1): n += x[i]*gfvar**i for j in range(l-i-1): n -= coeffs[i]*x[j]*gfvar**(i+j+1) d -= coeffs[i]*gfvar**(i+1) return coeffs, simplify(factor(n)/factor(d)) class EmptySequence(SeqBase, metaclass=Singleton): """Represents an empty sequence. The empty sequence is also available as a singleton as ``S.EmptySequence``. 
Examples ======== >>> from sympy import EmptySequence, SeqPer >>> from sympy.abc import x >>> EmptySequence EmptySequence >>> SeqPer((1, 2), (x, 0, 10)) + EmptySequence SeqPer((1, 2), (x, 0, 10)) >>> SeqPer((1, 2)) * EmptySequence EmptySequence >>> EmptySequence.coeff_mul(-1) EmptySequence """ @property def interval(self): return S.EmptySet @property def length(self): return S.Zero def coeff_mul(self, coeff): """See docstring of SeqBase.coeff_mul""" return self def __iter__(self): return iter([]) class SeqExpr(SeqBase): """Sequence expression class. Various sequences should inherit from this class. Examples ======== >>> from sympy.series.sequences import SeqExpr >>> from sympy.abc import x >>> s = SeqExpr((1, 2, 3), (x, 0, 10)) >>> s.gen (1, 2, 3) >>> s.interval Interval(0, 10) >>> s.length 11 See Also ======== sympy.series.sequences.SeqPer sympy.series.sequences.SeqFormula """ @property def gen(self): return self.args[0] @property def interval(self): return Interval(self.args[1][1], self.args[1][2]) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def length(self): return self.stop - self.start + 1 @property def variables(self): return (self.args[1][0],) class SeqPer(SeqExpr): """ Represents a periodic sequence. The elements are repeated after a given period. Examples ======== >>> from sympy import SeqPer, oo >>> from sympy.abc import k >>> s = SeqPer((1, 2, 3), (0, 5)) >>> s.periodical (1, 2, 3) >>> s.period 3 For value at a particular point >>> s.coeff(3) 1 supports slicing >>> s[:] [1, 2, 3, 1, 2, 3] iterable >>> list(s) [1, 2, 3, 1, 2, 3] sequence starts from negative infinity >>> SeqPer((1, 2, 3), (-oo, 0))[0:6] [1, 2, 3, 1, 2, 3] Periodic formulas >>> SeqPer((k, k**2, k**3), (k, 0, oo))[0:6] [0, 1, 8, 3, 16, 125] See Also ======== sympy.series.sequences.SeqFormula """ def __new__(cls, periodical, limits=None): periodical = sympify(periodical) def _find_x(periodical): free = periodical.free_symbols if len(periodical.free_symbols) == 1: return free.pop() else: return Dummy('k') x, start, stop = None, None, None if limits is None: x, start, stop = _find_x(periodical), 0, S.Infinity if is_sequence(limits, Tuple): if len(limits) == 3: x, start, stop = limits elif len(limits) == 2: x = _find_x(periodical) start, stop = limits if not isinstance(x, (Symbol, Idx)) or start is None or stop is None: raise ValueError('Invalid limits given: %s' % str(limits)) if start is S.NegativeInfinity and stop is S.Infinity: raise ValueError("Both the start and end value" "cannot be unbounded") limits = sympify((x, start, stop)) if is_sequence(periodical, Tuple): periodical = sympify(tuple(flatten(periodical))) else: raise ValueError("invalid period %s should be something " "like e.g (1, 2) " % periodical) if Interval(limits[1], limits[2]) is S.EmptySet: return S.EmptySequence return Basic.__new__(cls, periodical, limits) @property def period(self): return len(self.gen) @property def periodical(self): return self.gen def _eval_coeff(self, pt): if self.start is S.NegativeInfinity: idx = (self.stop - pt) % self.period else: idx = (pt - self.start) % self.period return self.periodical[idx].subs(self.variables[0], pt) def _add(self, other): """See docstring of SeqBase._add""" if isinstance(other, SeqPer): per1, lper1 = self.periodical, self.period per2, lper2 = other.periodical, other.period per_length = lcm(lper1, lper2) new_per = [] for x in range(per_length): ele1 = per1[x % lper1] ele2 = per2[x % lper2] new_per.append(ele1 + ele2) start, stop = 
self._intersect_interval(other) return SeqPer(new_per, (self.variables[0], start, stop)) def _mul(self, other): """See docstring of SeqBase._mul""" if isinstance(other, SeqPer): per1, lper1 = self.periodical, self.period per2, lper2 = other.periodical, other.period per_length = lcm(lper1, lper2) new_per = [] for x in range(per_length): ele1 = per1[x % lper1] ele2 = per2[x % lper2] new_per.append(ele1 * ele2) start, stop = self._intersect_interval(other) return SeqPer(new_per, (self.variables[0], start, stop)) def coeff_mul(self, coeff): """See docstring of SeqBase.coeff_mul""" coeff = sympify(coeff) per = [x * coeff for x in self.periodical] return SeqPer(per, self.args[1]) class SeqFormula(SeqExpr): """ Represents sequence based on a formula. Elements are generated using a formula. Examples ======== >>> from sympy import SeqFormula, oo, Symbol >>> n = Symbol('n') >>> s = SeqFormula(n**2, (n, 0, 5)) >>> s.formula n**2 For value at a particular point >>> s.coeff(3) 9 supports slicing >>> s[:] [0, 1, 4, 9, 16, 25] iterable >>> list(s) [0, 1, 4, 9, 16, 25] sequence starts from negative infinity >>> SeqFormula(n**2, (-oo, 0))[0:6] [0, 1, 4, 9, 16, 25] See Also ======== sympy.series.sequences.SeqPer """ def __new__(cls, formula, limits=None): formula = sympify(formula) def _find_x(formula): free = formula.free_symbols if len(free) == 1: return free.pop() elif not free: return Dummy('k') else: raise ValueError( " specify dummy variables for %s. If the formula contains" " more than one free symbol, a dummy variable should be" " supplied explicitly e.g., SeqFormula(m*n**2, (n, 0, 5))" % formula) x, start, stop = None, None, None if limits is None: x, start, stop = _find_x(formula), 0, S.Infinity if is_sequence(limits, Tuple): if len(limits) == 3: x, start, stop = limits elif len(limits) == 2: x = _find_x(formula) start, stop = limits if not isinstance(x, (Symbol, Idx)) or start is None or stop is None: raise ValueError('Invalid limits given: %s' % str(limits)) if start is S.NegativeInfinity and stop is S.Infinity: raise ValueError("Both the start and end value " "cannot be unbounded") limits = sympify((x, start, stop)) if Interval(limits[1], limits[2]) is S.EmptySet: return S.EmptySequence return Basic.__new__(cls, formula, limits) @property def formula(self): return self.gen def _eval_coeff(self, pt): d = self.variables[0] return self.formula.subs(d, pt) def _add(self, other): """See docstring of SeqBase._add""" if isinstance(other, SeqFormula): form1, v1 = self.formula, self.variables[0] form2, v2 = other.formula, other.variables[0] formula = form1 + form2.subs(v2, v1) start, stop = self._intersect_interval(other) return SeqFormula(formula, (v1, start, stop)) def _mul(self, other): """See docstring of SeqBase._mul""" if isinstance(other, SeqFormula): form1, v1 = self.formula, self.variables[0] form2, v2 = other.formula, other.variables[0] formula = form1 * form2.subs(v2, v1) start, stop = self._intersect_interval(other) return SeqFormula(formula, (v1, start, stop)) def coeff_mul(self, coeff): """See docstring of SeqBase.coeff_mul""" coeff = sympify(coeff) formula = self.formula * coeff return SeqFormula(formula, self.args[1]) def expand(self, *args, **kwargs): return SeqFormula(expand(self.formula, *args, **kwargs), self.args[1]) class RecursiveSeq(SeqBase): """ A finite degree recursive sequence. Explanation =========== That is, a sequence a(n) that depends on a fixed, finite number of its previous values. 
The general form is a(n) = f(a(n - 1), a(n - 2), ..., a(n - d)) for some fixed, positive integer d, where f is some function defined by a SymPy expression. Parameters ========== recurrence : SymPy expression defining recurrence This is *not* an equality, only the expression that the nth term is equal to. For example, if :code:`a(n) = f(a(n - 1), ..., a(n - d))`, then the expression should be :code:`f(a(n - 1), ..., a(n - d))`. yn : applied undefined function Represents the nth term of the sequence as e.g. :code:`y(n)` where :code:`y` is an undefined function and `n` is the sequence index. n : symbolic argument The name of the variable that the recurrence is in, e.g., :code:`n` if the recurrence function is :code:`y(n)`. initial : iterable with length equal to the degree of the recurrence The initial values of the recurrence. start : start value of sequence (inclusive) Examples ======== >>> from sympy import Function, symbols >>> from sympy.series.sequences import RecursiveSeq >>> y = Function("y") >>> n = symbols("n") >>> fib = RecursiveSeq(y(n - 1) + y(n - 2), y(n), n, [0, 1]) >>> fib.coeff(3) # Value at a particular point 2 >>> fib[:6] # supports slicing [0, 1, 1, 2, 3, 5] >>> fib.recurrence # inspect recurrence Eq(y(n), y(n - 2) + y(n - 1)) >>> fib.degree # automatically determine degree 2 >>> for x in zip(range(10), fib): # supports iteration ... print(x) (0, 0) (1, 1) (2, 1) (3, 2) (4, 3) (5, 5) (6, 8) (7, 13) (8, 21) (9, 34) See Also ======== sympy.series.sequences.SeqFormula """ def __new__(cls, recurrence, yn, n, initial=None, start=0): if not isinstance(yn, AppliedUndef): raise TypeError("recurrence sequence must be an applied undefined function" ", found `{}`".format(yn)) if not isinstance(n, Basic) or not n.is_symbol: raise TypeError("recurrence variable must be a symbol" ", found `{}`".format(n)) if yn.args != (n,): raise TypeError("recurrence sequence does not match symbol") y = yn.func k = Wild("k", exclude=(n,)) degree = 0 # Find all applications of y in the recurrence and check that: # 1. The function y is only being used with a single argument; and # 2. All arguments are n + k for constant negative integers k. 
prev_ys = recurrence.find(y) for prev_y in prev_ys: if len(prev_y.args) != 1: raise TypeError("Recurrence should be in a single variable") shift = prev_y.args[0].match(n + k)[k] if not (shift.is_constant() and shift.is_integer and shift < 0): raise TypeError("Recurrence should have constant," " negative, integer shifts" " (found {})".format(prev_y)) if -shift > degree: degree = -shift if not initial: initial = [Dummy("c_{}".format(k)) for k in range(degree)] if len(initial) != degree: raise ValueError("Number of initial terms must equal degree") degree = Integer(degree) start = sympify(start) initial = Tuple(*(sympify(x) for x in initial)) seq = Basic.__new__(cls, recurrence, yn, n, initial, start) seq.cache = {y(start + k): init for k, init in enumerate(initial)} seq.degree = degree return seq @property def _recurrence(self): """Equation defining recurrence.""" return self.args[0] @property def recurrence(self): """Equation defining recurrence.""" return Eq(self.yn, self.args[0]) @property def yn(self): """Applied function representing the nth term""" return self.args[1] @property def y(self): """Undefined function for the nth term of the sequence""" return self.yn.func @property def n(self): """Sequence index symbol""" return self.args[2] @property def initial(self): """The initial values of the sequence""" return self.args[3] @property def start(self): """The starting point of the sequence. This point is included""" return self.args[4] @property def stop(self): """The ending point of the sequence. (oo)""" return S.Infinity @property def interval(self): """Interval on which sequence is defined.""" return (self.start, S.Infinity) def _eval_coeff(self, index): if index - self.start < len(self.cache): return self.cache[self.y(index)] for current in range(len(self.cache), index + 1): # Use xreplace over subs for performance. # See issue #10697. seq_index = self.start + current current_recurrence = self._recurrence.xreplace({self.n: seq_index}) new_term = current_recurrence.xreplace(self.cache) self.cache[self.y(seq_index)] = new_term return self.cache[self.y(self.start + current)] def __iter__(self): index = self.start while True: yield self._eval_coeff(index) index += 1 def sequence(seq, limits=None): """ Returns appropriate sequence object. Explanation =========== If ``seq`` is a sympy sequence, returns :class:`SeqPer` object otherwise returns :class:`SeqFormula` object. Examples ======== >>> from sympy import sequence >>> from sympy.abc import n >>> sequence(n**2, (n, 0, 5)) SeqFormula(n**2, (n, 0, 5)) >>> sequence((1, 2, 3), (n, 0, 5)) SeqPer((1, 2, 3), (n, 0, 5)) See Also ======== sympy.series.sequences.SeqPer sympy.series.sequences.SeqFormula """ seq = sympify(seq) if is_sequence(seq, Tuple): return SeqPer(seq, limits) else: return SeqFormula(seq, limits) ############################################################################### # OPERATIONS # ############################################################################### class SeqExprOp(SeqBase): """ Base class for operations on sequences. Examples ======== >>> from sympy.series.sequences import SeqExprOp, sequence >>> from sympy.abc import n >>> s1 = sequence(n**2, (n, 0, 10)) >>> s2 = sequence((1, 2, 3), (n, 5, 10)) >>> s = SeqExprOp(s1, s2) >>> s.gen (n**2, (1, 2, 3)) >>> s.interval Interval(5, 10) >>> s.length 6 See Also ======== sympy.series.sequences.SeqAdd sympy.series.sequences.SeqMul """ @property def gen(self): """Generator for the sequence. returns a tuple of generators of all the argument sequences. 
""" return tuple(a.gen for a in self.args) @property def interval(self): """Sequence is defined on the intersection of all the intervals of respective sequences """ return Intersection(*(a.interval for a in self.args)) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def variables(self): """Cumulative of all the bound variables""" return tuple(flatten([a.variables for a in self.args])) @property def length(self): return self.stop - self.start + 1 class SeqAdd(SeqExprOp): """Represents term-wise addition of sequences. Rules: * The interval on which sequence is defined is the intersection of respective intervals of sequences. * Anything + :class:`EmptySequence` remains unchanged. * Other rules are defined in ``_add`` methods of sequence classes. Examples ======== >>> from sympy import EmptySequence, oo, SeqAdd, SeqPer, SeqFormula >>> from sympy.abc import n >>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), EmptySequence) SeqPer((1, 2), (n, 0, oo)) >>> SeqAdd(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10))) EmptySequence >>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2, (n, 0, oo))) SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo))) >>> SeqAdd(SeqFormula(n**3), SeqFormula(n**2)) SeqFormula(n**3 + n**2, (n, 0, oo)) See Also ======== sympy.series.sequences.SeqMul """ def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) # flatten inputs args = list(args) # adapted from sympy.sets.sets.Union def _flatten(arg): if isinstance(arg, SeqBase): if isinstance(arg, SeqAdd): return sum(map(_flatten, arg.args), []) else: return [arg] if iterable(arg): return sum(map(_flatten, arg), []) raise TypeError("Input must be Sequences or " " iterables of Sequences") args = _flatten(args) args = [a for a in args if a is not S.EmptySequence] # Addition of no sequences is EmptySequence if not args: return S.EmptySequence if Intersection(*(a.interval for a in args)) is S.EmptySet: return S.EmptySequence # reduce using known rules if evaluate: return SeqAdd.reduce(args) args = list(ordered(args, SeqBase._start_key)) return Basic.__new__(cls, *args) @staticmethod def reduce(args): """Simplify :class:`SeqAdd` using known rules. Iterates through all pairs and ask the constituent sequences if they can simplify themselves with any other constituent. Notes ===== adapted from ``Union.reduce`` """ new_args = True while new_args: for id1, s in enumerate(args): new_args = False for id2, t in enumerate(args): if id1 == id2: continue new_seq = s._add(t) # This returns None if s does not know how to add # with t. Returns the newly added sequence otherwise if new_seq is not None: new_args = [a for a in args if a not in (s, t)] new_args.append(new_seq) break if new_args: args = new_args break if len(args) == 1: return args.pop() else: return SeqAdd(args, evaluate=False) def _eval_coeff(self, pt): """adds up the coefficients of all the sequences at point pt""" return sum(a.coeff(pt) for a in self.args) class SeqMul(SeqExprOp): r"""Represents term-wise multiplication of sequences. Explanation =========== Handles multiplication of sequences only. For multiplication with other objects see :func:`SeqBase.coeff_mul`. Rules: * The interval on which sequence is defined is the intersection of respective intervals of sequences. * Anything \* :class:`EmptySequence` returns :class:`EmptySequence`. * Other rules are defined in ``_mul`` methods of sequence classes. 
Examples ======== >>> from sympy import EmptySequence, oo, SeqMul, SeqPer, SeqFormula >>> from sympy.abc import n >>> SeqMul(SeqPer((1, 2), (n, 0, oo)), EmptySequence) EmptySequence >>> SeqMul(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10))) EmptySequence >>> SeqMul(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2)) SeqMul(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo))) >>> SeqMul(SeqFormula(n**3), SeqFormula(n**2)) SeqFormula(n**5, (n, 0, oo)) See Also ======== sympy.series.sequences.SeqAdd """ def __new__(cls, *args, **kwargs): evaluate = kwargs.get('evaluate', global_parameters.evaluate) # flatten inputs args = list(args) # adapted from sympy.sets.sets.Union def _flatten(arg): if isinstance(arg, SeqBase): if isinstance(arg, SeqMul): return sum(map(_flatten, arg.args), []) else: return [arg] elif iterable(arg): return sum(map(_flatten, arg), []) raise TypeError("Input must be Sequences or " " iterables of Sequences") args = _flatten(args) # Multiplication of no sequences is EmptySequence if not args: return S.EmptySequence if Intersection(*(a.interval for a in args)) is S.EmptySet: return S.EmptySequence # reduce using known rules if evaluate: return SeqMul.reduce(args) args = list(ordered(args, SeqBase._start_key)) return Basic.__new__(cls, *args) @staticmethod def reduce(args): """Simplify a :class:`SeqMul` using known rules. Explanation =========== Iterates through all pairs and ask the constituent sequences if they can simplify themselves with any other constituent. Notes ===== adapted from ``Union.reduce`` """ new_args = True while new_args: for id1, s in enumerate(args): new_args = False for id2, t in enumerate(args): if id1 == id2: continue new_seq = s._mul(t) # This returns None if s does not know how to multiply # with t. Returns the newly multiplied sequence otherwise if new_seq is not None: new_args = [a for a in args if a not in (s, t)] new_args.append(new_seq) break if new_args: args = new_args break if len(args) == 1: return args.pop() else: return SeqMul(args, evaluate=False) def _eval_coeff(self, pt): """multiplies the coefficients of all the sequences at point pt""" val = 1 for a in self.args: val *= a.coeff(pt) return val
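# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: term-wise arithmetic on
# sequences (SeqAdd/SeqMul under the hood) and recurrence detection with
# find_linear_recurrence.  Expected values in the comments are stated as
# expectations, not verified output; `s1` and `s2` are names made up here.
if __name__ == '__main__':
    from sympy import sequence, oo
    from sympy.abc import n

    s1 = sequence(n**2, (n, 0, oo))      # SeqFormula
    s2 = sequence((1, 0), (n, 0, oo))    # SeqPer with period 2

    print((s1 + s2)[:6])                 # term-wise sum of the first six terms
    print((s1 * s2)[:6])                 # term-wise product
    # a(n) = 2**n + 3**n satisfies a(n) = 5*a(n-1) - 6*a(n-2), i.e. expected [5, -6]
    print(sequence(2**n + 3**n).find_linear_recurrence(10))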
"""Limits of sequences""" from sympy.core.add import Add from sympy.core.function import PoleError from sympy.core.power import Pow from sympy.core.singleton import S from sympy.core.symbol import Dummy from sympy.core.sympify import sympify from sympy.functions.combinatorial.numbers import fibonacci from sympy.functions.combinatorial.factorials import factorial, subfactorial from sympy.functions.special.gamma_functions import gamma from sympy.functions.elementary.complexes import Abs from sympy.functions.elementary.miscellaneous import Max, Min from sympy.functions.elementary.trigonometric import cos, sin from sympy.series.limits import Limit def difference_delta(expr, n=None, step=1): """Difference Operator. Explanation =========== Discrete analog of differential operator. Given a sequence x[n], returns the sequence x[n + step] - x[n]. Examples ======== >>> from sympy import difference_delta as dd >>> from sympy.abc import n >>> dd(n*(n + 1), n) 2*n + 2 >>> dd(n*(n + 1), n, 2) 4*n + 6 References ========== .. [1] https://reference.wolfram.com/language/ref/DifferenceDelta.html """ expr = sympify(expr) if n is None: f = expr.free_symbols if len(f) == 1: n = f.pop() elif len(f) == 0: return S.Zero else: raise ValueError("Since there is more than one variable in the" " expression, a variable must be supplied to" " take the difference of %s" % expr) step = sympify(step) if step.is_number is False or step.is_finite is False: raise ValueError("Step should be a finite number.") if hasattr(expr, '_eval_difference_delta'): result = expr._eval_difference_delta(n, step) if result: return result return expr.subs(n, n + step) - expr def dominant(expr, n): """Finds the dominant term in a sum, that is a term that dominates every other term. Explanation =========== If limit(a/b, n, oo) is oo then a dominates b. If limit(a/b, n, oo) is 0 then b dominates a. Otherwise, a and b are comparable. If there is no unique dominant term, then returns ``None``. Examples ======== >>> from sympy import Sum >>> from sympy.series.limitseq import dominant >>> from sympy.abc import n, k >>> dominant(5*n**3 + 4*n**2 + n + 1, n) 5*n**3 >>> dominant(2**n + Sum(k, (k, 0, n)), n) 2**n See Also ======== sympy.series.limitseq.dominant """ terms = Add.make_args(expr.expand(func=True)) term0 = terms[-1] comp = [term0] # comparable terms for t in terms[:-1]: e = (term0 / t).gammasimp() l = limit_seq(e, n) if l is None: return None elif l.is_zero: term0 = t comp = [term0] elif l not in [S.Infinity, S.NegativeInfinity]: comp.append(t) if len(comp) > 1: return None return term0 def _limit_inf(expr, n): try: return Limit(expr, n, S.Infinity).doit(deep=False) except (NotImplementedError, PoleError): return None def _limit_seq(expr, n, trials): from sympy.concrete.summations import Sum for i in range(trials): if not expr.has(Sum): result = _limit_inf(expr, n) if result is not None: return result num, den = expr.as_numer_denom() if not den.has(n) or not num.has(n): result = _limit_inf(expr.doit(), n) if result is not None: return result return None num, den = (difference_delta(t.expand(), n) for t in [num, den]) expr = (num / den).gammasimp() if not expr.has(Sum): result = _limit_inf(expr, n) if result is not None: return result num, den = expr.as_numer_denom() num = dominant(num, n) if num is None: return None den = dominant(den, n) if den is None: return None expr = (num / den).gammasimp() def limit_seq(expr, n=None, trials=5): """Finds the limit of a sequence as index ``n`` tends to infinity. 
Parameters ========== expr : Expr SymPy expression for the ``n-th`` term of the sequence n : Symbol, optional The index of the sequence, an integer that tends to positive infinity. If None, inferred from the expression unless it has multiple symbols. trials: int, optional The algorithm is highly recursive. ``trials`` is a safeguard from infinite recursion in case the limit is not easily computed by the algorithm. Try increasing ``trials`` if the algorithm returns ``None``. Admissible Terms ================ The algorithm is designed for sequences built from rational functions, indefinite sums, and indefinite products over an indeterminate n. Terms of alternating sign are also allowed, but more complex oscillatory behavior is not supported. Examples ======== >>> from sympy import limit_seq, Sum, binomial >>> from sympy.abc import n, k, m >>> limit_seq((5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5), n) 5/3 >>> limit_seq(binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n)), n) 3/4 >>> limit_seq(Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n), n) 4 See Also ======== sympy.series.limitseq.dominant References ========== .. [1] Computing Limits of Sequences - Manuel Kauers """ from sympy.concrete.summations import Sum from sympy.calculus.util import AccumulationBounds if n is None: free = expr.free_symbols if len(free) == 1: n = free.pop() elif not free: return expr else: raise ValueError("Expression has more than one variable. " "Please specify a variable.") elif n not in expr.free_symbols: return expr expr = expr.rewrite(fibonacci, S.GoldenRatio) expr = expr.rewrite(factorial, subfactorial, gamma) n_ = Dummy("n", integer=True, positive=True) n1 = Dummy("n", odd=True, positive=True) n2 = Dummy("n", even=True, positive=True) # If there is a negative term raised to a power involving n, or a # trigonometric function, then consider even and odd n separately. powers = (p.as_base_exp() for p in expr.atoms(Pow)) if (any(b.is_negative and e.has(n) for b, e in powers) or expr.has(cos, sin)): L1 = _limit_seq(expr.xreplace({n: n1}), n1, trials) if L1 is not None: L2 = _limit_seq(expr.xreplace({n: n2}), n2, trials) if L1 != L2: if L1.is_comparable and L2.is_comparable: return AccumulationBounds(Min(L1, L2), Max(L1, L2)) else: return None else: L1 = _limit_seq(expr.xreplace({n: n_}), n_, trials) if L1 is not None: return L1 else: if expr.is_Add: limits = [limit_seq(term, n, trials) for term in expr.args] if any(result is None for result in limits): return None else: return Add(*limits) # Maybe the absolute value is easier to deal with (though not if # it has a Sum). If it tends to 0, the limit is 0. elif not expr.has(Sum): lim = _limit_seq(Abs(expr.xreplace({n: n_})), n_, trials) if lim is not None and lim.is_zero: return S.Zero
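# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: limit_seq() handles
# indefinite sums via difference_delta() and alternating signs via the
# even/odd split above.  Expected values in the comments are stated as
# expectations, not verified output.
if __name__ == '__main__':
    from sympy import Sum, limit_seq
    from sympy.abc import n, k

    print(limit_seq(n / (n + 1), n))              # expected: 1
    print(limit_seq(Sum(1/k, (k, 1, n)) / n, n))  # harmonic number over n, expected: 0
    print(limit_seq((-1)**n / n, n))              # alternating sign, even/odd split, expected: 0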
"""Fourier Series""" from sympy import pi, oo, Wild from sympy.core.expr import Expr from sympy.core.add import Add from sympy.core.compatibility import is_sequence from sympy.core.containers import Tuple from sympy.core.singleton import S from sympy.core.symbol import Dummy, Symbol from sympy.core.sympify import sympify from sympy.functions.elementary.trigonometric import sin, cos, sinc from sympy.series.series_class import SeriesBase from sympy.series.sequences import SeqFormula from sympy.sets.sets import Interval from sympy.simplify.fu import TR2, TR1, TR10, sincos_to_sum def fourier_cos_seq(func, limits, n): """Returns the cos sequence in a Fourier series""" from sympy.integrals import integrate x, L = limits[0], limits[2] - limits[1] cos_term = cos(2*n*pi*x / L) formula = 2 * cos_term * integrate(func * cos_term, limits) / L a0 = formula.subs(n, S.Zero) / 2 return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits) / L, (n, 1, oo)) def fourier_sin_seq(func, limits, n): """Returns the sin sequence in a Fourier series""" from sympy.integrals import integrate x, L = limits[0], limits[2] - limits[1] sin_term = sin(2*n*pi*x / L) return SeqFormula(2 * sin_term * integrate(func * sin_term, limits) / L, (n, 1, oo)) def _process_limits(func, limits): """ Limits should be of the form (x, start, stop). x should be a symbol. Both start and stop should be bounded. Explanation =========== * If x is not given, x is determined from func. * If limits is None. Limit of the form (x, -pi, pi) is returned. Examples ======== >>> from sympy.series.fourier import _process_limits as pari >>> from sympy.abc import x >>> pari(x**2, (x, -2, 2)) (x, -2, 2) >>> pari(x**2, (-2, 2)) (x, -2, 2) >>> pari(x**2, None) (x, -pi, pi) """ def _find_x(func): free = func.free_symbols if len(free) == 1: return free.pop() elif not free: return Dummy('k') else: raise ValueError( " specify dummy variables for %s. If the function contains" " more than one free symbol, a dummy variable should be" " supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))" % func) x, start, stop = None, None, None if limits is None: x, start, stop = _find_x(func), -pi, pi if is_sequence(limits, Tuple): if len(limits) == 3: x, start, stop = limits elif len(limits) == 2: x = _find_x(func) start, stop = limits if not isinstance(x, Symbol) or start is None or stop is None: raise ValueError('Invalid limits given: %s' % str(limits)) unbounded = [S.NegativeInfinity, S.Infinity] if start in unbounded or stop in unbounded: raise ValueError("Both the start and end value should be bounded") return sympify((x, start, stop)) def finite_check(f, x, L): def check_fx(exprs, x): return x not in exprs.free_symbols def check_sincos(_expr, x, L): if isinstance(_expr, (sin, cos)): sincos_args = _expr.args[0] if sincos_args.match(a*(pi/L)*x + b) is not None: return True else: return False _expr = sincos_to_sum(TR2(TR1(f))) add_coeff = _expr.as_coeff_add() a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ]) b = Wild('b', properties=[lambda k: x not in k.free_symbols, ]) for s in add_coeff[1]: mul_coeffs = s.as_coeff_mul()[1] for t in mul_coeffs: if not (check_fx(t, x) or check_sincos(t, x, L)): return False, f return True, _expr class FourierSeries(SeriesBase): r"""Represents Fourier sine/cosine series. Explanation =========== This class only represents a fourier series. No computation is performed. For how to compute Fourier series, see the :func:`fourier_series` docstring. 
See Also ======== sympy.series.fourier.fourier_series """ def __new__(cls, *args): args = map(sympify, args) return Expr.__new__(cls, *args) @property def function(self): return self.args[0] @property def x(self): return self.args[1][0] @property def period(self): return (self.args[1][1], self.args[1][2]) @property def a0(self): return self.args[2][0] @property def an(self): return self.args[2][1] @property def bn(self): return self.args[2][2] @property def interval(self): return Interval(0, oo) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def length(self): return oo @property def L(self): return abs(self.period[1] - self.period[0]) / 2 def _eval_subs(self, old, new): x = self.x if old.has(x): return self def truncate(self, n=3): """ Return the first n nonzero terms of the series. If ``n`` is None return an iterator. Parameters ========== n : int or None Amount of non-zero terms in approximation or None. Returns ======= Expr or iterator : Approximation of function expanded into Fourier series. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x, (x, -pi, pi)) >>> s.truncate(4) 2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2 See Also ======== sympy.series.fourier.FourierSeries.sigma_approximation """ if n is None: return iter(self) terms = [] for t in self: if len(terms) == n: break if t is not S.Zero: terms.append(t) return Add(*terms) def sigma_approximation(self, n=3): r""" Return :math:`\sigma`-approximation of Fourier series with respect to order n. Explanation =========== Sigma approximation adjusts a Fourier summation to eliminate the Gibbs phenomenon which would otherwise occur at discontinuities. A sigma-approximated summation for a Fourier series of a T-periodical function can be written as .. math:: s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1} \operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot \left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr) + b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right], where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier series coefficients and :math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos :math:`\sigma` factor (expressed in terms of normalized :math:`\operatorname{sinc}` function). Parameters ========== n : int Highest order of the terms taken into account in approximation. Returns ======= Expr : Sigma approximation of function expanded into Fourier series. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x, (x, -pi, pi)) >>> s.sigma_approximation(4) 2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3 See Also ======== sympy.series.fourier.FourierSeries.truncate Notes ===== The behaviour of :meth:`~sympy.series.fourier.FourierSeries.sigma_approximation` is different from :meth:`~sympy.series.fourier.FourierSeries.truncate` - it takes all nonzero terms of degree smaller than n, rather than first n nonzero ones. References ========== .. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon .. [2] https://en.wikipedia.org/wiki/Sigma_approximation """ terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n]) if t is not S.Zero] return Add(*terms) def shift(self, s): """ Shift the function by a term independent of x. Explanation =========== f(x) -> f(x) + s This is fast, if Fourier series of f(x) is already computed. 
Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.shift(1).truncate() -4*cos(x) + cos(2*x) + 1 + pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) a0 = self.a0 + s sfunc = self.function + s return self.func(sfunc, self.args[1], (a0, self.an, self.bn)) def shiftx(self, s): """ Shift x by a term independent of x. Explanation =========== f(x) -> f(x + s) This is fast, if Fourier series of f(x) is already computed. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.shiftx(1).truncate() -4*cos(x + 1) + cos(2*x + 2) + pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) an = self.an.subs(x, x + s) bn = self.bn.subs(x, x + s) sfunc = self.function.subs(x, x + s) return self.func(sfunc, self.args[1], (self.a0, an, bn)) def scale(self, s): """ Scale the function by a term independent of x. Explanation =========== f(x) -> s * f(x) This is fast, if Fourier series of f(x) is already computed. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.scale(2).truncate() -8*cos(x) + 2*cos(2*x) + 2*pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) an = self.an.coeff_mul(s) bn = self.bn.coeff_mul(s) a0 = self.a0 * s sfunc = self.args[0] * s return self.func(sfunc, self.args[1], (a0, an, bn)) def scalex(self, s): """ Scale x by a term independent of x. Explanation =========== f(x) -> f(s*x) This is fast, if Fourier series of f(x) is already computed. Examples ======== >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x**2, (x, -pi, pi)) >>> s.scalex(2).truncate() -4*cos(2*x) + cos(4*x) + pi**2/3 """ s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) an = self.an.subs(x, x * s) bn = self.bn.subs(x, x * s) sfunc = self.function.subs(x, x * s) return self.func(sfunc, self.args[1], (self.a0, an, bn)) def _eval_as_leading_term(self, x, cdir=0): for t in self: if t is not S.Zero: return t def _eval_term(self, pt): if pt == 0: return self.a0 return self.an.coeff(pt) + self.bn.coeff(pt) def __neg__(self): return self.scale(-1) def __add__(self, other): if isinstance(other, FourierSeries): if self.period != other.period: raise ValueError("Both the series should have same periods") x, y = self.x, other.x function = self.function + other.function.subs(y, x) if self.x not in function.free_symbols: return function an = self.an + other.an bn = self.bn + other.bn a0 = self.a0 + other.a0 return self.func(function, self.args[1], (a0, an, bn)) return Add(self, other) def __sub__(self, other): return self.__add__(-other) class FiniteFourierSeries(FourierSeries): r"""Represents Finite Fourier sine/cosine series. For how to compute Fourier series, see the :func:`fourier_series` docstring. 
Parameters ========== f : Expr Expression for finding fourier_series limits : ( x, start, stop) x is the independent variable for the expression f (start, stop) is the period of the fourier series exprs: (a0, an, bn) or Expr a0 is the constant term a0 of the fourier series an is a dictionary of coefficients of cos terms an[k] = coefficient of cos(pi*(k/L)*x) bn is a dictionary of coefficients of sin terms bn[k] = coefficient of sin(pi*(k/L)*x) or exprs can be an expression to be converted to fourier form Methods ======= This class is an extension of FourierSeries class. Please refer to sympy.series.fourier.FourierSeries for further information. See Also ======== sympy.series.fourier.FourierSeries sympy.series.fourier.fourier_series """ def __new__(cls, f, limits, exprs): f = sympify(f) limits = sympify(limits) exprs = sympify(exprs) if not (type(exprs) == Tuple and len(exprs) == 3): # exprs is not of form (a0, an, bn) # Converts the expression to fourier form c, e = exprs.as_coeff_add() rexpr = c + Add(*[TR10(i) for i in e]) a0, exp_ls = rexpr.expand(trig=False, power_base=False, power_exp=False, log=False).as_coeff_add() x = limits[0] L = abs(limits[2] - limits[1]) / 2 a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k is not S.Zero, ]) b = Wild('b', properties=[lambda k: x not in k.free_symbols, ]) an = dict() bn = dict() # separates the coefficients of sin and cos terms in dictionaries an, and bn for p in exp_ls: t = p.match(b * cos(a * (pi / L) * x)) q = p.match(b * sin(a * (pi / L) * x)) if t: an[t[a]] = t[b] + an.get(t[a], S.Zero) elif q: bn[q[a]] = q[b] + bn.get(q[a], S.Zero) else: a0 += p exprs = Tuple(a0, an, bn) return Expr.__new__(cls, f, limits, exprs) @property def interval(self): _length = 1 if self.a0 else 0 _length += max(set(self.an.keys()).union(set(self.bn.keys()))) + 1 return Interval(0, _length) @property def length(self): return self.stop - self.start def shiftx(self, s): s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) _expr = self.truncate().subs(x, x + s) sfunc = self.function.subs(x, x + s) return self.func(sfunc, self.args[1], _expr) def scale(self, s): s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) _expr = self.truncate() * s sfunc = self.function * s return self.func(sfunc, self.args[1], _expr) def scalex(self, s): s, x = sympify(s), self.x if x in s.free_symbols: raise ValueError("'%s' should be independent of %s" % (s, x)) _expr = self.truncate().subs(x, x * s) sfunc = self.function.subs(x, x * s) return self.func(sfunc, self.args[1], _expr) def _eval_term(self, pt): if pt == 0: return self.a0 _term = self.an.get(pt, S.Zero) * cos(pt * (pi / self.L) * self.x) \ + self.bn.get(pt, S.Zero) * sin(pt * (pi / self.L) * self.x) return _term def __add__(self, other): if isinstance(other, FourierSeries): return other.__add__(fourier_series(self.function, self.args[1],\ finite=False)) elif isinstance(other, FiniteFourierSeries): if self.period != other.period: raise ValueError("Both the series should have same periods") x, y = self.x, other.x function = self.function + other.function.subs(y, x) if self.x not in function.free_symbols: return function return fourier_series(function, limits=self.args[1]) def fourier_series(f, limits=None, finite=True): r"""Computes the Fourier trigonometric series expansion. Explanation =========== Fourier trigonometric series of $f(x)$ over the interval $(a, b)$ is defined as: .. 
math:: \frac{a_0}{2} + \sum_{n=1}^{\infty} (a_n \cos(\frac{2n \pi x}{L}) + b_n \sin(\frac{2n \pi x}{L})) where the coefficients are: .. math:: L = b - a .. math:: a_0 = \frac{2}{L} \int_{a}^{b}{f(x) dx} .. math:: a_n = \frac{2}{L} \int_{a}^{b}{f(x) \cos(\frac{2n \pi x}{L}) dx} .. math:: b_n = \frac{2}{L} \int_{a}^{b}{f(x) \sin(\frac{2n \pi x}{L}) dx} The condition whether the function $f(x)$ given should be periodic or not is more than necessary, because it is sufficient to consider the series to be converging to $f(x)$ only in the given interval, not throughout the whole real line. This also brings a lot of ease for the computation because you don't have to make $f(x)$ artificially periodic by wrapping it with piecewise, modulo operations, but you can shape the function to look like the desired periodic function only in the interval $(a, b)$, and the computed series will automatically become the series of the periodic version of $f(x)$. This property is illustrated in the examples section below. Parameters ========== limits : (sym, start, end), optional *sym* denotes the symbol the series is computed with respect to. *start* and *end* denotes the start and the end of the interval where the fourier series converges to the given function. Default range is specified as $-\pi$ and $\pi$. Returns ======= FourierSeries A symbolic object representing the Fourier trigonometric series. Examples ======== Computing the Fourier series of $f(x) = x^2$: >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> f = x**2 >>> s = fourier_series(f, (x, -pi, pi)) >>> s1 = s.truncate(n=3) >>> s1 -4*cos(x) + cos(2*x) + pi**2/3 Shifting of the Fourier series: >>> s.shift(1).truncate() -4*cos(x) + cos(2*x) + 1 + pi**2/3 >>> s.shiftx(1).truncate() -4*cos(x + 1) + cos(2*x + 2) + pi**2/3 Scaling of the Fourier series: >>> s.scale(2).truncate() -8*cos(x) + 2*cos(2*x) + 2*pi**2/3 >>> s.scalex(2).truncate() -4*cos(2*x) + cos(4*x) + pi**2/3 Computing the Fourier series of $f(x) = x$: This illustrates how truncating to the higher order gives better convergence. .. plot:: :context: reset :format: doctest :include-source: True >>> from sympy import fourier_series, pi, plot >>> from sympy.abc import x >>> f = x >>> s = fourier_series(f, (x, -pi, pi)) >>> s1 = s.truncate(n = 3) >>> s2 = s.truncate(n = 5) >>> s3 = s.truncate(n = 7) >>> p = plot(f, s1, s2, s3, (x, -pi, pi), show=False, legend=True) >>> p[0].line_color = (0, 0, 0) >>> p[0].label = 'x' >>> p[1].line_color = (0.7, 0.7, 0.7) >>> p[1].label = 'n=3' >>> p[2].line_color = (0.5, 0.5, 0.5) >>> p[2].label = 'n=5' >>> p[3].line_color = (0.3, 0.3, 0.3) >>> p[3].label = 'n=7' >>> p.show() This illustrates how the series converges to different sawtooth waves if the different ranges are specified. .. plot:: :context: close-figs :format: doctest :include-source: True >>> s1 = fourier_series(x, (x, -1, 1)).truncate(10) >>> s2 = fourier_series(x, (x, -pi, pi)).truncate(10) >>> s3 = fourier_series(x, (x, 0, 1)).truncate(10) >>> p = plot(x, s1, s2, s3, (x, -5, 5), show=False, legend=True) >>> p[0].line_color = (0, 0, 0) >>> p[0].label = 'x' >>> p[1].line_color = (0.7, 0.7, 0.7) >>> p[1].label = '[-1, 1]' >>> p[2].line_color = (0.5, 0.5, 0.5) >>> p[2].label = '[-pi, pi]' >>> p[3].line_color = (0.3, 0.3, 0.3) >>> p[3].label = '[0, 1]' >>> p.show() Notes ===== Computing Fourier series can be slow due to the integration required in computing an, bn. 
It is faster to compute Fourier series of a function by using shifting and scaling on an already computed Fourier series rather than computing again. e.g. If the Fourier series of ``x**2`` is known the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``. See Also ======== sympy.series.fourier.FourierSeries References ========== .. [1] https://mathworld.wolfram.com/FourierSeries.html """ f = sympify(f) limits = _process_limits(f, limits) x = limits[0] if x not in f.free_symbols: return f if finite: L = abs(limits[2] - limits[1]) / 2 is_finite, res_f = finite_check(f, x, L) if is_finite: return FiniteFourierSeries(f, limits, res_f) n = Dummy('n') center = (limits[1] + limits[2]) / 2 if center.is_zero: neg_f = f.subs(x, -x) if f == neg_f: a0, an = fourier_cos_seq(f, limits, n) bn = SeqFormula(0, (1, oo)) return FourierSeries(f, limits, (a0, an, bn)) elif f == -neg_f: a0 = S.Zero an = SeqFormula(0, (1, oo)) bn = fourier_sin_seq(f, limits, n) return FourierSeries(f, limits, (a0, an, bn)) a0, an = fourier_cos_seq(f, limits, n) bn = fourier_sin_seq(f, limits, n) return FourierSeries(f, limits, (a0, an, bn))
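As a minimal end-to-end sketch of the API defined above, the doctest below chains fourier_series with truncate and sigma_approximation; the expected outputs are the ones already quoted in the docstrings of those two methods. >>> from sympy import fourier_series, pi >>> from sympy.abc import x >>> s = fourier_series(x, (x, -pi, pi)) >>> s.truncate(4) 2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2 >>> s.sigma_approximation(4) 2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3 Note that sigma_approximation damps the coefficients with Lanczos sinc factors, which is why its terms differ from the plain truncation.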
""" This module implements the Residue function and related tools for working with residues. """ from sympy import sympify from sympy.utilities.timeutils import timethis @timethis('residue') def residue(expr, x, x0): """ Finds the residue of ``expr`` at the point x=x0. The residue is defined as the coefficient of ``1/(x-x0)`` in the power series expansion about ``x=x0``. Examples ======== >>> from sympy import Symbol, residue, sin >>> x = Symbol("x") >>> residue(1/x, x, 0) 1 >>> residue(1/x**2, x, 0) 0 >>> residue(2/sin(x), x, 0) 2 This function is essential for the Residue Theorem [1]. References ========== .. [1] https://en.wikipedia.org/wiki/Residue_theorem """ # The current implementation uses series expansion to # calculate it. A more general implementation is explained in # the section 5.6 of the Bronstein's book {M. Bronstein: # Symbolic Integration I, Springer Verlag (2005)}. For purely # rational functions, the algorithm is much easier. See # sections 2.4, 2.5, and 2.7 (this section actually gives an # algorithm for computing any Laurent series coefficient for # a rational function). The theory in section 2.4 will help to # understand why the resultant works in the general algorithm. # For the definition of a resultant, see section 1.4 (and any # previous sections for more review). from sympy import collect, Mul, Order, S expr = sympify(expr) if x0 != 0: expr = expr.subs(x, x + x0) for n in [0, 1, 2, 4, 8, 16, 32]: s = expr.nseries(x, n=n) if not s.has(Order) or s.getn() >= 0: break s = collect(s.removeO(), x) if s.is_Add: args = s.args else: args = [s] res = S.Zero for arg in args: c, m = arg.as_coeff_mul(x) m = Mul(*m) if not (m == 1 or m == x or (m.is_Pow and m.exp.is_Integer)): raise NotImplementedError('term of unexpected form: %s' % m) if m == 1/x: res += c return res
"""Formal Power Series""" from collections import defaultdict from sympy import oo, zoo, nan from sympy.core.add import Add from sympy.core.compatibility import iterable from sympy.core.expr import Expr from sympy.core.function import Derivative, Function, expand from sympy.core.mul import Mul from sympy.core.numbers import Rational from sympy.core.relational import Eq from sympy.sets.sets import Interval from sympy.core.singleton import S from sympy.core.symbol import Wild, Dummy, symbols, Symbol from sympy.core.sympify import sympify from sympy.discrete.convolutions import convolution from sympy.functions.combinatorial.factorials import binomial, factorial, rf from sympy.functions.combinatorial.numbers import bell from sympy.functions.elementary.integers import floor, frac, ceiling from sympy.functions.elementary.miscellaneous import Min, Max from sympy.functions.elementary.piecewise import Piecewise from sympy.series.limits import Limit from sympy.series.order import Order from sympy.simplify.powsimp import powsimp from sympy.series.sequences import sequence from sympy.series.series_class import SeriesBase def rational_algorithm(f, x, k, order=4, full=False): """ Rational algorithm for computing formula of coefficients of Formal Power Series of a function. Explanation =========== Applicable when f(x) or some derivative of f(x) is a rational function in x. :func:`rational_algorithm` uses :func:`~.apart` function for partial fraction decomposition. :func:`~.apart` by default uses 'undetermined coefficients method'. By setting ``full=True``, 'Bronstein's algorithm' can be used instead. Looks for derivative of a function up to 4'th order (by default). This can be overridden using order option. Parameters ========== x : Symbol order : int, optional Order of the derivative of ``f``, Default is 4. full : bool Returns ======= formula : Expr ind : Expr Independent terms. order : int full : bool Examples ======== >>> from sympy import log, atan >>> from sympy.series.formal import rational_algorithm as ra >>> from sympy.abc import x, k >>> ra(1 / (1 - x), x, k) (1, 0, 0) >>> ra(log(1 + x), x, k) (-(-1)**(-k)/k, 0, 1) >>> ra(atan(x), x, k, full=True) ((-I*(-I)**(-k)/2 + I*I**(-k)/2)/k, 0, 1) Notes ===== By setting ``full=True``, range of admissible functions to be solved using ``rational_algorithm`` can be increased. This option should be used carefully as it can significantly slow down the computation as ``doit`` is performed on the :class:`~.RootSum` object returned by the :func:`~.apart` function. Use ``full=False`` whenever possible. See Also ======== sympy.polys.partfrac.apart References ========== .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf .. [2] Power Series in Computer Algebra - Wolfram Koepf """ from sympy.polys import RootSum, apart from sympy.integrals import integrate diff = f ds = [] # list of diff for i in range(order + 1): if i: diff = diff.diff(x) if diff.is_rational_function(x): coeff, sep = S.Zero, S.Zero terms = apart(diff, x, full=full) if terms.has(RootSum): terms = terms.doit() for t in Add.make_args(terms): num, den = t.as_numer_denom() if not den.has(x): sep += t else: if isinstance(den, Mul): # m*(n*x - a)**j -> (n*x - a)**j ind = den.as_independent(x) den = ind[1] num /= ind[0] # (n*x - a)**j -> (x - b) den, j = den.as_base_exp() a, xterm = den.as_coeff_add(x) # term -> m/x**n if not a: sep += t continue xc = xterm[0].coeff(x) a /= -xc num /= xc**j ak = ((-1)**j * num * binomial(j + k - 1, k).rewrite(factorial) / a**(j + k)) coeff += ak # Hacky, better way? 
if coeff.is_zero: return None if (coeff.has(x) or coeff.has(zoo) or coeff.has(oo) or coeff.has(nan)): return None for j in range(i): coeff = (coeff / (k + j + 1)) sep = integrate(sep, x) sep += (ds.pop() - sep).limit(x, 0) # constant of integration return (coeff.subs(k, k - i), sep, i) else: ds.append(diff) return None def rational_independent(terms, x): """ Returns a list of all the rationally independent terms. Examples ======== >>> from sympy import sin, cos >>> from sympy.series.formal import rational_independent >>> from sympy.abc import x >>> rational_independent([cos(x), sin(x)], x) [cos(x), sin(x)] >>> rational_independent([x**2, sin(x), x*sin(x), x**3], x) [x**3 + x**2, x*sin(x) + sin(x)] """ if not terms: return [] ind = terms[0:1] for t in terms[1:]: n = t.as_independent(x)[1] for i, term in enumerate(ind): d = term.as_independent(x)[1] q = (n / d).cancel() if q.is_rational_function(x): ind[i] += t break else: ind.append(t) return ind def simpleDE(f, x, g, order=4): r""" Generates simple DE. Explanation =========== DE is of the form .. math:: f^k(x) + \sum\limits_{j=0}^{k-1} A_j f^j(x) = 0 where :math:`A_j` should be rational function in x. Generates DE's upto order 4 (default). DE's can also have free parameters. By increasing order, higher order DE's can be found. Yields a tuple of (DE, order). """ from sympy.solvers.solveset import linsolve a = symbols('a:%d' % (order)) def _makeDE(k): eq = f.diff(x, k) + Add(*[a[i]*f.diff(x, i) for i in range(0, k)]) DE = g(x).diff(x, k) + Add(*[a[i]*g(x).diff(x, i) for i in range(0, k)]) return eq, DE found = False for k in range(1, order + 1): eq, DE = _makeDE(k) eq = eq.expand() terms = eq.as_ordered_terms() ind = rational_independent(terms, x) if found or len(ind) == k: sol = dict(zip(a, (i for s in linsolve(ind, a[:k]) for i in s))) if sol: found = True DE = DE.subs(sol) DE = DE.as_numer_denom()[0] DE = DE.factor().as_coeff_mul(Derivative)[1][0] yield DE.collect(Derivative(g(x))), k def exp_re(DE, r, k): """Converts a DE with constant coefficients (explike) into a RE. Explanation =========== Performs the substitution: .. math:: f^j(x) \\to r(k + j) Normalises the terms so that lowest order of a term is always r(k). Examples ======== >>> from sympy import Function, Derivative >>> from sympy.series.formal import exp_re >>> from sympy.abc import x, k >>> f, r = Function('f'), Function('r') >>> exp_re(-f(x) + Derivative(f(x)), r, k) -r(k) + r(k + 1) >>> exp_re(Derivative(f(x), x) + Derivative(f(x), (x, 2)), r, k) r(k) + r(k + 1) See Also ======== sympy.series.formal.hyper_re """ RE = S.Zero g = DE.atoms(Function).pop() mini = None for t in Add.make_args(DE): coeff, d = t.as_independent(g) if isinstance(d, Derivative): j = d.derivative_count else: j = 0 if mini is None or j < mini: mini = j RE += coeff * r(k + j) if mini: RE = RE.subs(k, k - mini) return RE def hyper_re(DE, r, k): """ Converts a DE into a RE. Explanation =========== Performs the substitution: .. math:: x^l f^j(x) \\to (k + 1 - l)_j . a_{k + j - l} Normalises the terms so that lowest order of a term is always r(k). 
Examples ======== >>> from sympy import Function, Derivative >>> from sympy.series.formal import hyper_re >>> from sympy.abc import x, k >>> f, r = Function('f'), Function('r') >>> hyper_re(-f(x) + Derivative(f(x)), r, k) (k + 1)*r(k + 1) - r(k) >>> hyper_re(-x*f(x) + Derivative(f(x), (x, 2)), r, k) (k + 2)*(k + 3)*r(k + 3) - r(k) See Also ======== sympy.series.formal.exp_re """ RE = S.Zero g = DE.atoms(Function).pop() x = g.atoms(Symbol).pop() mini = None for t in Add.make_args(DE.expand()): coeff, d = t.as_independent(g) c, v = coeff.as_independent(x) l = v.as_coeff_exponent(x)[1] if isinstance(d, Derivative): j = d.derivative_count else: j = 0 RE += c * rf(k + 1 - l, j) * r(k + j - l) if mini is None or j - l < mini: mini = j - l RE = RE.subs(k, k - mini) m = Wild('m') return RE.collect(r(k + m)) def _transformation_a(f, x, P, Q, k, m, shift): f *= x**(-shift) P = P.subs(k, k + shift) Q = Q.subs(k, k + shift) return f, P, Q, m def _transformation_c(f, x, P, Q, k, m, scale): f = f.subs(x, x**scale) P = P.subs(k, k / scale) Q = Q.subs(k, k / scale) m *= scale return f, P, Q, m def _transformation_e(f, x, P, Q, k, m): f = f.diff(x) P = P.subs(k, k + 1) * (k + m + 1) Q = Q.subs(k, k + 1) * (k + 1) return f, P, Q, m def _apply_shift(sol, shift): return [(res, cond + shift) for res, cond in sol] def _apply_scale(sol, scale): return [(res, cond / scale) for res, cond in sol] def _apply_integrate(sol, x, k): return [(res / ((cond + 1)*(cond.as_coeff_Add()[1].coeff(k))), cond + 1) for res, cond in sol] def _compute_formula(f, x, P, Q, k, m, k_max): """Computes the formula for f.""" from sympy.polys import roots sol = [] for i in range(k_max + 1, k_max + m + 1): if (i < 0) == True: continue r = f.diff(x, i).limit(x, 0) / factorial(i) if r.is_zero: continue kterm = m*k + i res = r p = P.subs(k, kterm) q = Q.subs(k, kterm) c1 = p.subs(k, 1/k).leadterm(k)[0] c2 = q.subs(k, 1/k).leadterm(k)[0] res *= (-c1 / c2)**k for r, mul in roots(p, k).items(): res *= rf(-r, k)**mul for r, mul in roots(q, k).items(): res /= rf(-r, k)**mul sol.append((res, kterm)) return sol def _rsolve_hypergeometric(f, x, P, Q, k, m): """ Recursive wrapper to rsolve_hypergeometric. Explanation =========== Returns a Tuple of (formula, series independent terms, maximum power of x in independent terms) if successful otherwise ``None``. See :func:`rsolve_hypergeometric` for details. 
""" from sympy.polys import lcm, roots from sympy.integrals import integrate # transformation - c proots, qroots = roots(P, k), roots(Q, k) all_roots = dict(proots) all_roots.update(qroots) scale = lcm([r.as_numer_denom()[1] for r, t in all_roots.items() if r.is_rational]) f, P, Q, m = _transformation_c(f, x, P, Q, k, m, scale) # transformation - a qroots = roots(Q, k) if qroots: k_min = Min(*qroots.keys()) else: k_min = S.Zero shift = k_min + m f, P, Q, m = _transformation_a(f, x, P, Q, k, m, shift) l = (x*f).limit(x, 0) if not isinstance(l, Limit) and l != 0: # Ideally should only be l != 0 return None qroots = roots(Q, k) if qroots: k_max = Max(*qroots.keys()) else: k_max = S.Zero ind, mp = S.Zero, -oo for i in range(k_max + m + 1): r = f.diff(x, i).limit(x, 0) / factorial(i) if r.is_finite is False: old_f = f f, P, Q, m = _transformation_a(f, x, P, Q, k, m, i) f, P, Q, m = _transformation_e(f, x, P, Q, k, m) sol, ind, mp = _rsolve_hypergeometric(f, x, P, Q, k, m) sol = _apply_integrate(sol, x, k) sol = _apply_shift(sol, i) ind = integrate(ind, x) ind += (old_f - ind).limit(x, 0) # constant of integration mp += 1 return sol, ind, mp elif r: ind += r*x**(i + shift) pow_x = Rational((i + shift), scale) if pow_x > mp: mp = pow_x # maximum power of x ind = ind.subs(x, x**(1/scale)) sol = _compute_formula(f, x, P, Q, k, m, k_max) sol = _apply_shift(sol, shift) sol = _apply_scale(sol, scale) return sol, ind, mp def rsolve_hypergeometric(f, x, P, Q, k, m): """ Solves RE of hypergeometric type. Explanation =========== Attempts to solve RE of the form Q(k)*a(k + m) - P(k)*a(k) Transformations that preserve Hypergeometric type: a. x**n*f(x): b(k + m) = R(k - n)*b(k) b. f(A*x): b(k + m) = A**m*R(k)*b(k) c. f(x**n): b(k + n*m) = R(k/n)*b(k) d. f(x**(1/m)): b(k + 1) = R(k*m)*b(k) e. f'(x): b(k + m) = ((k + m + 1)/(k + 1))*R(k + 1)*b(k) Some of these transformations have been used to solve the RE. Returns ======= formula : Expr ind : Expr Independent terms. order : int Examples ======== >>> from sympy import exp, ln, S >>> from sympy.series.formal import rsolve_hypergeometric as rh >>> from sympy.abc import x, k >>> rh(exp(x), x, -S.One, (k + 1), k, 1) (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) >>> rh(ln(1 + x), x, k**2, k*(k + 1), k, 1) (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), Eq(Mod(k, 1), 0)), (0, True)), x, 2) References ========== .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf .. 
[2] Power Series in Computer Algebra - Wolfram Koepf """ result = _rsolve_hypergeometric(f, x, P, Q, k, m) if result is None: return None sol_list, ind, mp = result sol_dict = defaultdict(lambda: S.Zero) for res, cond in sol_list: j, mk = cond.as_coeff_Add() c = mk.coeff(k) if j.is_integer is False: res *= x**frac(j) j = floor(j) res = res.subs(k, (k - j) / c) cond = Eq(k % c, j % c) sol_dict[cond] += res # Group together formula for same conditions sol = [] for cond, res in sol_dict.items(): sol.append((res, cond)) sol.append((S.Zero, True)) sol = Piecewise(*sol) if mp is -oo: s = S.Zero elif mp.is_integer is False: s = ceiling(mp) else: s = mp + 1 # save all the terms of # form 1/x**k in ind if s < 0: ind += sum(sequence(sol * x**k, (k, s, -1))) s = S.Zero return (sol, ind, s) def _solve_hyper_RE(f, x, RE, g, k): """See docstring of :func:`rsolve_hypergeometric` for details.""" terms = Add.make_args(RE) if len(terms) == 2: gs = list(RE.atoms(Function)) P, Q = map(RE.coeff, gs) m = gs[1].args[0] - gs[0].args[0] if m < 0: P, Q = Q, P m = abs(m) return rsolve_hypergeometric(f, x, P, Q, k, m) def _solve_explike_DE(f, x, DE, g, k): """Solves DE with constant coefficients.""" from sympy.solvers import rsolve for t in Add.make_args(DE): coeff, d = t.as_independent(g) if coeff.free_symbols: return RE = exp_re(DE, g, k) init = {} for i in range(len(Add.make_args(RE))): if i: f = f.diff(x) init[g(k).subs(k, i)] = f.limit(x, 0) sol = rsolve(RE, g(k), init) if sol: return (sol / factorial(k), S.Zero, S.Zero) def _solve_simple(f, x, DE, g, k): """Converts DE into RE and solves using :func:`rsolve`.""" from sympy.solvers import rsolve RE = hyper_re(DE, g, k) init = {} for i in range(len(Add.make_args(RE))): if i: f = f.diff(x) init[g(k).subs(k, i)] = f.limit(x, 0) / factorial(i) sol = rsolve(RE, g(k), init) if sol: return (sol, S.Zero, S.Zero) def _transform_explike_DE(DE, g, x, order, syms): """Converts DE with free parameters into DE with constant coefficients.""" from sympy.solvers.solveset import linsolve eq = [] highest_coeff = DE.coeff(Derivative(g(x), x, order)) for i in range(order): coeff = DE.coeff(Derivative(g(x), x, i)) coeff = (coeff / highest_coeff).expand().collect(x) for t in Add.make_args(coeff): eq.append(t) temp = [] for e in eq: if e.has(x): break elif e.has(Symbol): temp.append(e) else: eq = temp if eq: sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s))) if sol: DE = DE.subs(sol) DE = DE.factor().as_coeff_mul(Derivative)[1][0] DE = DE.collect(Derivative(g(x))) return DE def _transform_DE_RE(DE, g, k, order, syms): """Converts DE with free parameters into RE of hypergeometric type.""" from sympy.solvers.solveset import linsolve RE = hyper_re(DE, g, k) eq = [] for i in range(1, order): coeff = RE.coeff(g(k + i)) eq.append(coeff) sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s))) if sol: m = Wild('m') RE = RE.subs(sol) RE = RE.factor().as_numer_denom()[0].collect(g(k + m)) RE = RE.as_coeff_mul(g)[1][0] for i in range(order): # smallest order should be g(k) if RE.coeff(g(k + i)) and i: RE = RE.subs(k, k - i) break return RE def solve_de(f, x, DE, order, g, k): """ Solves the DE. Explanation =========== Tries to solve DE by either converting into a RE containing two terms or converting into a DE having constant coefficients. Returns ======= formula : Expr ind : Expr Independent terms. 
order : int Examples ======== >>> from sympy import Derivative as D, Function >>> from sympy import exp, ln >>> from sympy.series.formal import solve_de >>> from sympy.abc import x, k >>> f = Function('f') >>> solve_de(exp(x), x, D(f(x), x) - f(x), 1, f, k) (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) >>> solve_de(ln(1 + x), x, (x + 1)*D(f(x), x, 2) + D(f(x)), 2, f, k) (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), Eq(Mod(k, 1), 0)), (0, True)), x, 2) """ sol = None syms = DE.free_symbols.difference({g, x}) if syms: RE = _transform_DE_RE(DE, g, k, order, syms) else: RE = hyper_re(DE, g, k) if not RE.free_symbols.difference({k}): sol = _solve_hyper_RE(f, x, RE, g, k) if sol: return sol if syms: DE = _transform_explike_DE(DE, g, x, order, syms) if not DE.free_symbols.difference({x}): sol = _solve_explike_DE(f, x, DE, g, k) if sol: return sol def hyper_algorithm(f, x, k, order=4): """ Hypergeometric algorithm for computing Formal Power Series. Explanation =========== Steps: * Generates DE * Convert the DE into RE * Solves the RE Examples ======== >>> from sympy import exp, ln >>> from sympy.series.formal import hyper_algorithm >>> from sympy.abc import x, k >>> hyper_algorithm(exp(x), x, k) (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) >>> hyper_algorithm(ln(1 + x), x, k) (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), Eq(Mod(k, 1), 0)), (0, True)), x, 2) See Also ======== sympy.series.formal.simpleDE sympy.series.formal.solve_de """ g = Function('g') des = [] # list of DE's sol = None for DE, i in simpleDE(f, x, g, order): if DE is not None: sol = solve_de(f, x, DE, i, g, k) if sol: return sol if not DE.free_symbols.difference({x}): des.append(DE) # If nothing works # Try plain rsolve for DE in des: sol = _solve_simple(f, x, DE, g, k) if sol: return sol def _compute_fps(f, x, x0, dir, hyper, order, rational, full): """Recursive wrapper to compute fps. See :func:`compute_fps` for details. """ if x0 in [S.Infinity, S.NegativeInfinity]: dir = S.One if x0 is S.Infinity else -S.One temp = f.subs(x, 1/x) result = _compute_fps(temp, x, 0, dir, hyper, order, rational, full) if result is None: return None return (result[0], result[1].subs(x, 1/x), result[2].subs(x, 1/x)) elif x0 or dir == -S.One: if dir == -S.One: rep = -x + x0 rep2 = -x rep2b = x0 else: rep = x + x0 rep2 = x rep2b = -x0 temp = f.subs(x, rep) result = _compute_fps(temp, x, 0, S.One, hyper, order, rational, full) if result is None: return None return (result[0], result[1].subs(x, rep2 + rep2b), result[2].subs(x, rep2 + rep2b)) if f.is_polynomial(x): k = Dummy('k') ak = sequence(Coeff(f, x, k), (k, 1, oo)) xk = sequence(x**k, (k, 0, oo)) ind = f.coeff(x, 0) return ak, xk, ind # Break instances of Add # this allows application of different # algorithms on different terms increasing the # range of admissible functions. 
if isinstance(f, Add): result = False ak = sequence(S.Zero, (0, oo)) ind, xk = S.Zero, None for t in Add.make_args(f): res = _compute_fps(t, x, 0, S.One, hyper, order, rational, full) if res: if not result: result = True xk = res[1] if res[0].start > ak.start: seq = ak s, f = ak.start, res[0].start else: seq = res[0] s, f = res[0].start, ak.start save = Add(*[z[0]*z[1] for z in zip(seq[0:(f - s)], xk[s:f])]) ak += res[0] ind += res[2] + save else: ind += t if result: return ak, xk, ind return None # The symbolic term - symb, if present, is being separated from the function # Otherwise symb is being set to S.One syms = f.free_symbols.difference({x}) (f, symb) = expand(f).as_independent(*syms) if symb.is_zero: symb = S.One symb = powsimp(symb) result = None # from here on it's x0=0 and dir=1 handling k = Dummy('k') if rational: result = rational_algorithm(f, x, k, order, full) if result is None and hyper: result = hyper_algorithm(f, x, k, order) if result is None: return None ak = sequence(result[0], (k, result[2], oo)) xk_formula = powsimp(x**k * symb) xk = sequence(xk_formula, (k, 0, oo)) ind = powsimp(result[1] * symb) return ak, xk, ind def compute_fps(f, x, x0=0, dir=1, hyper=True, order=4, rational=True, full=False): """ Computes the formula for Formal Power Series of a function. Explanation =========== Tries to compute the formula by applying the following techniques (in order): * rational_algorithm * Hypergeometric algorithm Parameters ========== x : Symbol x0 : number, optional Point to perform series expansion about. Default is 0. dir : {1, -1, '+', '-'}, optional If dir is 1 or '+' the series is calculated from the right and for -1 or '-' the series is calculated from the left. For smooth functions this flag will not alter the results. Default is 1. hyper : {True, False}, optional Set hyper to False to skip the hypergeometric algorithm. By default it is set to False. order : int, optional Order of the derivative of ``f``, Default is 4. rational : {True, False}, optional Set rational to False to skip rational algorithm. By default it is set to True. full : {True, False}, optional Set full to True to increase the range of rational algorithm. See :func:`rational_algorithm` for details. By default it is set to False. Returns ======= ak : sequence Sequence of coefficients. xk : sequence Sequence of powers of x. ind : Expr Independent terms. mul : Pow Common terms. See Also ======== sympy.series.formal.rational_algorithm sympy.series.formal.hyper_algorithm """ f = sympify(f) x = sympify(x) if not f.has(x): return None x0 = sympify(x0) if dir == '+': dir = S.One elif dir == '-': dir = -S.One elif dir not in [S.One, -S.One]: raise ValueError("Dir must be '+' or '-'") else: dir = sympify(dir) return _compute_fps(f, x, x0, dir, hyper, order, rational, full) class Coeff(Function): """ Coeff(p, x, n) represents the nth coefficient of the polynomial p in x """ @classmethod def eval(cls, p, x, n): if p.is_polynomial(x) and n.is_integer: return p.coeff(x, n) class FormalPowerSeries(SeriesBase): """ Represents Formal Power Series of a function. Explanation =========== No computation is performed. This class should only to be used to represent a series. No checks are performed. For computing a series use :func:`fps`. 
See Also ======== sympy.series.formal.fps """ def __new__(cls, *args): args = map(sympify, args) return Expr.__new__(cls, *args) def __init__(self, *args): ak = args[4][0] k = ak.variables[0] self.ak_seq = sequence(ak.formula, (k, 1, oo)) self.fact_seq = sequence(factorial(k), (k, 1, oo)) self.bell_coeff_seq = self.ak_seq * self.fact_seq self.sign_seq = sequence((-1, 1), (k, 1, oo)) @property def function(self): return self.args[0] @property def x(self): return self.args[1] @property def x0(self): return self.args[2] @property def dir(self): return self.args[3] @property def ak(self): return self.args[4][0] @property def xk(self): return self.args[4][1] @property def ind(self): return self.args[4][2] @property def interval(self): return Interval(0, oo) @property def start(self): return self.interval.inf @property def stop(self): return self.interval.sup @property def length(self): return oo @property def infinite(self): """Returns an infinite representation of the series""" from sympy.concrete import Sum ak, xk = self.ak, self.xk k = ak.variables[0] inf_sum = Sum(ak.formula * xk.formula, (k, ak.start, ak.stop)) return self.ind + inf_sum def _get_pow_x(self, term): """Returns the power of x in a term.""" xterm, pow_x = term.as_independent(self.x)[1].as_base_exp() if not xterm.has(self.x): return S.Zero return pow_x def polynomial(self, n=6): """ Truncated series as polynomial. Explanation =========== Returns series expansion of ``f`` upto order ``O(x**n)`` as a polynomial(without ``O`` term). """ terms = [] sym = self.free_symbols for i, t in enumerate(self): xp = self._get_pow_x(t) if xp.has(*sym): xp = xp.as_coeff_add(*sym)[0] if xp >= n: break elif xp.is_integer is True and i == n + 1: break elif t is not S.Zero: terms.append(t) return Add(*terms) def truncate(self, n=6): """ Truncated series. Explanation =========== Returns truncated series expansion of f upto order ``O(x**n)``. If n is ``None``, returns an infinite iterator. """ if n is None: return iter(self) x, x0 = self.x, self.x0 pt_xk = self.xk.coeff(n) if x0 is S.NegativeInfinity: x0 = S.Infinity return self.polynomial(n) + Order(pt_xk, (x, x0)) def zero_coeff(self): return self._eval_term(0) def _eval_term(self, pt): try: pt_xk = self.xk.coeff(pt) pt_ak = self.ak.coeff(pt).simplify() # Simplify the coefficients except IndexError: term = S.Zero else: term = (pt_ak * pt_xk) if self.ind: ind = S.Zero sym = self.free_symbols for t in Add.make_args(self.ind): pow_x = self._get_pow_x(t) if pow_x.has(*sym): pow_x = pow_x.as_coeff_add(*sym)[0] if pt == 0 and pow_x < 1: ind += t elif pow_x >= pt and pow_x < pt + 1: ind += t term += ind return term.collect(self.x) def _eval_subs(self, old, new): x = self.x if old.has(x): return self def _eval_as_leading_term(self, x, cdir=0): for t in self: if t is not S.Zero: return t def _eval_derivative(self, x): f = self.function.diff(x) ind = self.ind.diff(x) pow_xk = self._get_pow_x(self.xk.formula) ak = self.ak k = ak.variables[0] if ak.formula.has(x): form = [] for e, c in ak.formula.args: temp = S.Zero for t in Add.make_args(e): pow_x = self._get_pow_x(t) temp += t * (pow_xk + pow_x) form.append((temp, c)) form = Piecewise(*form) ak = sequence(form.subs(k, k + 1), (k, ak.start - 1, ak.stop)) else: ak = sequence((ak.formula * pow_xk).subs(k, k + 1), (k, ak.start - 1, ak.stop)) return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind)) def integrate(self, x=None, **kwargs): """ Integrate Formal Power Series. 
Examples ======== >>> from sympy import fps, sin, integrate >>> from sympy.abc import x >>> f = fps(sin(x)) >>> f.integrate(x).truncate() -1 + x**2/2 - x**4/24 + O(x**6) >>> integrate(f, (x, 0, 1)) 1 - cos(1) """ from sympy.integrals import integrate if x is None: x = self.x elif iterable(x): return integrate(self.function, x) f = integrate(self.function, x) ind = integrate(self.ind, x) ind += (f - ind).limit(x, 0) # constant of integration pow_xk = self._get_pow_x(self.xk.formula) ak = self.ak k = ak.variables[0] if ak.formula.has(x): form = [] for e, c in ak.formula.args: temp = S.Zero for t in Add.make_args(e): pow_x = self._get_pow_x(t) temp += t / (pow_xk + pow_x + 1) form.append((temp, c)) form = Piecewise(*form) ak = sequence(form.subs(k, k - 1), (k, ak.start + 1, ak.stop)) else: ak = sequence((ak.formula / (pow_xk + 1)).subs(k, k - 1), (k, ak.start + 1, ak.stop)) return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind)) def product(self, other, x=None, n=6): """ Multiplies two Formal Power Series, using discrete convolution and return the truncated terms upto specified order. Parameters ========== n : Number, optional Specifies the order of the term up to which the polynomial should be truncated. Examples ======== >>> from sympy import fps, sin, exp >>> from sympy.abc import x >>> f1 = fps(sin(x)) >>> f2 = fps(exp(x)) >>> f1.product(f2, x).truncate(4) x + x**2 + x**3/3 + O(x**4) See Also ======== sympy.discrete.convolutions sympy.series.formal.FormalPowerSeriesProduct """ if x is None: x = self.x if n is None: return iter(self) other = sympify(other) if not isinstance(other, FormalPowerSeries): raise ValueError("Both series should be an instance of FormalPowerSeries" " class.") if self.dir != other.dir: raise ValueError("Both series should be calculated from the" " same direction.") elif self.x0 != other.x0: raise ValueError("Both series should be calculated about the" " same point.") elif self.x != other.x: raise ValueError("Both series should have the same symbol.") return FormalPowerSeriesProduct(self, other) def coeff_bell(self, n): r""" self.coeff_bell(n) returns a sequence of Bell polynomials of the second kind. Note that ``n`` should be a integer. The second kind of Bell polynomials (are sometimes called "partial" Bell polynomials or incomplete Bell polynomials) are defined as .. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) = \sum_{j_1+j_2+j_2+\dotsb=k \atop j_1+2j_2+3j_2+\dotsb=n} \frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!} \left(\frac{x_1}{1!} \right)^{j_1} \left(\frac{x_2}{2!} \right)^{j_2} \dotsb \left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}. * ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind, `B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`. See Also ======== sympy.functions.combinatorial.numbers.bell """ inner_coeffs = [bell(n, j, tuple(self.bell_coeff_seq[:n-j+1])) for j in range(1, n+1)] k = Dummy('k') return sequence(tuple(inner_coeffs), (k, 1, oo)) def compose(self, other, x=None, n=6): r""" Returns the truncated terms of the formal power series of the composed function, up to specified ``n``. Explanation =========== If ``f`` and ``g`` are two formal power series of two different functions, then the coefficient sequence ``ak`` of the composed formal power series `fp` will be as follows. .. math:: \sum\limits_{k=0}^{n} b_k B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) Parameters ========== n : Number, optional Specifies the order of the term up to which the polynomial should be truncated. 
Examples ======== >>> from sympy import fps, sin, exp >>> from sympy.abc import x >>> f1 = fps(exp(x)) >>> f2 = fps(sin(x)) >>> f1.compose(f2, x).truncate() 1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6) >>> f1.compose(f2, x).truncate(8) 1 + x + x**2/2 - x**4/8 - x**5/15 - x**6/240 + x**7/90 + O(x**8) See Also ======== sympy.functions.combinatorial.numbers.bell sympy.series.formal.FormalPowerSeriesCompose References ========== .. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974. """ if x is None: x = self.x if n is None: return iter(self) other = sympify(other) if not isinstance(other, FormalPowerSeries): raise ValueError("Both series should be an instance of FormalPowerSeries" " class.") if self.dir != other.dir: raise ValueError("Both series should be calculated from the" " same direction.") elif self.x0 != other.x0: raise ValueError("Both series should be calculated about the" " same point.") elif self.x != other.x: raise ValueError("Both series should have the same symbol.") if other._eval_term(0).as_coeff_mul(other.x)[0] is not S.Zero: raise ValueError("The formal power series of the inner function should not have any " "constant coefficient term.") return FormalPowerSeriesCompose(self, other) def inverse(self, x=None, n=6): r""" Returns the truncated terms of the inverse of the formal power series, up to specified ``n``. Explanation =========== If ``f`` and ``g`` are two formal power series of two different functions, then the coefficient sequence ``ak`` of the composed formal power series ``fp`` will be as follows. .. math:: \sum\limits_{k=0}^{n} (-1)^{k} x_0^{-k-1} B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) Parameters ========== n : Number, optional Specifies the order of the term up to which the polynomial should be truncated. Examples ======== >>> from sympy import fps, exp, cos >>> from sympy.abc import x >>> f1 = fps(exp(x)) >>> f2 = fps(cos(x)) >>> f1.inverse(x).truncate() 1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + O(x**6) >>> f2.inverse(x).truncate(8) 1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + O(x**8) See Also ======== sympy.functions.combinatorial.numbers.bell sympy.series.formal.FormalPowerSeriesInverse References ========== .. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974. 
""" if x is None: x = self.x if n is None: return iter(self) if self._eval_term(0).is_zero: raise ValueError("Constant coefficient should exist for an inverse of a formal" " power series to exist.") return FormalPowerSeriesInverse(self) def __add__(self, other): other = sympify(other) if isinstance(other, FormalPowerSeries): if self.dir != other.dir: raise ValueError("Both series should be calculated from the" " same direction.") elif self.x0 != other.x0: raise ValueError("Both series should be calculated about the" " same point.") x, y = self.x, other.x f = self.function + other.function.subs(y, x) if self.x not in f.free_symbols: return f ak = self.ak + other.ak if self.ak.start > other.ak.start: seq = other.ak s, e = other.ak.start, self.ak.start else: seq = self.ak s, e = self.ak.start, other.ak.start save = Add(*[z[0]*z[1] for z in zip(seq[0:(e - s)], self.xk[s:e])]) ind = self.ind + other.ind + save return self.func(f, x, self.x0, self.dir, (ak, self.xk, ind)) elif not other.has(self.x): f = self.function + other ind = self.ind + other return self.func(f, self.x, self.x0, self.dir, (self.ak, self.xk, ind)) return Add(self, other) def __radd__(self, other): return self.__add__(other) def __neg__(self): return self.func(-self.function, self.x, self.x0, self.dir, (-self.ak, self.xk, -self.ind)) def __sub__(self, other): return self.__add__(-other) def __rsub__(self, other): return (-self).__add__(other) def __mul__(self, other): other = sympify(other) if other.has(self.x): return Mul(self, other) f = self.function * other ak = self.ak.coeff_mul(other) ind = self.ind * other return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind)) def __rmul__(self, other): return self.__mul__(other) class FiniteFormalPowerSeries(FormalPowerSeries): """Base Class for Product, Compose and Inverse classes""" def __init__(self, *args): pass @property def ffps(self): return self.args[0] @property def gfps(self): return self.args[1] @property def f(self): return self.ffps.function @property def g(self): return self.gfps.function @property def infinite(self): raise NotImplementedError("No infinite version for an object of" " FiniteFormalPowerSeries class.") def _eval_terms(self, n): raise NotImplementedError("(%s)._eval_terms()" % self) def _eval_term(self, pt): raise NotImplementedError("By the current logic, one can get terms" "upto a certain order, instead of getting term by term.") def polynomial(self, n): return self._eval_terms(n) def truncate(self, n=6): ffps = self.ffps pt_xk = ffps.xk.coeff(n) x, x0 = ffps.x, ffps.x0 return self.polynomial(n) + Order(pt_xk, (x, x0)) def _eval_derivative(self, x): raise NotImplementedError def integrate(self, x): raise NotImplementedError class FormalPowerSeriesProduct(FiniteFormalPowerSeries): """Represents the product of two formal power series of two functions. Explanation =========== No computation is performed. Terms are calculated using a term by term logic, instead of a point by point logic. There are two differences between a :obj:`FormalPowerSeries` object and a :obj:`FormalPowerSeriesProduct` object. The first argument contains the two functions involved in the product. Also, the coefficient sequence contains both the coefficient sequence of the formal power series of the involved functions. 
See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.FiniteFormalPowerSeries """ def __init__(self, *args): ffps, gfps = self.ffps, self.gfps k = ffps.ak.variables[0] self.coeff1 = sequence(ffps.ak.formula, (k, 0, oo)) k = gfps.ak.variables[0] self.coeff2 = sequence(gfps.ak.formula, (k, 0, oo)) @property def function(self): """Function of the product of two formal power series.""" return self.f * self.g def _eval_terms(self, n): """ Returns the first ``n`` terms of the product formal power series. Term by term logic is implemented here. Examples ======== >>> from sympy import fps, sin, exp >>> from sympy.abc import x >>> f1 = fps(sin(x)) >>> f2 = fps(exp(x)) >>> fprod = f1.product(f2, x) >>> fprod._eval_terms(4) x**3/3 + x**2 + x See Also ======== sympy.series.formal.FormalPowerSeries.product """ coeff1, coeff2 = self.coeff1, self.coeff2 aks = convolution(coeff1[:n], coeff2[:n]) terms = [] for i in range(0, n): terms.append(aks[i] * self.ffps.xk.coeff(i)) return Add(*terms) class FormalPowerSeriesCompose(FiniteFormalPowerSeries): """ Represents the composed formal power series of two functions. Explanation =========== No computation is performed. Terms are calculated using a term by term logic, instead of a point by point logic. There are two differences between a :obj:`FormalPowerSeries` object and a :obj:`FormalPowerSeriesCompose` object. The first argument contains the outer function and the inner function involved in the omposition. Also, the coefficient sequence contains the generic sequence which is to be multiplied by a custom ``bell_seq`` finite sequence. The finite terms will then be added up to get the final terms. See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.FiniteFormalPowerSeries """ @property def function(self): """Function for the composed formal power series.""" f, g, x = self.f, self.g, self.ffps.x return f.subs(x, g) def _eval_terms(self, n): """ Returns the first `n` terms of the composed formal power series. Term by term logic is implemented here. Explanation =========== The coefficient sequence of the :obj:`FormalPowerSeriesCompose` object is the generic sequence. It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get the final terms for the polynomial. Examples ======== >>> from sympy import fps, sin, exp >>> from sympy.abc import x >>> f1 = fps(exp(x)) >>> f2 = fps(sin(x)) >>> fcomp = f1.compose(f2, x) >>> fcomp._eval_terms(6) -x**5/15 - x**4/8 + x**2/2 + x + 1 >>> fcomp._eval_terms(8) x**7/90 - x**6/240 - x**5/15 - x**4/8 + x**2/2 + x + 1 See Also ======== sympy.series.formal.FormalPowerSeries.compose sympy.series.formal.FormalPowerSeries.coeff_bell """ ffps, gfps = self.ffps, self.gfps terms = [ffps.zero_coeff()] for i in range(1, n): bell_seq = gfps.coeff_bell(i) seq = (ffps.bell_coeff_seq * bell_seq) terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i)) return Add(*terms) class FormalPowerSeriesInverse(FiniteFormalPowerSeries): """ Represents the Inverse of a formal power series. Explanation =========== No computation is performed. Terms are calculated using a term by term logic, instead of a point by point logic. There is a single difference between a :obj:`FormalPowerSeries` object and a :obj:`FormalPowerSeriesInverse` object. The coefficient sequence contains the generic sequence which is to be multiplied by a custom ``bell_seq`` finite sequence. The finite terms will then be added up to get the final terms. 
See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.FiniteFormalPowerSeries """ def __init__(self, *args): ffps = self.ffps k = ffps.xk.variables[0] inv = ffps.zero_coeff() inv_seq = sequence(inv ** (-(k + 1)), (k, 1, oo)) self.aux_seq = ffps.sign_seq * ffps.fact_seq * inv_seq @property def function(self): """Function for the inverse of a formal power series.""" f = self.f return 1 / f @property def g(self): raise ValueError("Only one function is considered while performing" "inverse of a formal power series.") @property def gfps(self): raise ValueError("Only one function is considered while performing" "inverse of a formal power series.") def _eval_terms(self, n): """ Returns the first ``n`` terms of the composed formal power series. Term by term logic is implemented here. Explanation =========== The coefficient sequence of the `FormalPowerSeriesInverse` object is the generic sequence. It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get the final terms for the polynomial. Examples ======== >>> from sympy import fps, exp, cos >>> from sympy.abc import x >>> f1 = fps(exp(x)) >>> f2 = fps(cos(x)) >>> finv1, finv2 = f1.inverse(), f2.inverse() >>> finv1._eval_terms(6) -x**5/120 + x**4/24 - x**3/6 + x**2/2 - x + 1 >>> finv2._eval_terms(8) 61*x**6/720 + 5*x**4/24 + x**2/2 + 1 See Also ======== sympy.series.formal.FormalPowerSeries.inverse sympy.series.formal.FormalPowerSeries.coeff_bell """ ffps = self.ffps terms = [ffps.zero_coeff()] for i in range(1, n): bell_seq = ffps.coeff_bell(i) seq = (self.aux_seq * bell_seq) terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i)) return Add(*terms) def fps(f, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False): """ Generates Formal Power Series of ``f``. Explanation =========== Returns the formal series expansion of ``f`` around ``x = x0`` with respect to ``x`` in the form of a ``FormalPowerSeries`` object. Formal Power Series is represented using an explicit formula computed using different algorithms. See :func:`compute_fps` for the more details regarding the computation of formula. Parameters ========== x : Symbol, optional If x is None and ``f`` is univariate, the univariate symbols will be supplied, otherwise an error will be raised. x0 : number, optional Point to perform series expansion about. Default is 0. dir : {1, -1, '+', '-'}, optional If dir is 1 or '+' the series is calculated from the right and for -1 or '-' the series is calculated from the left. For smooth functions this flag will not alter the results. Default is 1. hyper : {True, False}, optional Set hyper to False to skip the hypergeometric algorithm. By default it is set to False. order : int, optional Order of the derivative of ``f``, Default is 4. rational : {True, False}, optional Set rational to False to skip rational algorithm. By default it is set to True. full : {True, False}, optional Set full to True to increase the range of rational algorithm. See :func:`rational_algorithm` for details. By default it is set to False. 
Examples ======== >>> from sympy import fps, ln, atan, sin >>> from sympy.abc import x, n Rational Functions >>> fps(ln(1 + x)).truncate() x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6) >>> fps(atan(x), full=True).truncate() x - x**3/3 + x**5/5 + O(x**6) Symbolic Functions >>> fps(x**n*sin(x**2), x).truncate(8) -x**(n + 6)/6 + x**(n + 2) + O(x**(n + 8)) See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.compute_fps """ f = sympify(f) if x is None: free = f.free_symbols if len(free) == 1: x = free.pop() elif not free: return f else: raise NotImplementedError("multivariate formal power series") result = compute_fps(f, x, x0, dir, hyper, order, rational, full) if result is None: return f return FormalPowerSeries(f, x, x0, dir, result)
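A short usage sketch of fps and the series arithmetic described above; the product output is the one quoted in FormalPowerSeries.product, and the sin(x) expansion is the standard Maclaurin series up to O(x**6). >>> from sympy import fps, sin, exp >>> from sympy.abc import x >>> f1 = fps(sin(x)) >>> f1.truncate() x - x**3/6 + x**5/120 + O(x**6) >>> f2 = fps(exp(x)) >>> f1.product(f2, x).truncate(4) x + x**2 + x**3/3 + O(x**4)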
from sympy.core import S, sympify, Expr, Rational, Dummy from sympy.core import Add, Mul, expand_power_base, expand_log from sympy.core.cache import cacheit from sympy.core.compatibility import default_sort_key, is_sequence from sympy.core.containers import Tuple from sympy.sets.sets import Complement from sympy.utilities.iterables import uniq class Order(Expr): r""" Represents the limiting behavior of some function. Explanation =========== The order of a function characterizes the function based on the limiting behavior of the function as it goes to some limit. Only taking the limit point to be a number is currently supported. This is expressed in big O notation [1]_. The formal definition for the order of a function `g(x)` about a point `a` is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if for any `\delta > 0` there exists a `M > 0` such that `|g(x)| \leq M|f(x)|` for `|x-a| < \delta`. This is equivalent to `\lim_{x \rightarrow a} \sup |g(x)/f(x)| < \infty`. Let's illustrate it on the following example by taking the expansion of `\sin(x)` about 0: .. math :: \sin(x) = x - x^3/3! + O(x^5) where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition of `O`, for any `\delta > 0` there is an `M` such that: .. math :: |x^5/5! - x^7/7! + ....| <= M|x^5| \text{ for } |x| < \delta or by the alternate definition: .. math :: \lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| < \infty which surely is true, because .. math :: \lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| = 1/5! As it is usually used, the order of a function can be intuitively thought of representing all terms of powers greater than the one specified. For example, `O(x^3)` corresponds to any terms proportional to `x^3, x^4,\ldots` and any higher power. For a polynomial, this leaves terms proportional to `x^2`, `x` and constants. Examples ======== >>> from sympy import O, oo, cos, pi >>> from sympy.abc import x, y >>> O(x + x**2) O(x) >>> O(x + x**2, (x, 0)) O(x) >>> O(x + x**2, (x, oo)) O(x**2, (x, oo)) >>> O(1 + x*y) O(1, x, y) >>> O(1 + x*y, (x, 0), (y, 0)) O(1, x, y) >>> O(1 + x*y, (x, oo), (y, oo)) O(x*y, (x, oo), (y, oo)) >>> O(1) in O(1, x) True >>> O(1, x) in O(1) False >>> O(x) in O(1, x) True >>> O(x**2) in O(x) True >>> O(x)*x O(x**2) >>> O(x) - O(x) O(x) >>> O(cos(x)) O(1) >>> O(cos(x), (x, pi/2)) O(x - pi/2, (x, pi/2)) References ========== .. [1] `Big O notation <https://en.wikipedia.org/wiki/Big_O_notation>`_ Notes ===== In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading term. ``O(f(x), x)`` is automatically transformed to ``O(f(x).as_leading_term(x),x)``. ``O(expr*f(x), x)`` is ``O(f(x), x)`` ``O(expr, x)`` is ``O(1)`` ``O(0, x)`` is 0. Multivariate O is also supported: ``O(f(x, y), x, y)`` is transformed to ``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)`` In the multivariate case, it is assumed the limits w.r.t. the various symbols commute. If no symbols are passed then all symbols in the expression are used and the limit point is assumed to be zero. 
""" is_Order = True __slots__ = () @cacheit def __new__(cls, expr, *args, **kwargs): expr = sympify(expr) if not args: if expr.is_Order: variables = expr.variables point = expr.point else: variables = list(expr.free_symbols) point = [S.Zero]*len(variables) else: args = list(args if is_sequence(args) else [args]) variables, point = [], [] if is_sequence(args[0]): for a in args: v, p = list(map(sympify, a)) variables.append(v) point.append(p) else: variables = list(map(sympify, args)) point = [S.Zero]*len(variables) if not all(v.is_symbol for v in variables): raise TypeError('Variables are not symbols, got %s' % variables) if len(list(uniq(variables))) != len(variables): raise ValueError('Variables are supposed to be unique symbols, got %s' % variables) if expr.is_Order: expr_vp = dict(expr.args[1:]) new_vp = dict(expr_vp) vp = dict(zip(variables, point)) for v, p in vp.items(): if v in new_vp.keys(): if p != new_vp[v]: raise NotImplementedError( "Mixing Order at different points is not supported.") else: new_vp[v] = p if set(expr_vp.keys()) == set(new_vp.keys()): return expr else: variables = list(new_vp.keys()) point = [new_vp[v] for v in variables] if expr is S.NaN: return S.NaN if any(x in p.free_symbols for x in variables for p in point): raise ValueError('Got %s as a point.' % point) if variables: if any(p != point[0] for p in point): raise NotImplementedError( "Multivariable orders at different points are not supported.") if point[0] is S.Infinity: s = {k: 1/Dummy() for k in variables} rs = {1/v: 1/k for k, v in s.items()} elif point[0] is S.NegativeInfinity: s = {k: -1/Dummy() for k in variables} rs = {-1/v: -1/k for k, v in s.items()} elif point[0] is not S.Zero: s = {k: Dummy() + point[0] for k in variables} rs = {v - point[0]: k - point[0] for k, v in s.items()} else: s = () rs = () expr = expr.subs(s) if expr.is_Add: expr = expr.factor() if s: args = tuple([r[0] for r in rs.items()]) else: args = tuple(variables) if len(variables) > 1: # XXX: better way? We need this expand() to # workaround e.g: expr = x*(x + y). # (x*(x + y)).as_leading_term(x, y) currently returns # x*y (wrong order term!). That's why we want to deal with # expand()'ed expr (handled in "if expr.is_Add" branch below). expr = expr.expand() old_expr = None while old_expr != expr: old_expr = expr if expr.is_Add: lst = expr.extract_leading_order(args) expr = Add(*[f.expr for (e, f) in lst]) elif expr: expr = expr.as_leading_term(*args) expr = expr.as_independent(*args, as_Add=False)[1] expr = expand_power_base(expr) expr = expand_log(expr) if len(args) == 1: # The definition of O(f(x)) symbol explicitly stated that # the argument of f(x) is irrelevant. That's why we can # combine some power exponents (only "on top" of the # expression tree for f(x)), e.g.: # x**p * (-x)**q -> x**(p+q) for real p, q. 
x = args[0] margs = list(Mul.make_args( expr.as_independent(x, as_Add=False)[1])) for i, t in enumerate(margs): if t.is_Pow: b, q = t.args if b in (x, -x) and q.is_real and not q.has(x): margs[i] = x**q elif b.is_Pow and not b.exp.has(x): b, r = b.args if b in (x, -x) and r.is_real: margs[i] = x**(r*q) elif b.is_Mul and b.args[0] is S.NegativeOne: b = -b if b.is_Pow and not b.exp.has(x): b, r = b.args if b in (x, -x) and r.is_real: margs[i] = x**(r*q) expr = Mul(*margs) expr = expr.subs(rs) if expr.is_Order: expr = expr.expr if not expr.has(*variables) and not expr.is_zero: expr = S.One # create Order instance: vp = dict(zip(variables, point)) variables.sort(key=default_sort_key) point = [vp[v] for v in variables] args = (expr,) + Tuple(*zip(variables, point)) obj = Expr.__new__(cls, *args) return obj def _eval_nseries(self, x, n, logx, cdir=0): return self @property def expr(self): return self.args[0] @property def variables(self): if self.args[1:]: return tuple(x[0] for x in self.args[1:]) else: return () @property def point(self): if self.args[1:]: return tuple(x[1] for x in self.args[1:]) else: return () @property def free_symbols(self): return self.expr.free_symbols | set(self.variables) def _eval_power(b, e): if e.is_Number and e.is_nonnegative: return b.func(b.expr ** e, *b.args[1:]) if e == O(1): return b return def as_expr_variables(self, order_symbols): if order_symbols is None: order_symbols = self.args[1:] else: if (not all(o[1] == order_symbols[0][1] for o in order_symbols) and not all(p == self.point[0] for p in self.point)): # pragma: no cover raise NotImplementedError('Order at points other than 0 ' 'or oo not supported, got %s as a point.' % self.point) if order_symbols and order_symbols[0][1] != self.point[0]: raise NotImplementedError( "Multiplying Order at different points is not supported.") order_symbols = dict(order_symbols) for s, p in dict(self.args[1:]).items(): if s not in order_symbols.keys(): order_symbols[s] = p order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0])) return self.expr, tuple(order_symbols) def removeO(self): return S.Zero def getO(self): return self @cacheit def contains(self, expr): r""" Return True if expr belongs to Order(self.expr, \*self.variables). Return False if self belongs to expr. Return None if the inclusion relation cannot be determined (e.g. when self and expr have different symbols). """ from sympy import powsimp if expr.is_zero: return True if expr is S.NaN: return False point = self.point[0] if self.point else S.Zero if expr.is_Order: if (any(p != point for p in expr.point) or any(p != point for p in self.point)): return None if expr.expr == self.expr: # O(1) + O(1), O(1) + O(1, x), etc. 
return all([x in self.args[1:] for x in expr.args[1:]]) if expr.expr.is_Add: return all([self.contains(x) for x in expr.expr.args]) if self.expr.is_Add and point.is_zero: return any([self.func(x, *self.args[1:]).contains(expr) for x in self.expr.args]) if self.variables and expr.variables: common_symbols = tuple( [s for s in self.variables if s in expr.variables]) elif self.variables: common_symbols = self.variables else: common_symbols = expr.variables if not common_symbols: return None if (self.expr.is_Pow and len(self.variables) == 1 and self.variables == expr.variables): symbol = self.variables[0] other = expr.expr.as_independent(symbol, as_Add=False)[1] if (other.is_Pow and other.base == symbol and self.expr.base == symbol): if point.is_zero: rv = (self.expr.exp - other.exp).is_nonpositive if point.is_infinite: rv = (self.expr.exp - other.exp).is_nonnegative if rv is not None: return rv r = None ratio = self.expr/expr.expr ratio = powsimp(ratio, deep=True, combine='exp') for s in common_symbols: from sympy.series.limits import Limit l = Limit(ratio, s, point).doit(heuristics=False) if not isinstance(l, Limit): l = l != 0 else: l = None if r is None: r = l else: if r != l: return return r if self.expr.is_Pow and len(self.variables) == 1: symbol = self.variables[0] other = expr.as_independent(symbol, as_Add=False)[1] if (other.is_Pow and other.base == symbol and self.expr.base == symbol): if point.is_zero: rv = (self.expr.exp - other.exp).is_nonpositive if point.is_infinite: rv = (self.expr.exp - other.exp).is_nonnegative if rv is not None: return rv obj = self.func(expr, *self.args[1:]) return self.contains(obj) def __contains__(self, other): result = self.contains(other) if result is None: raise TypeError('contains did not evaluate to a bool') return result def _eval_subs(self, old, new): if old in self.variables: newexpr = self.expr.subs(old, new) i = self.variables.index(old) newvars = list(self.variables) newpt = list(self.point) if new.is_symbol: newvars[i] = new else: syms = new.free_symbols if len(syms) == 1 or old in syms: if old in syms: var = self.variables[i] else: var = syms.pop() # First, try to substitute self.point in the "new" # expr to see if this is a fixed point. # E.g. O(y).subs(y, sin(x)) point = new.subs(var, self.point[i]) if point != self.point[i]: from sympy.solvers.solveset import solveset d = Dummy() sol = solveset(old - new.subs(var, d), d) if isinstance(sol, Complement): e1 = sol.args[0] e2 = sol.args[1] sol = set(e1) - set(e2) res = [dict(zip((d, ), sol))] point = d.subs(res[0]).limit(old, self.point[i]) newvars[i] = var newpt[i] = point elif old not in syms: del newvars[i], newpt[i] if not syms and new == self.point[i]: newvars.extend(syms) newpt.extend([S.Zero]*len(syms)) else: return return Order(newexpr, *zip(newvars, newpt)) def _eval_conjugate(self): expr = self.expr._eval_conjugate() if expr is not None: return self.func(expr, *self.args[1:]) def _eval_derivative(self, x): return self.func(self.expr.diff(x), *self.args[1:]) or self def _eval_transpose(self): expr = self.expr._eval_transpose() if expr is not None: return self.func(expr, *self.args[1:]) def _sage_(self): #XXX: SAGE doesn't have Order yet. Let's return 0 instead. return Rational(0)._sage_() def __neg__(self): return self O = Order
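
# ---------------------------------------------------------------------------
# A minimal usage sketch (illustrative only, not part of the module's API):
# it exercises behaviour documented in the Order docstring above together
# with removeO/getO. Running this file directly prints the results; merely
# importing the module is unaffected.
if __name__ == '__main__':  # pragma: no cover
    from sympy import Symbol, sin

    x = Symbol('x')
    print(O(x + x**2))          # higher powers are absorbed -> O(x)
    print(O(x)*x)               # multiplication raises the order -> O(x**2)
    s = sin(x).series(x, 0, 4)  # x - x**3/6 + O(x**4)
    print(s.getO())             # the order term of the expansion -> O(x**4)
    print(s.removeO())          # the expansion with its order term dropped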
from collections import defaultdict from functools import reduce from sympy.core import (sympify, Basic, S, Expr, expand_mul, factor_terms, Mul, Dummy, igcd, FunctionClass, Add, symbols, Wild, expand) from sympy.core.cache import cacheit from sympy.core.compatibility import iterable, SYMPY_INTS from sympy.core.function import count_ops, _mexpand from sympy.core.numbers import I, Integer from sympy.functions import sin, cos, exp, cosh, tanh, sinh, tan, cot, coth from sympy.functions.elementary.hyperbolic import HyperbolicFunction from sympy.functions.elementary.trigonometric import TrigonometricFunction from sympy.polys import Poly, factor, cancel, parallel_poly_from_expr from sympy.polys.domains import ZZ from sympy.polys.polyerrors import PolificationFailed from sympy.polys.polytools import groebner from sympy.simplify.cse_main import cse from sympy.strategies.core import identity from sympy.strategies.tree import greedy from sympy.utilities.misc import debug def trigsimp_groebner(expr, hints=[], quick=False, order="grlex", polynomial=False): """ Simplify trigonometric expressions using a groebner basis algorithm. Explanation =========== This routine takes a fraction involving trigonometric or hyperbolic expressions, and tries to simplify it. The primary metric is the total degree. Some attempts are made to choose the simplest possible expression of the minimal degree, but this is non-rigorous, and also very slow (see the ``quick=True`` option). If ``polynomial`` is set to True, instead of simplifying numerator and denominator together, this function just brings numerator and denominator into a canonical form. This is much faster, but has potentially worse results. However, if the input is a polynomial, then the result is guaranteed to be an equivalent polynomial of minimal degree. The most important option is hints. Its entries can be any of the following: - a natural number - a function - an iterable of the form (func, var1, var2, ...) - anything else, interpreted as a generator A number is used to indicate that the search space should be increased. A function is used to indicate that said function is likely to occur in a simplified expression. An iterable is used indicate that func(var1 + var2 + ...) is likely to occur in a simplified . An additional generator also indicates that it is likely to occur. (See examples below). This routine carries out various computationally intensive algorithms. The option ``quick=True`` can be used to suppress one particularly slow step (at the expense of potentially more complicated results, but never at the expense of increased total degree). Examples ======== >>> from sympy.abc import x, y >>> from sympy import sin, tan, cos, sinh, cosh, tanh >>> from sympy.simplify.trigsimp import trigsimp_groebner Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens: >>> ex = sin(x)*cos(x) >>> trigsimp_groebner(ex) sin(x)*cos(x) This is because ``trigsimp_groebner`` only looks for a simplification involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try ``2*x`` by passing ``hints=[2]``: >>> trigsimp_groebner(ex, hints=[2]) sin(2*x)/2 >>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2]) -cos(2*x) Increasing the search space this way can quickly become expensive. 
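    (An integer hint ``n`` makes the routine also work with ``sin(k*x)`` and
    ``cos(k*x)`` for every ``k <= n``, so the ideal, and with it the Groebner
    basis computation, grows quickly as ``n`` increases.)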
A much faster way is to give a specific expression that is likely to occur: >>> trigsimp_groebner(ex, hints=[sin(2*x)]) sin(2*x)/2 Hyperbolic expressions are similarly supported: >>> trigsimp_groebner(sinh(2*x)/sinh(x)) 2*cosh(x) Note how no hints had to be passed, since the expression already involved ``2*x``. The tangent function is also supported. You can either pass ``tan`` in the hints, to indicate that tan should be tried whenever cosine or sine are, or you can pass a specific generator: >>> trigsimp_groebner(sin(x)/cos(x), hints=[tan]) tan(x) >>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)]) tanh(x) Finally, you can use the iterable form to suggest that angle sum formulae should be tried: >>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y)) >>> trigsimp_groebner(ex, hints=[(tan, x, y)]) tan(x + y) """ # TODO # - preprocess by replacing everything by funcs we can handle # - optionally use cot instead of tan # - more intelligent hinting. # For example, if the ideal is small, and we have sin(x), sin(y), # add sin(x + y) automatically... ? # - algebraic numbers ... # - expressions of lowest degree are not distinguished properly # e.g. 1 - sin(x)**2 # - we could try to order the generators intelligently, so as to influence # which monomials appear in the quotient basis # THEORY # ------ # Ratsimpmodprime above can be used to "simplify" a rational function # modulo a prime ideal. "Simplify" mainly means finding an equivalent # expression of lower total degree. # # We intend to use this to simplify trigonometric functions. To do that, # we need to decide (a) which ring to use, and (b) modulo which ideal to # simplify. In practice, (a) means settling on a list of "generators" # a, b, c, ..., such that the fraction we want to simplify is a rational # function in a, b, c, ..., with coefficients in ZZ (integers). # (2) means that we have to decide what relations to impose on the # generators. There are two practical problems: # (1) The ideal has to be *prime* (a technical term). # (2) The relations have to be polynomials in the generators. # # We typically have two kinds of generators: # - trigonometric expressions, like sin(x), cos(5*x), etc # - "everything else", like gamma(x), pi, etc. # # Since this function is trigsimp, we will concentrate on what to do with # trigonometric expressions. We can also simplify hyperbolic expressions, # but the extensions should be clear. # # One crucial point is that all *other* generators really should behave # like indeterminates. In particular if (say) "I" is one of them, then # in fact I**2 + 1 = 0 and we may and will compute non-sensical # expressions. However, we can work with a dummy and add the relation # I**2 + 1 = 0 to our ideal, then substitute back in the end. # # Now regarding trigonometric generators. We split them into groups, # according to the argument of the trigonometric functions. We want to # organise this in such a way that most trigonometric identities apply in # the same group. For example, given sin(x), cos(2*x) and cos(y), we would # group as [sin(x), cos(2*x)] and [cos(y)]. # # Our prime ideal will be built in three steps: # (1) For each group, compute a "geometrically prime" ideal of relations. # Geometrically prime means that it generates a prime ideal in # CC[gens], not just ZZ[gens]. # (2) Take the union of all the generators of the ideals for all groups. # By the geometric primality condition, this is still prime. # (3) Add further inter-group relations which preserve primality. # # Step (1) works as follows. 
We will isolate common factors in the # argument, so that all our generators are of the form sin(n*x), cos(n*x) # or tan(n*x), with n an integer. Suppose first there are no tan terms. # The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since # X**2 + Y**2 - 1 is irreducible over CC. # Now, if we have a generator sin(n*x), than we can, using trig identities, # express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this # relation to the ideal, preserving geometric primality, since the quotient # ring is unchanged. # Thus we have treated all sin and cos terms. # For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0. # (This requires of course that we already have relations for cos(n*x) and # sin(n*x).) It is not obvious, but it seems that this preserves geometric # primality. # XXX A real proof would be nice. HELP! # Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of # CC[S, C, T]: # - it suffices to show that the projective closure in CP**3 is # irreducible # - using the half-angle substitutions, we can express sin(x), tan(x), # cos(x) as rational functions in tan(x/2) # - from this, we get a rational map from CP**1 to our curve # - this is a morphism, hence the curve is prime # # Step (2) is trivial. # # Step (3) works by adding selected relations of the form # sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is # preserved by the same argument as before. def parse_hints(hints): """Split hints into (n, funcs, iterables, gens).""" n = 1 funcs, iterables, gens = [], [], [] for e in hints: if isinstance(e, (SYMPY_INTS, Integer)): n = e elif isinstance(e, FunctionClass): funcs.append(e) elif iterable(e): iterables.append((e[0], e[1:])) # XXX sin(x+2y)? # Note: we go through polys so e.g. # sin(-x) -> -sin(x) -> sin(x) gens.extend(parallel_poly_from_expr( [e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens) else: gens.append(e) return n, funcs, iterables, gens def build_ideal(x, terms): """ Build generators for our ideal. ``Terms`` is an iterable with elements of the form (fn, coeff), indicating that we have a generator fn(coeff*x). If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed to appear in terms. Similarly for hyperbolic functions. For tan(n*x), sin(n*x) and cos(n*x) are guaranteed. """ I = [] y = Dummy('y') for fn, coeff in terms: for c, s, t, rel in ( [cos, sin, tan, cos(x)**2 + sin(x)**2 - 1], [cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]): if coeff == 1 and fn in [c, s]: I.append(rel) elif fn == t: I.append(t(coeff*x)*c(coeff*x) - s(coeff*x)) elif fn in [c, s]: cn = fn(coeff*y).expand(trig=True).subs(y, x) I.append(fn(coeff*x) - cn) return list(set(I)) def analyse_gens(gens, hints): """ Analyse the generators ``gens``, using the hints ``hints``. The meaning of ``hints`` is described in the main docstring. Return a new list of generators, and also the ideal we should work with. 
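        The return value is the triple ``(ideal, freegens, gens)``: the
        relations to reduce modulo, the generators that are not treated as
        trigonometric terms, and the trigonometric generators that are.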
""" # First parse the hints n, funcs, iterables, extragens = parse_hints(hints) debug('n=%s' % n, 'funcs:', funcs, 'iterables:', iterables, 'extragens:', extragens) # We just add the extragens to gens and analyse them as before gens = list(gens) gens.extend(extragens) # remove duplicates funcs = list(set(funcs)) iterables = list(set(iterables)) gens = list(set(gens)) # all the functions we can do anything with allfuncs = {sin, cos, tan, sinh, cosh, tanh} # sin(3*x) -> ((3, x), sin) trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens if g.func in allfuncs] # Our list of new generators - start with anything that we cannot # work with (i.e. is not a trigonometric term) freegens = [g for g in gens if g.func not in allfuncs] newgens = [] trigdict = {} for (coeff, var), fn in trigterms: trigdict.setdefault(var, []).append((coeff, fn)) res = [] # the ideal for key, val in trigdict.items(): # We have now assembeled a dictionary. Its keys are common # arguments in trigonometric expressions, and values are lists of # pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we # need to deal with fn(coeff*x0). We take the rational gcd of the # coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol", # all other arguments are integral multiples thereof. # We will build an ideal which works with sin(x), cos(x). # If hint tan is provided, also work with tan(x). Moreover, if # n > 1, also work with sin(k*x) for k <= n, and similarly for cos # (and tan if the hint is provided). Finally, any generators which # the ideal does not work with but we need to accommodate (either # because it was in expr or because it was provided as a hint) # we also build into the ideal. # This selection process is expressed in the list ``terms``. # build_ideal then generates the actual relations in our ideal, # from this list. fns = [x[1] for x in val] val = [x[0] for x in val] gcd = reduce(igcd, val) terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)] fs = set(funcs + fns) for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]): if any(x in fs for x in (c, s, t)): fs.add(c) fs.add(s) for fn in fs: for k in range(1, n + 1): terms.append((fn, k)) extra = [] for fn, v in terms: if fn == tan: extra.append((sin, v)) extra.append((cos, v)) if fn in [sin, cos] and tan in fs: extra.append((tan, v)) if fn == tanh: extra.append((sinh, v)) extra.append((cosh, v)) if fn in [sinh, cosh] and tanh in fs: extra.append((tanh, v)) terms.extend(extra) x = gcd*Mul(*key) r = build_ideal(x, terms) res.extend(r) newgens.extend({fn(v*x) for fn, v in terms}) # Add generators for compound expressions from iterables for fn, args in iterables: if fn == tan: # Tan expressions are recovered from sin and cos. iterables.extend([(sin, args), (cos, args)]) elif fn == tanh: # Tanh expressions are recovered from sihn and cosh. 
iterables.extend([(sinh, args), (cosh, args)]) else: dummys = symbols('d:%i' % len(args), cls=Dummy) expr = fn( Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args))) res.append(fn(Add(*args)) - expr) if myI in gens: res.append(myI**2 + 1) freegens.remove(myI) newgens.append(myI) return res, freegens, newgens myI = Dummy('I') expr = expr.subs(S.ImaginaryUnit, myI) subs = [(myI, S.ImaginaryUnit)] num, denom = cancel(expr).as_numer_denom() try: (pnum, pdenom), opt = parallel_poly_from_expr([num, denom]) except PolificationFailed: return expr debug('initial gens:', opt.gens) ideal, freegens, gens = analyse_gens(opt.gens, hints) debug('ideal:', ideal) debug('new gens:', gens, " -- len", len(gens)) debug('free gens:', freegens, " -- len", len(gens)) # NOTE we force the domain to be ZZ to stop polys from injecting generators # (which is usually a sign of a bug in the way we build the ideal) if not gens: return expr G = groebner(ideal, order=order, gens=gens, domain=ZZ) debug('groebner basis:', list(G), " -- len", len(G)) # If our fraction is a polynomial in the free generators, simplify all # coefficients separately: from sympy.simplify.ratsimp import ratsimpmodprime if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)): num = Poly(num, gens=gens+freegens).eject(*gens) res = [] for monom, coeff in num.terms(): ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens) # We compute the transitive closure of all generators that can # be reached from our generators through relations in the ideal. changed = True while changed: changed = False for p in ideal: p = Poly(p) if not ourgens.issuperset(p.gens) and \ not p.has_only_gens(*set(p.gens).difference(ourgens)): changed = True ourgens.update(p.exclude().gens) # NOTE preserve order! realgens = [x for x in gens if x in ourgens] # The generators of the ideal have now been (implicitly) split # into two groups: those involving ourgens and those that don't. # Since we took the transitive closure above, these two groups # live in subgrings generated by a *disjoint* set of variables. # Any sensible groebner basis algorithm will preserve this disjoint # structure (i.e. the elements of the groebner basis can be split # similarly), and and the two subsets of the groebner basis then # form groebner bases by themselves. (For the smaller generating # sets, of course.) ourG = [g.as_expr() for g in G.polys if g.has_only_gens(*ourgens.intersection(g.gens))] res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \ ratsimpmodprime(coeff/denom, ourG, order=order, gens=realgens, quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)) return Add(*res) # NOTE The following is simpler and has less assumptions on the # groebner basis algorithm. If the above turns out to be broken, # use this. return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \ ratsimpmodprime(coeff/denom, list(G), order=order, gens=gens, quick=quick, domain=ZZ) for monom, coeff in num.terms()]) else: return ratsimpmodprime( expr, list(G), order=order, gens=freegens+gens, quick=quick, domain=ZZ, polynomial=polynomial).subs(subs) _trigs = (TrigonometricFunction, HyperbolicFunction) def trigsimp(expr, **opts): """ reduces expression by using known trig identities Explanation =========== method: - Determine the method to use. Valid choices are 'matching' (default), 'groebner', 'combined', and 'fu'. If 'matching', simplify the expression recursively by targeting common patterns. If 'groebner', apply an experimental groebner basis algorithm. 
In this case further options are forwarded to ``trigsimp_groebner``, please refer to its docstring. If 'combined', first run the groebner basis algorithm with small default parameters, then run the 'matching' algorithm. 'fu' runs the collection of trigonometric transformations described by Fu, et al. (see the `fu` docstring). Examples ======== >>> from sympy import trigsimp, sin, cos, log >>> from sympy.abc import x >>> e = 2*sin(x)**2 + 2*cos(x)**2 >>> trigsimp(e) 2 Simplification occurs wherever trigonometric functions are located. >>> trigsimp(log(e)) log(2) Using `method="groebner"` (or `"combined"`) might lead to greater simplification. The old trigsimp routine can be accessed as with method 'old'. >>> from sympy import coth, tanh >>> t = 3*tanh(x)**7 - 2/coth(x)**7 >>> trigsimp(t, method='old') == t True >>> trigsimp(t) tanh(x)**7 """ from sympy.simplify.fu import fu expr = sympify(expr) _eval_trigsimp = getattr(expr, '_eval_trigsimp', None) if _eval_trigsimp is not None: return _eval_trigsimp(**opts) old = opts.pop('old', False) if not old: opts.pop('deep', None) opts.pop('recursive', None) method = opts.pop('method', 'matching') else: method = 'old' def groebnersimp(ex, **opts): def traverse(e): if e.is_Atom: return e args = [traverse(x) for x in e.args] if e.is_Function or e.is_Pow: args = [trigsimp_groebner(x, **opts) for x in args] return e.func(*args) new = traverse(ex) if not isinstance(new, Expr): return new return trigsimp_groebner(new, **opts) trigsimpfunc = { 'fu': (lambda x: fu(x, **opts)), 'matching': (lambda x: futrig(x)), 'groebner': (lambda x: groebnersimp(x, **opts)), 'combined': (lambda x: futrig(groebnersimp(x, polynomial=True, hints=[2, tan]))), 'old': lambda x: trigsimp_old(x, **opts), }[method] return trigsimpfunc(expr) def exptrigsimp(expr): """ Simplifies exponential / trigonometric / hyperbolic functions. Examples ======== >>> from sympy import exptrigsimp, exp, cosh, sinh >>> from sympy.abc import z >>> exptrigsimp(exp(z) + exp(-z)) 2*cosh(z) >>> exptrigsimp(cosh(z) - sinh(z)) exp(-z) """ from sympy.simplify.fu import hyper_as_trig, TR2i from sympy.simplify.simplify import bottom_up def exp_trig(e): # select the better of e, and e rewritten in terms of exp or trig # functions choices = [e] if e.has(*_trigs): choices.append(e.rewrite(exp)) choices.append(e.rewrite(cos)) return min(*choices, key=count_ops) newexpr = bottom_up(expr, exp_trig) def f(rv): if not rv.is_Mul: return rv commutative_part, noncommutative_part = rv.args_cnc() # Since as_powers_dict loses order information, # if there is more than one noncommutative factor, # it should only be used to simplify the commutative part. 
if (len(noncommutative_part) > 1): return f(Mul(*commutative_part))*Mul(*noncommutative_part) rvd = rv.as_powers_dict() newd = rvd.copy() def signlog(expr, sign=1): if expr is S.Exp1: return sign, 1 elif isinstance(expr, exp): return sign, expr.args[0] elif sign == 1: return signlog(-expr, sign=-1) else: return None, None ee = rvd[S.Exp1] for k in rvd: if k.is_Add and len(k.args) == 2: # k == c*(1 + sign*E**x) c = k.args[0] sign, x = signlog(k.args[1]/c) if not x: continue m = rvd[k] newd[k] -= m if ee == -x*m/2: # sinh and cosh newd[S.Exp1] -= ee ee = 0 if sign == 1: newd[2*c*cosh(x/2)] += m else: newd[-2*c*sinh(x/2)] += m elif newd[1 - sign*S.Exp1**x] == -m: # tanh del newd[1 - sign*S.Exp1**x] if sign == 1: newd[-c/tanh(x/2)] += m else: newd[-c*tanh(x/2)] += m else: newd[1 + sign*S.Exp1**x] += m newd[c] += m return Mul(*[k**newd[k] for k in newd]) newexpr = bottom_up(newexpr, f) # sin/cos and sinh/cosh ratios to tan and tanh, respectively if newexpr.has(HyperbolicFunction): e, f = hyper_as_trig(newexpr) newexpr = f(TR2i(e)) if newexpr.has(TrigonometricFunction): newexpr = TR2i(newexpr) # can we ever generate an I where there was none previously? if not (newexpr.has(I) and not expr.has(I)): expr = newexpr return expr #-------------------- the old trigsimp routines --------------------- def trigsimp_old(expr, *, first=True, **opts): """ Reduces expression by using known trig identities. Notes ===== deep: - Apply trigsimp inside all objects with arguments recursive: - Use common subexpression elimination (cse()) and apply trigsimp recursively (this is quite expensive if the expression is large) method: - Determine the method to use. Valid choices are 'matching' (default), 'groebner', 'combined', 'fu' and 'futrig'. If 'matching', simplify the expression recursively by pattern matching. If 'groebner', apply an experimental groebner basis algorithm. In this case further options are forwarded to ``trigsimp_groebner``, please refer to its docstring. If 'combined', first run the groebner basis algorithm with small default parameters, then run the 'matching' algorithm. 'fu' runs the collection of trigonometric transformations described by Fu, et al. (see the `fu` docstring) while `futrig` runs a subset of Fu-transforms that mimic the behavior of `trigsimp`. compare: - show input and output from `trigsimp` and `futrig` when different, but returns the `trigsimp` value. 
Examples ======== >>> from sympy import trigsimp, sin, cos, log, cot >>> from sympy.abc import x >>> e = 2*sin(x)**2 + 2*cos(x)**2 >>> trigsimp(e, old=True) 2 >>> trigsimp(log(e), old=True) log(2*sin(x)**2 + 2*cos(x)**2) >>> trigsimp(log(e), deep=True, old=True) log(2) Using `method="groebner"` (or `"combined"`) can sometimes lead to a lot more simplification: >>> e = (-sin(x) + 1)/cos(x) + cos(x)/(-sin(x) + 1) >>> trigsimp(e, old=True) (1 - sin(x))/cos(x) + cos(x)/(1 - sin(x)) >>> trigsimp(e, method="groebner", old=True) 2/cos(x) >>> trigsimp(1/cot(x)**2, compare=True, old=True) futrig: tan(x)**2 cot(x)**(-2) """ old = expr if first: if not expr.has(*_trigs): return expr trigsyms = set().union(*[t.free_symbols for t in expr.atoms(*_trigs)]) if len(trigsyms) > 1: from sympy.simplify.simplify import separatevars d = separatevars(expr) if d.is_Mul: d = separatevars(d, dict=True) or d if isinstance(d, dict): expr = 1 for k, v in d.items(): # remove hollow factoring was = v v = expand_mul(v) opts['first'] = False vnew = trigsimp(v, **opts) if vnew == v: vnew = was expr *= vnew old = expr else: if d.is_Add: for s in trigsyms: r, e = expr.as_independent(s) if r: opts['first'] = False expr = r + trigsimp(e, **opts) if not expr.is_Add: break old = expr recursive = opts.pop('recursive', False) deep = opts.pop('deep', False) method = opts.pop('method', 'matching') def groebnersimp(ex, deep, **opts): def traverse(e): if e.is_Atom: return e args = [traverse(x) for x in e.args] if e.is_Function or e.is_Pow: args = [trigsimp_groebner(x, **opts) for x in args] return e.func(*args) if deep: ex = traverse(ex) return trigsimp_groebner(ex, **opts) trigsimpfunc = { 'matching': (lambda x, d: _trigsimp(x, d)), 'groebner': (lambda x, d: groebnersimp(x, d, **opts)), 'combined': (lambda x, d: _trigsimp(groebnersimp(x, d, polynomial=True, hints=[2, tan]), d)) }[method] if recursive: w, g = cse(expr) g = trigsimpfunc(g[0], deep) for sub in reversed(w): g = g.subs(sub[0], sub[1]) g = trigsimpfunc(g, deep) result = g else: result = trigsimpfunc(expr, deep) if opts.get('compare', False): f = futrig(old) if f != result: print('\tfutrig:', f) return result def _dotrig(a, b): """Helper to tell whether ``a`` and ``b`` have the same sorts of symbols in them -- no need to test hyperbolic patterns against expressions that have no hyperbolics in them.""" return a.func == b.func and ( a.has(TrigonometricFunction) and b.has(TrigonometricFunction) or a.has(HyperbolicFunction) and b.has(HyperbolicFunction)) _trigpat = None def _trigpats(): global _trigpat a, b, c = symbols('a b c', cls=Wild) d = Wild('d', commutative=False) # for the simplifications like sinh/cosh -> tanh: # DO NOT REORDER THE FIRST 14 since these are assumed to be in this # order in _match_div_rewrite. 
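    # Each matcher below is a 4-tuple (pattern, replacement, check1, check2):
    # __trigsimp rewrites pattern -> replacement and, when the matched
    # exponent ``c`` is not an integer, it additionally requires check1 and
    # check2 (with the match substituted in) to be positive.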
matchers_division = ( (a*sin(b)**c/cos(b)**c, a*tan(b)**c, sin(b), cos(b)), (a*tan(b)**c*cos(b)**c, a*sin(b)**c, sin(b), cos(b)), (a*cot(b)**c*sin(b)**c, a*cos(b)**c, sin(b), cos(b)), (a*tan(b)**c/sin(b)**c, a/cos(b)**c, sin(b), cos(b)), (a*cot(b)**c/cos(b)**c, a/sin(b)**c, sin(b), cos(b)), (a*cot(b)**c*tan(b)**c, a, sin(b), cos(b)), (a*(cos(b) + 1)**c*(cos(b) - 1)**c, a*(-sin(b)**2)**c, cos(b) + 1, cos(b) - 1), (a*(sin(b) + 1)**c*(sin(b) - 1)**c, a*(-cos(b)**2)**c, sin(b) + 1, sin(b) - 1), (a*sinh(b)**c/cosh(b)**c, a*tanh(b)**c, S.One, S.One), (a*tanh(b)**c*cosh(b)**c, a*sinh(b)**c, S.One, S.One), (a*coth(b)**c*sinh(b)**c, a*cosh(b)**c, S.One, S.One), (a*tanh(b)**c/sinh(b)**c, a/cosh(b)**c, S.One, S.One), (a*coth(b)**c/cosh(b)**c, a/sinh(b)**c, S.One, S.One), (a*coth(b)**c*tanh(b)**c, a, S.One, S.One), (c*(tanh(a) + tanh(b))/(1 + tanh(a)*tanh(b)), tanh(a + b)*c, S.One, S.One), ) matchers_add = ( (c*sin(a)*cos(b) + c*cos(a)*sin(b) + d, sin(a + b)*c + d), (c*cos(a)*cos(b) - c*sin(a)*sin(b) + d, cos(a + b)*c + d), (c*sin(a)*cos(b) - c*cos(a)*sin(b) + d, sin(a - b)*c + d), (c*cos(a)*cos(b) + c*sin(a)*sin(b) + d, cos(a - b)*c + d), (c*sinh(a)*cosh(b) + c*sinh(b)*cosh(a) + d, sinh(a + b)*c + d), (c*cosh(a)*cosh(b) + c*sinh(a)*sinh(b) + d, cosh(a + b)*c + d), ) # for cos(x)**2 + sin(x)**2 -> 1 matchers_identity = ( (a*sin(b)**2, a - a*cos(b)**2), (a*tan(b)**2, a*(1/cos(b))**2 - a), (a*cot(b)**2, a*(1/sin(b))**2 - a), (a*sin(b + c), a*(sin(b)*cos(c) + sin(c)*cos(b))), (a*cos(b + c), a*(cos(b)*cos(c) - sin(b)*sin(c))), (a*tan(b + c), a*((tan(b) + tan(c))/(1 - tan(b)*tan(c)))), (a*sinh(b)**2, a*cosh(b)**2 - a), (a*tanh(b)**2, a - a*(1/cosh(b))**2), (a*coth(b)**2, a + a*(1/sinh(b))**2), (a*sinh(b + c), a*(sinh(b)*cosh(c) + sinh(c)*cosh(b))), (a*cosh(b + c), a*(cosh(b)*cosh(c) + sinh(b)*sinh(c))), (a*tanh(b + c), a*((tanh(b) + tanh(c))/(1 + tanh(b)*tanh(c)))), ) # Reduce any lingering artifacts, such as sin(x)**2 changing # to 1-cos(x)**2 when sin(x)**2 was "simpler" artifacts = ( (a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos), (a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos), (a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin), (a - a*cosh(b)**2 + c, -a*sinh(b)**2 + c, cosh), (a - a*(1/cosh(b))**2 + c, a*tanh(b)**2 + c, cosh), (a + a*(1/sinh(b))**2 + c, a*coth(b)**2 + c, sinh), # same as above but with noncommutative prefactor (a*d - a*d*cos(b)**2 + c, a*d*sin(b)**2 + c, cos), (a*d - a*d*(1/cos(b))**2 + c, -a*d*tan(b)**2 + c, cos), (a*d - a*d*(1/sin(b))**2 + c, -a*d*cot(b)**2 + c, sin), (a*d - a*d*cosh(b)**2 + c, -a*d*sinh(b)**2 + c, cosh), (a*d - a*d*(1/cosh(b))**2 + c, a*d*tanh(b)**2 + c, cosh), (a*d + a*d*(1/sinh(b))**2 + c, a*d*coth(b)**2 + c, sinh), ) _trigpat = (a, b, c, d, matchers_division, matchers_add, matchers_identity, artifacts) return _trigpat def _replace_mul_fpowxgpow(expr, f, g, rexp, h, rexph): """Helper for _match_div_rewrite. Replace f(b_)**c_*g(b_)**(rexp(c_)) with h(b)**rexph(c) if f(b_) and g(b_) are both positive or if c_ is an integer. 
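    For example, with ``f=sin``, ``g=cos``, ``rexp=lambda c: -c``, ``h=tan``
    and ``rexph=lambda c: c``, the product ``sin(x)**2*cos(x)**-2`` is
    rewritten as ``tan(x)**2``.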
""" # assert expr.is_Mul and expr.is_commutative and f != g fargs = defaultdict(int) gargs = defaultdict(int) args = [] for x in expr.args: if x.is_Pow or x.func in (f, g): b, e = x.as_base_exp() if b.is_positive or e.is_integer: if b.func == f: fargs[b.args[0]] += e continue elif b.func == g: gargs[b.args[0]] += e continue args.append(x) common = set(fargs) & set(gargs) hit = False while common: key = common.pop() fe = fargs.pop(key) ge = gargs.pop(key) if fe == rexp(ge): args.append(h(key)**rexph(fe)) hit = True else: fargs[key] = fe gargs[key] = ge if not hit: return expr while fargs: key, e = fargs.popitem() args.append(f(key)**e) while gargs: key, e = gargs.popitem() args.append(g(key)**e) return Mul(*args) _idn = lambda x: x _midn = lambda x: -x _one = lambda x: S.One def _match_div_rewrite(expr, i): """helper for __trigsimp""" if i == 0: expr = _replace_mul_fpowxgpow(expr, sin, cos, _midn, tan, _idn) elif i == 1: expr = _replace_mul_fpowxgpow(expr, tan, cos, _idn, sin, _idn) elif i == 2: expr = _replace_mul_fpowxgpow(expr, cot, sin, _idn, cos, _idn) elif i == 3: expr = _replace_mul_fpowxgpow(expr, tan, sin, _midn, cos, _midn) elif i == 4: expr = _replace_mul_fpowxgpow(expr, cot, cos, _midn, sin, _midn) elif i == 5: expr = _replace_mul_fpowxgpow(expr, cot, tan, _idn, _one, _idn) # i in (6, 7) is skipped elif i == 8: expr = _replace_mul_fpowxgpow(expr, sinh, cosh, _midn, tanh, _idn) elif i == 9: expr = _replace_mul_fpowxgpow(expr, tanh, cosh, _idn, sinh, _idn) elif i == 10: expr = _replace_mul_fpowxgpow(expr, coth, sinh, _idn, cosh, _idn) elif i == 11: expr = _replace_mul_fpowxgpow(expr, tanh, sinh, _midn, cosh, _midn) elif i == 12: expr = _replace_mul_fpowxgpow(expr, coth, cosh, _midn, sinh, _midn) elif i == 13: expr = _replace_mul_fpowxgpow(expr, coth, tanh, _idn, _one, _idn) else: return None return expr def _trigsimp(expr, deep=False): # protect the cache from non-trig patterns; we only allow # trig patterns to enter the cache if expr.has(*_trigs): return __trigsimp(expr, deep) return expr @cacheit def __trigsimp(expr, deep=False): """recursive helper for trigsimp""" from sympy.simplify.fu import TR10i if _trigpat is None: _trigpats() a, b, c, d, matchers_division, matchers_add, \ matchers_identity, artifacts = _trigpat if expr.is_Mul: # do some simplifications like sin/cos -> tan: if not expr.is_commutative: com, nc = expr.args_cnc() expr = _trigsimp(Mul._from_args(com), deep)*Mul._from_args(nc) else: for i, (pattern, simp, ok1, ok2) in enumerate(matchers_division): if not _dotrig(expr, pattern): continue newexpr = _match_div_rewrite(expr, i) if newexpr is not None: if newexpr != expr: expr = newexpr break else: continue # use SymPy matching instead res = expr.match(pattern) if res and res.get(c, 0): if not res[c].is_integer: ok = ok1.subs(res) if not ok.is_positive: continue ok = ok2.subs(res) if not ok.is_positive: continue # if "a" contains any of trig or hyperbolic funcs with # argument "b" then skip the simplification if any(w.args[0] == res[b] for w in res[a].atoms( TrigonometricFunction, HyperbolicFunction)): continue # simplify and finish: expr = simp.subs(res) break # process below if expr.is_Add: args = [] for term in expr.args: if not term.is_commutative: com, nc = term.args_cnc() nc = Mul._from_args(nc) term = Mul._from_args(com) else: nc = S.One term = _trigsimp(term, deep) for pattern, result in matchers_identity: res = term.match(pattern) if res is not None: term = result.subs(res) break args.append(term*nc) if args != expr.args: expr = Add(*args) expr = min(expr, 
expand(expr), key=count_ops) if expr.is_Add: for pattern, result in matchers_add: if not _dotrig(expr, pattern): continue expr = TR10i(expr) if expr.has(HyperbolicFunction): res = expr.match(pattern) # if "d" contains any trig or hyperbolic funcs with # argument "a" or "b" then skip the simplification; # this isn't perfect -- see tests if res is None or not (a in res and b in res) or any( w.args[0] in (res[a], res[b]) for w in res[d].atoms( TrigonometricFunction, HyperbolicFunction)): continue expr = result.subs(res) break # Reduce any lingering artifacts, such as sin(x)**2 changing # to 1 - cos(x)**2 when sin(x)**2 was "simpler" for pattern, result, ex in artifacts: if not _dotrig(expr, pattern): continue # Substitute a new wild that excludes some function(s) # to help influence a better match. This is because # sometimes, for example, 'a' would match sec(x)**2 a_t = Wild('a', exclude=[ex]) pattern = pattern.subs(a, a_t) result = result.subs(a, a_t) m = expr.match(pattern) was = None while m and was != expr: was = expr if m[a_t] == 0 or \ -m[a_t] in m[c].args or m[a_t] + m[c] == 0: break if d in m and m[a_t]*m[d] + m[c] == 0: break expr = result.subs(m) m = expr.match(pattern) m.setdefault(c, S.Zero) elif expr.is_Mul or expr.is_Pow or deep and expr.args: expr = expr.func(*[_trigsimp(a, deep) for a in expr.args]) try: if not expr.has(*_trigs): raise TypeError e = expr.atoms(exp) new = expr.rewrite(exp, deep=deep) if new == e: raise TypeError fnew = factor(new) if fnew != new: new = sorted([new, factor(new)], key=count_ops)[0] # if all exp that were introduced disappeared then accept it if not (new.atoms(exp) - e): expr = new except TypeError: pass return expr #------------------- end of old trigsimp routines -------------------- def futrig(e, *, hyper=True, **kwargs): """Return simplified ``e`` using Fu-like transformations. This is not the "Fu" algorithm. This is called by default from ``trigsimp``. By default, hyperbolics subexpressions will be simplified, but this can be disabled by setting ``hyper=False``. 
Examples ======== >>> from sympy import trigsimp, tan, sinh, tanh >>> from sympy.simplify.trigsimp import futrig >>> from sympy.abc import x >>> trigsimp(1/tan(x)**2) tan(x)**(-2) >>> futrig(sinh(x)/tanh(x)) cosh(x) """ from sympy.simplify.fu import hyper_as_trig from sympy.simplify.simplify import bottom_up e = sympify(e) if not isinstance(e, Basic): return e if not e.args: return e old = e e = bottom_up(e, _futrig) if hyper and e.has(HyperbolicFunction): e, f = hyper_as_trig(e) e = f(bottom_up(e, _futrig)) if e != old and e.is_Mul and e.args[0].is_Rational: # redistribute leading coeff on 2-arg Add e = Mul(*e.as_coeff_Mul()) return e def _futrig(e): """Helper for futrig.""" from sympy.simplify.fu import ( TR1, TR2, TR3, TR2i, TR10, L, TR10i, TR8, TR6, TR15, TR16, TR111, TR5, TRmorrie, TR11, _TR11, TR14, TR22, TR12) from sympy.core.compatibility import _nodes if not e.has(TrigonometricFunction): return e if e.is_Mul: coeff, e = e.as_independent(TrigonometricFunction) else: coeff = None Lops = lambda x: (L(x), x.count_ops(), _nodes(x), len(x.args), x.is_Add) trigs = lambda x: x.has(TrigonometricFunction) tree = [identity, ( TR3, # canonical angles TR1, # sec-csc -> cos-sin TR12, # expand tan of sum lambda x: _eapply(factor, x, trigs), TR2, # tan-cot -> sin-cos [identity, lambda x: _eapply(_mexpand, x, trigs)], TR2i, # sin-cos ratio -> tan lambda x: _eapply(lambda i: factor(i.normal()), x, trigs), TR14, # factored identities TR5, # sin-pow -> cos_pow TR10, # sin-cos of sums -> sin-cos prod TR11, _TR11, TR6, # reduce double angles and rewrite cos pows lambda x: _eapply(factor, x, trigs), TR14, # factored powers of identities [identity, lambda x: _eapply(_mexpand, x, trigs)], TR10i, # sin-cos products > sin-cos of sums TRmorrie, [identity, TR8], # sin-cos products -> sin-cos of sums [identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan [ lambda x: _eapply(expand_mul, TR5(x), trigs), lambda x: _eapply( expand_mul, TR15(x), trigs)], # pos/neg powers of sin [ lambda x: _eapply(expand_mul, TR6(x), trigs), lambda x: _eapply( expand_mul, TR16(x), trigs)], # pos/neg powers of cos TR111, # tan, sin, cos to neg power -> cot, csc, sec [identity, TR2i], # sin-cos ratio to tan [identity, lambda x: _eapply( expand_mul, TR22(x), trigs)], # tan-cot to sec-csc TR1, TR2, TR2i, [identity, lambda x: _eapply( factor_terms, TR12(x), trigs)], # expand tan of sum )] e = greedy(tree, objective=Lops)(e) if coeff is not None: e = coeff * e return e def _is_Expr(e): """_eapply helper to tell whether ``e`` and all its args are Exprs.""" from sympy import Derivative if isinstance(e, Derivative): return _is_Expr(e.expr) if not isinstance(e, Expr): return False return all(_is_Expr(i) for i in e.args) def _eapply(func, e, cond=None): """Apply ``func`` to ``e`` if all args are Exprs else only apply it to those args that *are* Exprs.""" if not isinstance(e, Expr): return e if _is_Expr(e) or not e.args: return func(e) return e.func(*[ _eapply(func, ei) if (cond is None or cond(ei)) else ei for ei in e.args])
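
# ---------------------------------------------------------------------------
# A minimal usage sketch (illustrative only, not part of the module's API):
# it only calls functions defined above and mirrors their docstring examples,
# which also give the expected output. Running this file directly prints the
# results; importing the module is unaffected.
if __name__ == '__main__':  # pragma: no cover
    from sympy import log
    from sympy.abc import x

    print(trigsimp(2*sin(x)**2 + 2*cos(x)**2))            # -> 2
    print(trigsimp(log(2*sin(x)**2 + 2*cos(x)**2)))       # -> log(2)
    print(trigsimp_groebner(sin(x)/cos(x), hints=[tan]))  # -> tan(x)
    print(exptrigsimp(exp(x) + exp(-x)))                  # -> 2*cosh(x)
    print(futrig(sinh(x)/tanh(x)))                        # -> cosh(x)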
from collections import defaultdict from sympy.core.add import Add from sympy.core.basic import S from sympy.core.compatibility import ordered from sympy.core.expr import Expr from sympy.core.exprtools import Factors, gcd_terms, factor_terms from sympy.core.function import expand_mul from sympy.core.mul import Mul from sympy.core.numbers import pi, I from sympy.core.power import Pow from sympy.core.symbol import Dummy from sympy.core.sympify import sympify from sympy.functions.combinatorial.factorials import binomial from sympy.functions.elementary.hyperbolic import ( cosh, sinh, tanh, coth, sech, csch, HyperbolicFunction) from sympy.functions.elementary.trigonometric import ( cos, sin, tan, cot, sec, csc, sqrt, TrigonometricFunction) from sympy.ntheory.factor_ import perfect_power from sympy.polys.polytools import factor from sympy.simplify.simplify import bottom_up from sympy.strategies.tree import greedy from sympy.strategies.core import identity, debug from sympy import SYMPY_DEBUG # ================== Fu-like tools =========================== def TR0(rv): """Simplification of rational polynomials, trying to simplify the expression, e.g. combine things like 3*x + 2*x, etc.... """ # although it would be nice to use cancel, it doesn't work # with noncommutatives return rv.normal().factor().expand() def TR1(rv): """Replace sec, csc with 1/cos, 1/sin Examples ======== >>> from sympy.simplify.fu import TR1, sec, csc >>> from sympy.abc import x >>> TR1(2*csc(x) + sec(x)) 1/cos(x) + 2/sin(x) """ def f(rv): if isinstance(rv, sec): a = rv.args[0] return S.One/cos(a) elif isinstance(rv, csc): a = rv.args[0] return S.One/sin(a) return rv return bottom_up(rv, f) def TR2(rv): """Replace tan and cot with sin/cos and cos/sin Examples ======== >>> from sympy.simplify.fu import TR2 >>> from sympy.abc import x >>> from sympy import tan, cot, sin, cos >>> TR2(tan(x)) sin(x)/cos(x) >>> TR2(cot(x)) cos(x)/sin(x) >>> TR2(tan(tan(x) - sin(x)/cos(x))) 0 """ def f(rv): if isinstance(rv, tan): a = rv.args[0] return sin(a)/cos(a) elif isinstance(rv, cot): a = rv.args[0] return cos(a)/sin(a) return rv return bottom_up(rv, f) def TR2i(rv, half=False): """Converts ratios involving sin and cos as follows:: sin(x)/cos(x) -> tan(x) sin(x)/(cos(x) + 1) -> tan(x/2) if half=True Examples ======== >>> from sympy.simplify.fu import TR2i >>> from sympy.abc import x, a >>> from sympy import sin, cos >>> TR2i(sin(x)/cos(x)) tan(x) Powers of the numerator and denominator are also recognized >>> TR2i(sin(x)**2/(cos(x) + 1)**2, half=True) tan(x/2)**2 The transformation does not take place unless assumptions allow (i.e. 
the base must be positive or the exponent must be an integer for both numerator and denominator) >>> TR2i(sin(x)**a/(cos(x) + 1)**a) (cos(x) + 1)**(-a)*sin(x)**a """ def f(rv): if not rv.is_Mul: return rv n, d = rv.as_numer_denom() if n.is_Atom or d.is_Atom: return rv def ok(k, e): # initial filtering of factors return ( (e.is_integer or k.is_positive) and ( k.func in (sin, cos) or (half and k.is_Add and len(k.args) >= 2 and any(any(isinstance(ai, cos) or ai.is_Pow and ai.base is cos for ai in Mul.make_args(a)) for a in k.args)))) n = n.as_powers_dict() ndone = [(k, n.pop(k)) for k in list(n.keys()) if not ok(k, n[k])] if not n: return rv d = d.as_powers_dict() ddone = [(k, d.pop(k)) for k in list(d.keys()) if not ok(k, d[k])] if not d: return rv # factoring if necessary def factorize(d, ddone): newk = [] for k in d: if k.is_Add and len(k.args) > 1: knew = factor(k) if half else factor_terms(k) if knew != k: newk.append((k, knew)) if newk: for i, (k, knew) in enumerate(newk): del d[k] newk[i] = knew newk = Mul(*newk).as_powers_dict() for k in newk: v = d[k] + newk[k] if ok(k, v): d[k] = v else: ddone.append((k, v)) del newk factorize(n, ndone) factorize(d, ddone) # joining t = [] for k in n: if isinstance(k, sin): a = cos(k.args[0], evaluate=False) if a in d and d[a] == n[k]: t.append(tan(k.args[0])**n[k]) n[k] = d[a] = None elif half: a1 = 1 + a if a1 in d and d[a1] == n[k]: t.append((tan(k.args[0]/2))**n[k]) n[k] = d[a1] = None elif isinstance(k, cos): a = sin(k.args[0], evaluate=False) if a in d and d[a] == n[k]: t.append(tan(k.args[0])**-n[k]) n[k] = d[a] = None elif half and k.is_Add and k.args[0] is S.One and \ isinstance(k.args[1], cos): a = sin(k.args[1].args[0], evaluate=False) if a in d and d[a] == n[k] and (d[a].is_integer or \ a.is_positive): t.append(tan(a.args[0]/2)**-n[k]) n[k] = d[a] = None if t: rv = Mul(*(t + [b**e for b, e in n.items() if e]))/\ Mul(*[b**e for b, e in d.items() if e]) rv *= Mul(*[b**e for b, e in ndone])/Mul(*[b**e for b, e in ddone]) return rv return bottom_up(rv, f) def TR3(rv): """Induced formula: example sin(-a) = -sin(a) Examples ======== >>> from sympy.simplify.fu import TR3 >>> from sympy.abc import x, y >>> from sympy import pi >>> from sympy import cos >>> TR3(cos(y - x*(y - x))) cos(x*(x - y) + y) >>> cos(pi/2 + x) -sin(x) >>> cos(30*pi/2 + x) -cos(x) """ from sympy.simplify.simplify import signsimp # Negative argument (already automatic for funcs like sin(-x) -> -sin(x) # but more complicated expressions can use it, too). Also, trig angles # between pi/4 and pi/2 are not reduced to an angle between 0 and pi/4. # The following are automatically handled: # Argument of type: pi/2 +/- angle # Argument of type: pi +/- angle # Argument of type : 2k*pi +/- angle def f(rv): if not isinstance(rv, TrigonometricFunction): return rv rv = rv.func(signsimp(rv.args[0])) if not isinstance(rv, TrigonometricFunction): return rv if (rv.args[0] - S.Pi/4).is_positive is (S.Pi/2 - rv.args[0]).is_positive is True: fmap = {cos: sin, sin: cos, tan: cot, cot: tan, sec: csc, csc: sec} rv = fmap[rv.func](S.Pi/2 - rv.args[0]) return rv return bottom_up(rv, f) def TR4(rv): """Identify values of special angles. a= 0 pi/6 pi/4 pi/3 pi/2 ---------------------------------------------------- cos(a) 0 1/2 sqrt(2)/2 sqrt(3)/2 1 sin(a) 1 sqrt(3)/2 sqrt(2)/2 1/2 0 tan(a) 0 sqt(3)/3 1 sqrt(3) -- Examples ======== >>> from sympy import pi >>> from sympy import cos, sin, tan, cot >>> for s in (0, pi/6, pi/4, pi/3, pi/2): ... print('%s %s %s %s' % (cos(s), sin(s), tan(s), cot(s))) ... 
1 0 0 zoo sqrt(3)/2 1/2 sqrt(3)/3 sqrt(3) sqrt(2)/2 sqrt(2)/2 1 1 1/2 sqrt(3)/2 sqrt(3) sqrt(3)/3 0 1 zoo 0 """ # special values at 0, pi/6, pi/4, pi/3, pi/2 already handled return rv def _TR56(rv, f, g, h, max, pow): """Helper for TR5 and TR6 to replace f**2 with h(g**2) Options ======= max : controls size of exponent that can appear on f e.g. if max=4 then f**4 will be changed to h(g**2)**2. pow : controls whether the exponent must be a perfect power of 2 e.g. if pow=True (and max >= 6) then f**6 will not be changed but f**8 will be changed to h(g**2)**4 >>> from sympy.simplify.fu import _TR56 as T >>> from sympy.abc import x >>> from sympy import sin, cos >>> h = lambda x: 1 - x >>> T(sin(x)**3, sin, cos, h, 4, False) sin(x)**3 >>> T(sin(x)**6, sin, cos, h, 6, False) (1 - cos(x)**2)**3 >>> T(sin(x)**6, sin, cos, h, 6, True) sin(x)**6 >>> T(sin(x)**8, sin, cos, h, 10, True) (1 - cos(x)**2)**4 """ def _f(rv): # I'm not sure if this transformation should target all even powers # or only those expressible as powers of 2. Also, should it only # make the changes in powers that appear in sums -- making an isolated # change is not going to allow a simplification as far as I can tell. if not (rv.is_Pow and rv.base.func == f): return rv if not rv.exp.is_real: return rv if (rv.exp < 0) == True: return rv if (rv.exp > max) == True: return rv if rv.exp == 2: return h(g(rv.base.args[0])**2) else: if rv.exp == 4: e = 2 elif not pow: if rv.exp % 2: return rv e = rv.exp//2 else: p = perfect_power(rv.exp) if not p: return rv e = rv.exp//2 return h(g(rv.base.args[0])**2)**e return bottom_up(rv, _f) def TR5(rv, max=4, pow=False): """Replacement of sin**2 with 1 - cos(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR5 >>> from sympy.abc import x >>> from sympy import sin >>> TR5(sin(x)**2) 1 - cos(x)**2 >>> TR5(sin(x)**-2) # unchanged sin(x)**(-2) >>> TR5(sin(x)**4) (1 - cos(x)**2)**2 """ return _TR56(rv, sin, cos, lambda x: 1 - x, max=max, pow=pow) def TR6(rv, max=4, pow=False): """Replacement of cos**2 with 1 - sin(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR6 >>> from sympy.abc import x >>> from sympy import cos >>> TR6(cos(x)**2) 1 - sin(x)**2 >>> TR6(cos(x)**-2) #unchanged cos(x)**(-2) >>> TR6(cos(x)**4) (1 - sin(x)**2)**2 """ return _TR56(rv, cos, sin, lambda x: 1 - x, max=max, pow=pow) def TR7(rv): """Lowering the degree of cos(x)**2. Examples ======== >>> from sympy.simplify.fu import TR7 >>> from sympy.abc import x >>> from sympy import cos >>> TR7(cos(x)**2) cos(2*x)/2 + 1/2 >>> TR7(cos(x)**2 + 1) cos(2*x)/2 + 3/2 """ def f(rv): if not (rv.is_Pow and rv.base.func == cos and rv.exp == 2): return rv return (1 + cos(2*rv.base.args[0]))/2 return bottom_up(rv, f) def TR8(rv, first=True): """Converting products of ``cos`` and/or ``sin`` to a sum or difference of ``cos`` and or ``sin`` terms. 
Examples ======== >>> from sympy.simplify.fu import TR8 >>> from sympy import cos, sin >>> TR8(cos(2)*cos(3)) cos(5)/2 + cos(1)/2 >>> TR8(cos(2)*sin(3)) sin(5)/2 + sin(1)/2 >>> TR8(sin(2)*sin(3)) -cos(5)/2 + cos(1)/2 """ def f(rv): if not ( rv.is_Mul or rv.is_Pow and rv.base.func in (cos, sin) and (rv.exp.is_integer or rv.base.is_positive)): return rv if first: n, d = [expand_mul(i) for i in rv.as_numer_denom()] newn = TR8(n, first=False) newd = TR8(d, first=False) if newn != n or newd != d: rv = gcd_terms(newn/newd) if rv.is_Mul and rv.args[0].is_Rational and \ len(rv.args) == 2 and rv.args[1].is_Add: rv = Mul(*rv.as_coeff_Mul()) return rv args = {cos: [], sin: [], None: []} for a in ordered(Mul.make_args(rv)): if a.func in (cos, sin): args[a.func].append(a.args[0]) elif (a.is_Pow and a.exp.is_Integer and a.exp > 0 and \ a.base.func in (cos, sin)): # XXX this is ok but pathological expression could be handled # more efficiently as in TRmorrie args[a.base.func].extend([a.base.args[0]]*a.exp) else: args[None].append(a) c = args[cos] s = args[sin] if not (c and s or len(c) > 1 or len(s) > 1): return rv args = args[None] n = min(len(c), len(s)) for i in range(n): a1 = s.pop() a2 = c.pop() args.append((sin(a1 + a2) + sin(a1 - a2))/2) while len(c) > 1: a1 = c.pop() a2 = c.pop() args.append((cos(a1 + a2) + cos(a1 - a2))/2) if c: args.append(cos(c.pop())) while len(s) > 1: a1 = s.pop() a2 = s.pop() args.append((-cos(a1 + a2) + cos(a1 - a2))/2) if s: args.append(sin(s.pop())) return TR8(expand_mul(Mul(*args))) return bottom_up(rv, f) def TR9(rv): """Sum of ``cos`` or ``sin`` terms as a product of ``cos`` or ``sin``. Examples ======== >>> from sympy.simplify.fu import TR9 >>> from sympy import cos, sin >>> TR9(cos(1) + cos(2)) 2*cos(1/2)*cos(3/2) >>> TR9(cos(1) + 2*sin(1) + 2*sin(2)) cos(1) + 4*sin(3/2)*cos(1/2) If no change is made by TR9, no re-arrangement of the expression will be made. For example, though factoring of common term is attempted, if the factored expression wasn't changed, the original expression will be returned: >>> TR9(cos(3) + cos(3)*cos(2)) cos(3) + cos(2)*cos(3) """ def f(rv): if not rv.is_Add: return rv def do(rv, first=True): # cos(a)+/-cos(b) can be combined into a product of cosines and # sin(a)+/-sin(b) can be combined into a product of cosine and # sine. # # If there are more than two args, the pairs which "work" will # have a gcd extractable and the remaining two terms will have # the above structure -- all pairs must be checked to find the # ones that work. args that don't have a common set of symbols # are skipped since this doesn't lead to a simpler formula and # also has the arbitrariness of combining, for example, the x # and y term instead of the y and z term in something like # cos(x) + cos(y) + cos(z). 
if not rv.is_Add: return rv args = list(ordered(rv.args)) if len(args) != 2: hit = False for i in range(len(args)): ai = args[i] if ai is None: continue for j in range(i + 1, len(args)): aj = args[j] if aj is None: continue was = ai + aj new = do(was) if new != was: args[i] = new # update in place args[j] = None hit = True break # go to next i if hit: rv = Add(*[_f for _f in args if _f]) if rv.is_Add: rv = do(rv) return rv # two-arg Add split = trig_split(*args) if not split: return rv gcd, n1, n2, a, b, iscos = split # application of rule if possible if iscos: if n1 == n2: return gcd*n1*2*cos((a + b)/2)*cos((a - b)/2) if n1 < 0: a, b = b, a return -2*gcd*sin((a + b)/2)*sin((a - b)/2) else: if n1 == n2: return gcd*n1*2*sin((a + b)/2)*cos((a - b)/2) if n1 < 0: a, b = b, a return 2*gcd*cos((a + b)/2)*sin((a - b)/2) return process_common_addends(rv, do) # DON'T sift by free symbols return bottom_up(rv, f) def TR10(rv, first=True): """Separate sums in ``cos`` and ``sin``. Examples ======== >>> from sympy.simplify.fu import TR10 >>> from sympy.abc import a, b, c >>> from sympy import cos, sin >>> TR10(cos(a + b)) -sin(a)*sin(b) + cos(a)*cos(b) >>> TR10(sin(a + b)) sin(a)*cos(b) + sin(b)*cos(a) >>> TR10(sin(a + b + c)) (-sin(a)*sin(b) + cos(a)*cos(b))*sin(c) + \ (sin(a)*cos(b) + sin(b)*cos(a))*cos(c) """ def f(rv): if not rv.func in (cos, sin): return rv f = rv.func arg = rv.args[0] if arg.is_Add: if first: args = list(ordered(arg.args)) else: args = list(arg.args) a = args.pop() b = Add._from_args(args) if b.is_Add: if f == sin: return sin(a)*TR10(cos(b), first=False) + \ cos(a)*TR10(sin(b), first=False) else: return cos(a)*TR10(cos(b), first=False) - \ sin(a)*TR10(sin(b), first=False) else: if f == sin: return sin(a)*cos(b) + cos(a)*sin(b) else: return cos(a)*cos(b) - sin(a)*sin(b) return rv return bottom_up(rv, f) def TR10i(rv): """Sum of products to function of sum. Examples ======== >>> from sympy.simplify.fu import TR10i >>> from sympy import cos, sin, sqrt >>> from sympy.abc import x >>> TR10i(cos(1)*cos(3) + sin(1)*sin(3)) cos(2) >>> TR10i(cos(1)*sin(3) + sin(1)*cos(3) + cos(3)) cos(3) + sin(4) >>> TR10i(sqrt(2)*cos(x)*x + sqrt(6)*sin(x)*x) 2*sqrt(2)*x*sin(x + pi/6) """ global _ROOT2, _ROOT3, _invROOT3 if _ROOT2 is None: _roots() def f(rv): if not rv.is_Add: return rv def do(rv, first=True): # args which can be expressed as A*(cos(a)*cos(b)+/-sin(a)*sin(b)) # or B*(cos(a)*sin(b)+/-cos(b)*sin(a)) can be combined into # A*f(a+/-b) where f is either sin or cos. # # If there are more than two args, the pairs which "work" will have # a gcd extractable and the remaining two terms will have the above # structure -- all pairs must be checked to find the ones that # work. 
if not rv.is_Add: return rv args = list(ordered(rv.args)) if len(args) != 2: hit = False for i in range(len(args)): ai = args[i] if ai is None: continue for j in range(i + 1, len(args)): aj = args[j] if aj is None: continue was = ai + aj new = do(was) if new != was: args[i] = new # update in place args[j] = None hit = True break # go to next i if hit: rv = Add(*[_f for _f in args if _f]) if rv.is_Add: rv = do(rv) return rv # two-arg Add split = trig_split(*args, two=True) if not split: return rv gcd, n1, n2, a, b, same = split # identify and get c1 to be cos then apply rule if possible if same: # coscos, sinsin gcd = n1*gcd if n1 == n2: return gcd*cos(a - b) return gcd*cos(a + b) else: #cossin, cossin gcd = n1*gcd if n1 == n2: return gcd*sin(a + b) return gcd*sin(b - a) rv = process_common_addends( rv, do, lambda x: tuple(ordered(x.free_symbols))) # need to check for inducible pairs in ratio of sqrt(3):1 that # appeared in different lists when sorting by coefficient while rv.is_Add: byrad = defaultdict(list) for a in rv.args: hit = 0 if a.is_Mul: for ai in a.args: if ai.is_Pow and ai.exp is S.Half and \ ai.base.is_Integer: byrad[ai].append(a) hit = 1 break if not hit: byrad[S.One].append(a) # no need to check all pairs -- just check for the onees # that have the right ratio args = [] for a in byrad: for b in [_ROOT3*a, _invROOT3]: if b in byrad: for i in range(len(byrad[a])): if byrad[a][i] is None: continue for j in range(len(byrad[b])): if byrad[b][j] is None: continue was = Add(byrad[a][i] + byrad[b][j]) new = do(was) if new != was: args.append(new) byrad[a][i] = None byrad[b][j] = None break if args: rv = Add(*(args + [Add(*[_f for _f in v if _f]) for v in byrad.values()])) else: rv = do(rv) # final pass to resolve any new inducible pairs break return rv return bottom_up(rv, f) def TR11(rv, base=None): """Function of double angle to product. The ``base`` argument can be used to indicate what is the un-doubled argument, e.g. if 3*pi/7 is the base then cosine and sine functions with argument 6*pi/7 will be replaced. 
Examples ======== >>> from sympy.simplify.fu import TR11 >>> from sympy import cos, sin, pi >>> from sympy.abc import x >>> TR11(sin(2*x)) 2*sin(x)*cos(x) >>> TR11(cos(2*x)) -sin(x)**2 + cos(x)**2 >>> TR11(sin(4*x)) 4*(-sin(x)**2 + cos(x)**2)*sin(x)*cos(x) >>> TR11(sin(4*x/3)) 4*(-sin(x/3)**2 + cos(x/3)**2)*sin(x/3)*cos(x/3) If the arguments are simply integers, no change is made unless a base is provided: >>> TR11(cos(2)) cos(2) >>> TR11(cos(4), 2) -sin(2)**2 + cos(2)**2 There is a subtle issue here in that autosimplification will convert some higher angles to lower angles >>> cos(6*pi/7) + cos(3*pi/7) -cos(pi/7) + cos(3*pi/7) The 6*pi/7 angle is now pi/7 but can be targeted with TR11 by supplying the 3*pi/7 base: >>> TR11(_, 3*pi/7) -sin(3*pi/7)**2 + cos(3*pi/7)**2 + cos(3*pi/7) """ def f(rv): if not rv.func in (cos, sin): return rv if base: f = rv.func t = f(base*2) co = S.One if t.is_Mul: co, t = t.as_coeff_Mul() if not t.func in (cos, sin): return rv if rv.args[0] == t.args[0]: c = cos(base) s = sin(base) if f is cos: return (c**2 - s**2)/co else: return 2*c*s/co return rv elif not rv.args[0].is_Number: # make a change if the leading coefficient's numerator is # divisible by 2 c, m = rv.args[0].as_coeff_Mul(rational=True) if c.p % 2 == 0: arg = c.p//2*m/c.q c = TR11(cos(arg)) s = TR11(sin(arg)) if rv.func == sin: rv = 2*s*c else: rv = c**2 - s**2 return rv return bottom_up(rv, f) def _TR11(rv): """ Helper for TR11 to find half-arguments for sin in factors of num/den that appear in cos or sin factors in the den/num. Examples ======== >>> from sympy.simplify.fu import TR11, _TR11 >>> from sympy import cos, sin >>> from sympy.abc import x >>> TR11(sin(x/3)/(cos(x/6))) sin(x/3)/cos(x/6) >>> _TR11(sin(x/3)/(cos(x/6))) 2*sin(x/6) >>> TR11(sin(x/6)/(sin(x/3))) sin(x/6)/sin(x/3) >>> _TR11(sin(x/6)/(sin(x/3))) 1/(2*cos(x/6)) """ def f(rv): if not isinstance(rv, Expr): return rv def sincos_args(flat): # find arguments of sin and cos that # appears as bases in args of flat # and have Integer exponents args = defaultdict(set) for fi in Mul.make_args(flat): b, e = fi.as_base_exp() if e.is_Integer and e > 0: if b.func in (cos, sin): args[b.func].add(b.args[0]) return args num_args, den_args = map(sincos_args, rv.as_numer_denom()) def handle_match(rv, num_args, den_args): # for arg in sin args of num_args, look for arg/2 # in den_args and pass this half-angle to TR11 # for handling in rv for narg in num_args[sin]: half = narg/2 if half in den_args[cos]: func = cos elif half in den_args[sin]: func = sin else: continue rv = TR11(rv, half) den_args[func].remove(half) return rv # sin in num, sin or cos in den rv = handle_match(rv, num_args, den_args) # sin in den, sin or cos in num rv = handle_match(rv, den_args, num_args) return rv return bottom_up(rv, f) def TR12(rv, first=True): """Separate sums in ``tan``. Examples ======== >>> from sympy.abc import x, y >>> from sympy import tan >>> from sympy.simplify.fu import TR12 >>> TR12(tan(x + y)) (tan(x) + tan(y))/(-tan(x)*tan(y) + 1) """ def f(rv): if not rv.func == tan: return rv arg = rv.args[0] if arg.is_Add: if first: args = list(ordered(arg.args)) else: args = list(arg.args) a = args.pop() b = Add._from_args(args) if b.is_Add: tb = TR12(tan(b), first=False) else: tb = tan(b) return (tan(a) + tb)/(1 - tan(a)*tb) return rv return bottom_up(rv, f) def TR12i(rv): """Combine tan arguments as (tan(y) + tan(x))/(tan(x)*tan(y) - 1) -> -tan(x + y). 
Examples ======== >>> from sympy.simplify.fu import TR12i >>> from sympy import tan >>> from sympy.abc import a, b, c >>> ta, tb, tc = [tan(i) for i in (a, b, c)] >>> TR12i((ta + tb)/(-ta*tb + 1)) tan(a + b) >>> TR12i((ta + tb)/(ta*tb - 1)) -tan(a + b) >>> TR12i((-ta - tb)/(ta*tb - 1)) tan(a + b) >>> eq = (ta + tb)/(-ta*tb + 1)**2*(-3*ta - 3*tc)/(2*(ta*tc - 1)) >>> TR12i(eq.expand()) -3*tan(a + b)*tan(a + c)/(2*(tan(a) + tan(b) - 1)) """ from sympy import factor def f(rv): if not (rv.is_Add or rv.is_Mul or rv.is_Pow): return rv n, d = rv.as_numer_denom() if not d.args or not n.args: return rv dok = {} def ok(di): m = as_f_sign_1(di) if m: g, f, s = m if s is S.NegativeOne and f.is_Mul and len(f.args) == 2 and \ all(isinstance(fi, tan) for fi in f.args): return g, f d_args = list(Mul.make_args(d)) for i, di in enumerate(d_args): m = ok(di) if m: g, t = m s = Add(*[_.args[0] for _ in t.args]) dok[s] = S.One d_args[i] = g continue if di.is_Add: di = factor(di) if di.is_Mul: d_args.extend(di.args) d_args[i] = S.One elif di.is_Pow and (di.exp.is_integer or di.base.is_positive): m = ok(di.base) if m: g, t = m s = Add(*[_.args[0] for _ in t.args]) dok[s] = di.exp d_args[i] = g**di.exp else: di = factor(di) if di.is_Mul: d_args.extend(di.args) d_args[i] = S.One if not dok: return rv def ok(ni): if ni.is_Add and len(ni.args) == 2: a, b = ni.args if isinstance(a, tan) and isinstance(b, tan): return a, b n_args = list(Mul.make_args(factor_terms(n))) hit = False for i, ni in enumerate(n_args): m = ok(ni) if not m: m = ok(-ni) if m: n_args[i] = S.NegativeOne else: if ni.is_Add: ni = factor(ni) if ni.is_Mul: n_args.extend(ni.args) n_args[i] = S.One continue elif ni.is_Pow and ( ni.exp.is_integer or ni.base.is_positive): m = ok(ni.base) if m: n_args[i] = S.One else: ni = factor(ni) if ni.is_Mul: n_args.extend(ni.args) n_args[i] = S.One continue else: continue else: n_args[i] = S.One hit = True s = Add(*[_.args[0] for _ in m]) ed = dok[s] newed = ed.extract_additively(S.One) if newed is not None: if newed: dok[s] = newed else: dok.pop(s) n_args[i] *= -tan(s) if hit: rv = Mul(*n_args)/Mul(*d_args)/Mul(*[(Add(*[ tan(a) for a in i.args]) - 1)**e for i, e in dok.items()]) return rv return bottom_up(rv, f) def TR13(rv): """Change products of ``tan`` or ``cot``. Examples ======== >>> from sympy.simplify.fu import TR13 >>> from sympy import tan, cot >>> TR13(tan(3)*tan(2)) -tan(2)/tan(5) - tan(3)/tan(5) + 1 >>> TR13(cot(3)*cot(2)) cot(2)*cot(5) + 1 + cot(3)*cot(5) """ def f(rv): if not rv.is_Mul: return rv # XXX handle products of powers? or let power-reducing handle it? 
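# Collect the arguments of tan and cot factors; any other factor goes to
# args[None] and is reassembled, unchanged, into the product at the end.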
args = {tan: [], cot: [], None: []} for a in ordered(Mul.make_args(rv)): if a.func in (tan, cot): args[a.func].append(a.args[0]) else: args[None].append(a) t = args[tan] c = args[cot] if len(t) < 2 and len(c) < 2: return rv args = args[None] while len(t) > 1: t1 = t.pop() t2 = t.pop() args.append(1 - (tan(t1)/tan(t1 + t2) + tan(t2)/tan(t1 + t2))) if t: args.append(tan(t.pop())) while len(c) > 1: t1 = c.pop() t2 = c.pop() args.append(1 + cot(t1)*cot(t1 + t2) + cot(t2)*cot(t1 + t2)) if c: args.append(cot(c.pop())) return Mul(*args) return bottom_up(rv, f) def TRmorrie(rv): """Returns cos(x)*cos(2*x)*...*cos(2**(k-1)*x) -> sin(2**k*x)/(2**k*sin(x)) Examples ======== >>> from sympy.simplify.fu import TRmorrie, TR8, TR3 >>> from sympy.abc import x >>> from sympy import Mul, cos, pi >>> TRmorrie(cos(x)*cos(2*x)) sin(4*x)/(4*sin(x)) >>> TRmorrie(7*Mul(*[cos(x) for x in range(10)])) 7*sin(12)*sin(16)*cos(5)*cos(7)*cos(9)/(64*sin(1)*sin(3)) Sometimes autosimplification will cause a power to be not recognized. e.g. in the following, cos(4*pi/7) automatically simplifies to -cos(3*pi/7) so only 2 of the 3 terms are recognized: >>> TRmorrie(cos(pi/7)*cos(2*pi/7)*cos(4*pi/7)) -sin(3*pi/7)*cos(3*pi/7)/(4*sin(pi/7)) A touch by TR8 resolves the expression to a Rational >>> TR8(_) -1/8 In this case, if eq is unsimplified, the answer is obtained directly: >>> eq = cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9) >>> TRmorrie(eq) 1/16 But if angles are made canonical with TR3 then the answer is not simplified without further work: >>> TR3(eq) sin(pi/18)*cos(pi/9)*cos(2*pi/9)/2 >>> TRmorrie(_) sin(pi/18)*sin(4*pi/9)/(8*sin(pi/9)) >>> TR8(_) cos(7*pi/18)/(16*sin(pi/9)) >>> TR3(_) 1/16 The original expression would have resolve to 1/16 directly with TR8, however: >>> TR8(eq) 1/16 References ========== .. [1] https://en.wikipedia.org/wiki/Morrie%27s_law """ def f(rv, first=True): if not rv.is_Mul: return rv if first: n, d = rv.as_numer_denom() return f(n, 0)/f(d, 0) args = defaultdict(list) coss = {} other = [] for c in rv.args: b, e = c.as_base_exp() if e.is_Integer and isinstance(b, cos): co, a = b.args[0].as_coeff_Mul() args[a].append(co) coss[b] = e else: other.append(c) new = [] for a in args: c = args[a] c.sort() while c: k = 0 cc = ci = c[0] while cc in c: k += 1 cc *= 2 if k > 1: newarg = sin(2**k*ci*a)/2**k/sin(ci*a) # see how many times this can be taken take = None ccs = [] for i in range(k): cc /= 2 key = cos(a*cc, evaluate=False) ccs.append(cc) take = min(coss[key], take or coss[key]) # update exponent counts for i in range(k): cc = ccs.pop() key = cos(a*cc, evaluate=False) coss[key] -= take if not coss[key]: c.remove(cc) new.append(newarg**take) else: b = cos(c.pop(0)*a) other.append(b**coss[b]) if new: rv = Mul(*(new + other + [ cos(k*a, evaluate=False) for a in args for k in args[a]])) return rv return bottom_up(rv, f) def TR14(rv, first=True): """Convert factored powers of sin and cos identities into simpler expressions. 
Examples ======== >>> from sympy.simplify.fu import TR14 >>> from sympy.abc import x, y >>> from sympy import cos, sin >>> TR14((cos(x) - 1)*(cos(x) + 1)) -sin(x)**2 >>> TR14((sin(x) - 1)*(sin(x) + 1)) -cos(x)**2 >>> p1 = (cos(x) + 1)*(cos(x) - 1) >>> p2 = (cos(y) - 1)*2*(cos(y) + 1) >>> p3 = (3*(cos(y) - 1))*(3*(cos(y) + 1)) >>> TR14(p1*p2*p3*(x - 1)) -18*(x - 1)*sin(x)**2*sin(y)**4 """ def f(rv): if not rv.is_Mul: return rv if first: # sort them by location in numerator and denominator # so the code below can just deal with positive exponents n, d = rv.as_numer_denom() if d is not S.One: newn = TR14(n, first=False) newd = TR14(d, first=False) if newn != n or newd != d: rv = newn/newd return rv other = [] process = [] for a in rv.args: if a.is_Pow: b, e = a.as_base_exp() if not (e.is_integer or b.is_positive): other.append(a) continue a = b else: e = S.One m = as_f_sign_1(a) if not m or m[1].func not in (cos, sin): if e is S.One: other.append(a) else: other.append(a**e) continue g, f, si = m process.append((g, e.is_Number, e, f, si, a)) # sort them to get like terms next to each other process = list(ordered(process)) # keep track of whether there was any change nother = len(other) # access keys keys = (g, t, e, f, si, a) = list(range(6)) while process: A = process.pop(0) if process: B = process[0] if A[e].is_Number and B[e].is_Number: # both exponents are numbers if A[f] == B[f]: if A[si] != B[si]: B = process.pop(0) take = min(A[e], B[e]) # reinsert any remainder # the B will likely sort after A so check it first if B[e] != take: rem = [B[i] for i in keys] rem[e] -= take process.insert(0, rem) elif A[e] != take: rem = [A[i] for i in keys] rem[e] -= take process.insert(0, rem) if isinstance(A[f], cos): t = sin else: t = cos other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take) continue elif A[e] == B[e]: # both exponents are equal symbols if A[f] == B[f]: if A[si] != B[si]: B = process.pop(0) take = A[e] if isinstance(A[f], cos): t = sin else: t = cos other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take) continue # either we are done or neither condition above applied other.append(A[a]**A[e]) if len(other) != nother: rv = Mul(*other) return rv return bottom_up(rv, f) def TR15(rv, max=4, pow=False): """Convert sin(x)*-2 to 1 + cot(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR15 >>> from sympy.abc import x >>> from sympy import sin >>> TR15(1 - 1/sin(x)**2) -cot(x)**2 """ def f(rv): if not (isinstance(rv, Pow) and isinstance(rv.base, sin)): return rv ia = 1/rv a = _TR56(ia, sin, cot, lambda x: 1 + x, max=max, pow=pow) if a != ia: rv = a return rv return bottom_up(rv, f) def TR16(rv, max=4, pow=False): """Convert cos(x)*-2 to 1 + tan(x)**2. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR16 >>> from sympy.abc import x >>> from sympy import cos >>> TR16(1 - 1/cos(x)**2) -tan(x)**2 """ def f(rv): if not (isinstance(rv, Pow) and isinstance(rv.base, cos)): return rv ia = 1/rv a = _TR56(ia, cos, tan, lambda x: 1 + x, max=max, pow=pow) if a != ia: rv = a return rv return bottom_up(rv, f) def TR111(rv): """Convert f(x)**-i to g(x)**i where either ``i`` is an integer or the base is positive and f, g are: tan, cot; sin, csc; or cos, sec. 
Examples ======== >>> from sympy.simplify.fu import TR111 >>> from sympy.abc import x >>> from sympy import tan >>> TR111(1 - 1/tan(x)**2) 1 - cot(x)**2 """ def f(rv): if not ( isinstance(rv, Pow) and (rv.base.is_positive or rv.exp.is_integer and rv.exp.is_negative)): return rv if isinstance(rv.base, tan): return cot(rv.base.args[0])**-rv.exp elif isinstance(rv.base, sin): return csc(rv.base.args[0])**-rv.exp elif isinstance(rv.base, cos): return sec(rv.base.args[0])**-rv.exp return rv return bottom_up(rv, f) def TR22(rv, max=4, pow=False): """Convert tan(x)**2 to sec(x)**2 - 1 and cot(x)**2 to csc(x)**2 - 1. See _TR56 docstring for advanced use of ``max`` and ``pow``. Examples ======== >>> from sympy.simplify.fu import TR22 >>> from sympy.abc import x >>> from sympy import tan, cot >>> TR22(1 + tan(x)**2) sec(x)**2 >>> TR22(1 + cot(x)**2) csc(x)**2 """ def f(rv): if not (isinstance(rv, Pow) and rv.base.func in (cot, tan)): return rv rv = _TR56(rv, tan, sec, lambda x: x - 1, max=max, pow=pow) rv = _TR56(rv, cot, csc, lambda x: x - 1, max=max, pow=pow) return rv return bottom_up(rv, f) def TRpower(rv): """Convert sin(x)**n and cos(x)**n with positive n to sums. Examples ======== >>> from sympy.simplify.fu import TRpower >>> from sympy.abc import x >>> from sympy import cos, sin >>> TRpower(sin(x)**6) -15*cos(2*x)/32 + 3*cos(4*x)/16 - cos(6*x)/32 + 5/16 >>> TRpower(sin(x)**3*cos(2*x)**4) (3*sin(x)/4 - sin(3*x)/4)*(cos(4*x)/2 + cos(8*x)/8 + 3/8) References ========== .. [1] https://en.wikipedia.org/wiki/List_of_trigonometric_identities#Power-reduction_formulae """ def f(rv): if not (isinstance(rv, Pow) and isinstance(rv.base, (sin, cos))): return rv b, n = rv.as_base_exp() x = b.args[0] if n.is_Integer and n.is_positive: if n.is_odd and isinstance(b, cos): rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x) for k in range((n + 1)/2)]) elif n.is_odd and isinstance(b, sin): rv = 2**(1-n)*(-1)**((n-1)/2)*Add(*[binomial(n, k)* (-1)**k*sin((n - 2*k)*x) for k in range((n + 1)/2)]) elif n.is_even and isinstance(b, cos): rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x) for k in range(n/2)]) elif n.is_even and isinstance(b, sin): rv = 2**(1-n)*(-1)**(n/2)*Add(*[binomial(n, k)* (-1)**k*cos((n - 2*k)*x) for k in range(n/2)]) if n.is_even: rv += 2**(-n)*binomial(n, n/2) return rv return bottom_up(rv, f) def L(rv): """Return count of trigonometric functions in expression. Examples ======== >>> from sympy.simplify.fu import L >>> from sympy.abc import x >>> from sympy import cos, sin >>> L(cos(x)+sin(x)) 2 """ return S(rv.count(TrigonometricFunction)) # ============== end of basic Fu-like tools ===================== if SYMPY_DEBUG: (TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13, TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22 )= list(map(debug, (TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13, TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22))) # tuples are chains -- (f, g) -> lambda x: g(f(x)) # lists are choices -- [f, g] -> lambda x: min(f(x), g(x), key=objective) CTR1 = [(TR5, TR0), (TR6, TR0), identity] CTR2 = (TR11, [(TR5, TR0), (TR6, TR0), TR0]) CTR3 = [(TRmorrie, TR8, TR0), (TRmorrie, TR8, TR10i, TR0), identity] CTR4 = [(TR4, TR10i), identity] RL1 = (TR4, TR3, TR4, TR12, TR4, TR13, TR4, TR0) # XXX it's a little unclear how this one is to be implemented # see Fu paper of reference, page 7. What is the Union symbol referring to? 
# The diagram shows all these as one chain of transformations, but the # text refers to them being applied independently. Also, a break # if L starts to increase has not been implemented. RL2 = [ (TR4, TR3, TR10, TR4, TR3, TR11), (TR5, TR7, TR11, TR4), (CTR3, CTR1, TR9, CTR2, TR4, TR9, TR9, CTR4), identity, ] def fu(rv, measure=lambda x: (L(x), x.count_ops())): """Attempt to simplify expression by using transformation rules given in the algorithm by Fu et al. :func:`fu` will try to minimize the objective function ``measure``. By default this first minimizes the number of trig terms and then minimizes the number of total operations. Examples ======== >>> from sympy.simplify.fu import fu >>> from sympy import cos, sin, tan, pi, S, sqrt >>> from sympy.abc import x, y, a, b >>> fu(sin(50)**2 + cos(50)**2 + sin(pi/6)) 3/2 >>> fu(sqrt(6)*cos(x) + sqrt(2)*sin(x)) 2*sqrt(2)*sin(x + pi/3) CTR1 example >>> eq = sin(x)**4 - cos(y)**2 + sin(y)**2 + 2*cos(x)**2 >>> fu(eq) cos(x)**4 - 2*cos(y)**2 + 2 CTR2 example >>> fu(S.Half - cos(2*x)/2) sin(x)**2 CTR3 example >>> fu(sin(a)*(cos(b) - sin(b)) + cos(a)*(sin(b) + cos(b))) sqrt(2)*sin(a + b + pi/4) CTR4 example >>> fu(sqrt(3)*cos(x)/2 + sin(x)/2) sin(x + pi/3) Example 1 >>> fu(1-sin(2*x)**2/4-sin(y)**2-cos(x)**4) -cos(x)**2 + cos(y)**2 Example 2 >>> fu(cos(4*pi/9)) sin(pi/18) >>> fu(cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9)) 1/16 Example 3 >>> fu(tan(7*pi/18)+tan(5*pi/18)-sqrt(3)*tan(5*pi/18)*tan(7*pi/18)) -sqrt(3) Objective function example >>> fu(sin(x)/cos(x)) # default objective function tan(x) >>> fu(sin(x)/cos(x), measure=lambda x: -x.count_ops()) # maximize op count sin(x)/cos(x) References ========== .. [1] https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.657.2478&rep=rep1&type=pdf """ fRL1 = greedy(RL1, measure) fRL2 = greedy(RL2, measure) was = rv rv = sympify(rv) if not isinstance(rv, Expr): return rv.func(*[fu(a, measure=measure) for a in rv.args]) rv = TR1(rv) if rv.has(tan, cot): rv1 = fRL1(rv) if (measure(rv1) < measure(rv)): rv = rv1 if rv.has(tan, cot): rv = TR2(rv) if rv.has(sin, cos): rv1 = fRL2(rv) rv2 = TR8(TRmorrie(rv1)) rv = min([was, rv, rv1, rv2], key=measure) return min(TR2i(rv), rv, key=measure) def process_common_addends(rv, do, key2=None, key1=True): """Apply ``do`` to addends of ``rv`` that (if ``key1=True``) share at least a common absolute value of their coefficient and the value of ``key2`` when applied to the argument. If ``key1`` is False ``key2`` must be supplied and will be the only key applied. 
""" # collect by absolute value of coefficient and key2 absc = defaultdict(list) if key1: for a in rv.args: c, a = a.as_coeff_Mul() if c < 0: c = -c a = -a # put the sign on `a` absc[(c, key2(a) if key2 else 1)].append(a) elif key2: for a in rv.args: absc[(S.One, key2(a))].append(a) else: raise ValueError('must have at least one key') args = [] hit = False for k in absc: v = absc[k] c, _ = k if len(v) > 1: e = Add(*v, evaluate=False) new = do(e) if new != e: e = new hit = True args.append(c*e) else: args.append(c*v[0]) if hit: rv = Add(*args) return rv fufuncs = ''' TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 TR8 TR9 TR10 TR10i TR11 TR12 TR13 L TR2i TRmorrie TR12i TR14 TR15 TR16 TR111 TR22'''.split() FU = dict(list(zip(fufuncs, list(map(locals().get, fufuncs))))) def _roots(): global _ROOT2, _ROOT3, _invROOT3 _ROOT2, _ROOT3 = sqrt(2), sqrt(3) _invROOT3 = 1/_ROOT3 _ROOT2 = None def trig_split(a, b, two=False): """Return the gcd, s1, s2, a1, a2, bool where If two is False (default) then:: a + b = gcd*(s1*f(a1) + s2*f(a2)) where f = cos if bool else sin else: if bool, a + b was +/- cos(a1)*cos(a2) +/- sin(a1)*sin(a2) and equals n1*gcd*cos(a - b) if n1 == n2 else n1*gcd*cos(a + b) else a + b was +/- cos(a1)*sin(a2) +/- sin(a1)*cos(a2) and equals n1*gcd*sin(a + b) if n1 = n2 else n1*gcd*sin(b - a) Examples ======== >>> from sympy.simplify.fu import trig_split >>> from sympy.abc import x, y, z >>> from sympy import cos, sin, sqrt >>> trig_split(cos(x), cos(y)) (1, 1, 1, x, y, True) >>> trig_split(2*cos(x), -2*cos(y)) (2, 1, -1, x, y, True) >>> trig_split(cos(x)*sin(y), cos(y)*sin(y)) (sin(y), 1, 1, x, y, True) >>> trig_split(cos(x), -sqrt(3)*sin(x), two=True) (2, 1, -1, x, pi/6, False) >>> trig_split(cos(x), sin(x), two=True) (sqrt(2), 1, 1, x, pi/4, False) >>> trig_split(cos(x), -sin(x), two=True) (sqrt(2), 1, -1, x, pi/4, False) >>> trig_split(sqrt(2)*cos(x), -sqrt(6)*sin(x), two=True) (2*sqrt(2), 1, -1, x, pi/6, False) >>> trig_split(-sqrt(6)*cos(x), -sqrt(2)*sin(x), two=True) (-2*sqrt(2), 1, 1, x, pi/3, False) >>> trig_split(cos(x)/sqrt(6), sin(x)/sqrt(2), two=True) (sqrt(6)/3, 1, 1, x, pi/6, False) >>> trig_split(-sqrt(6)*cos(x)*sin(y), -sqrt(2)*sin(x)*sin(y), two=True) (-2*sqrt(2)*sin(y), 1, 1, x, pi/3, False) >>> trig_split(cos(x), sin(x)) >>> trig_split(cos(x), sin(z)) >>> trig_split(2*cos(x), -sin(x)) >>> trig_split(cos(x), -sqrt(3)*sin(x)) >>> trig_split(cos(x)*cos(y), sin(x)*sin(z)) >>> trig_split(cos(x)*cos(y), sin(x)*sin(y)) >>> trig_split(-sqrt(6)*cos(x), sqrt(2)*sin(x)*sin(y), two=True) """ global _ROOT2, _ROOT3, _invROOT3 if _ROOT2 is None: _roots() a, b = [Factors(i) for i in (a, b)] ua, ub = a.normal(b) gcd = a.gcd(b).as_expr() n1 = n2 = 1 if S.NegativeOne in ua.factors: ua = ua.quo(S.NegativeOne) n1 = -n1 elif S.NegativeOne in ub.factors: ub = ub.quo(S.NegativeOne) n2 = -n2 a, b = [i.as_expr() for i in (ua, ub)] def pow_cos_sin(a, two): """Return ``a`` as a tuple (r, c, s) such that ``a = (r or 1)*(c or 1)*(s or 1)``. Three arguments are returned (radical, c-factor, s-factor) as long as the conditions set by ``two`` are met; otherwise None is returned. If ``two`` is True there will be one or two non-None values in the tuple: c and s or c and r or s and r or s or c with c being a cosine function (if possible) else a sine, and s being a sine function (if possible) else oosine. If ``two`` is False then there will only be a c or s term in the tuple. 
``two`` also require that either two cos and/or sin be present (with the condition that if the functions are the same the arguments are different or vice versa) or that a single cosine or a single sine be present with an optional radical. If the above conditions dictated by ``two`` are not met then None is returned. """ c = s = None co = S.One if a.is_Mul: co, a = a.as_coeff_Mul() if len(a.args) > 2 or not two: return None if a.is_Mul: args = list(a.args) else: args = [a] a = args.pop(0) if isinstance(a, cos): c = a elif isinstance(a, sin): s = a elif a.is_Pow and a.exp is S.Half: # autoeval doesn't allow -1/2 co *= a else: return None if args: b = args[0] if isinstance(b, cos): if c: s = b else: c = b elif isinstance(b, sin): if s: c = b else: s = b elif b.is_Pow and b.exp is S.Half: co *= b else: return None return co if co is not S.One else None, c, s elif isinstance(a, cos): c = a elif isinstance(a, sin): s = a if c is None and s is None: return co = co if co is not S.One else None return co, c, s # get the parts m = pow_cos_sin(a, two) if m is None: return coa, ca, sa = m m = pow_cos_sin(b, two) if m is None: return cob, cb, sb = m # check them if (not ca) and cb or ca and isinstance(ca, sin): coa, ca, sa, cob, cb, sb = cob, cb, sb, coa, ca, sa n1, n2 = n2, n1 if not two: # need cos(x) and cos(y) or sin(x) and sin(y) c = ca or sa s = cb or sb if not isinstance(c, s.func): return None return gcd, n1, n2, c.args[0], s.args[0], isinstance(c, cos) else: if not coa and not cob: if (ca and cb and sa and sb): if isinstance(ca, sa.func) is not isinstance(cb, sb.func): return args = {j.args for j in (ca, sa)} if not all(i.args in args for i in (cb, sb)): return return gcd, n1, n2, ca.args[0], sa.args[0], isinstance(ca, sa.func) if ca and sa or cb and sb or \ two and (ca is None and sa is None or cb is None and sb is None): return c = ca or sa s = cb or sb if c.args != s.args: return if not coa: coa = S.One if not cob: cob = S.One if coa is cob: gcd *= _ROOT2 return gcd, n1, n2, c.args[0], pi/4, False elif coa/cob == _ROOT3: gcd *= 2*cob return gcd, n1, n2, c.args[0], pi/3, False elif coa/cob == _invROOT3: gcd *= 2*coa return gcd, n1, n2, c.args[0], pi/6, False def as_f_sign_1(e): """If ``e`` is a sum that can be written as ``g*(a + s)`` where ``s`` is ``+/-1``, return ``g``, ``a``, and ``s`` where ``a`` does not have a leading negative coefficient. Examples ======== >>> from sympy.simplify.fu import as_f_sign_1 >>> from sympy.abc import x >>> as_f_sign_1(x + 1) (1, x, 1) >>> as_f_sign_1(x - 1) (1, x, -1) >>> as_f_sign_1(-x + 1) (-1, x, -1) >>> as_f_sign_1(-x - 1) (-1, x, 1) >>> as_f_sign_1(2*x + 2) (2, x, 1) """ if not e.is_Add or len(e.args) != 2: return # exact match a, b = e.args if a in (S.NegativeOne, S.One): g = S.One if b.is_Mul and b.args[0].is_Number and b.args[0] < 0: a, b = -a, -b g = -g return g, b, a # gcd match a, b = [Factors(i) for i in e.args] ua, ub = a.normal(b) gcd = a.gcd(b).as_expr() if S.NegativeOne in ua.factors: ua = ua.quo(S.NegativeOne) n1 = -1 n2 = 1 elif S.NegativeOne in ub.factors: ub = ub.quo(S.NegativeOne) n1 = 1 n2 = -1 else: n1 = n2 = 1 a, b = [i.as_expr() for i in (ua, ub)] if a is S.One: a, b = b, a n1, n2 = n2, n1 if n1 == -1: gcd = -gcd n2 = -n2 if b is S.One: return gcd, a, n2 def _osborne(e, d): """Replace all hyperbolic functions with trig functions using the Osborne rule. Notes ===== ``d`` is a dummy variable to prevent automatic evaluation of trigonometric/hyperbolic functions. References ========== .. 
[1] https://en.wikipedia.org/wiki/Hyperbolic_function """ def f(rv): if not isinstance(rv, HyperbolicFunction): return rv a = rv.args[0] a = a*d if not a.is_Add else Add._from_args([i*d for i in a.args]) if isinstance(rv, sinh): return I*sin(a) elif isinstance(rv, cosh): return cos(a) elif isinstance(rv, tanh): return I*tan(a) elif isinstance(rv, coth): return cot(a)/I elif isinstance(rv, sech): return sec(a) elif isinstance(rv, csch): return csc(a)/I else: raise NotImplementedError('unhandled %s' % rv.func) return bottom_up(e, f) def _osbornei(e, d): """Replace all trig functions with hyperbolic functions using the Osborne rule. Notes ===== ``d`` is a dummy variable to prevent automatic evaluation of trigonometric/hyperbolic functions. References ========== .. [1] https://en.wikipedia.org/wiki/Hyperbolic_function """ def f(rv): if not isinstance(rv, TrigonometricFunction): return rv const, x = rv.args[0].as_independent(d, as_Add=True) a = x.xreplace({d: S.One}) + const*I if isinstance(rv, sin): return sinh(a)/I elif isinstance(rv, cos): return cosh(a) elif isinstance(rv, tan): return tanh(a)/I elif isinstance(rv, cot): return coth(a)*I elif isinstance(rv, sec): return sech(a) elif isinstance(rv, csc): return csch(a)*I else: raise NotImplementedError('unhandled %s' % rv.func) return bottom_up(e, f) def hyper_as_trig(rv): """Return an expression containing hyperbolic functions in terms of trigonometric functions. Any trigonometric functions initially present are replaced with Dummy symbols and the function to undo the masking and the conversion back to hyperbolics is also returned. It should always be true that:: t, f = hyper_as_trig(expr) expr == f(t) Examples ======== >>> from sympy.simplify.fu import hyper_as_trig, fu >>> from sympy.abc import x >>> from sympy import cosh, sinh >>> eq = sinh(x)**2 + cosh(x)**2 >>> t, f = hyper_as_trig(eq) >>> f(fu(t)) cosh(2*x) References ========== .. [1] https://en.wikipedia.org/wiki/Hyperbolic_function """ from sympy.simplify.simplify import signsimp from sympy.simplify.radsimp import collect # mask off trig functions trigs = rv.atoms(TrigonometricFunction) reps = [(t, Dummy()) for t in trigs] masked = rv.xreplace(dict(reps)) # get inversion substitutions in place reps = [(v, k) for k, v in reps] d = Dummy() return _osborne(masked, d), lambda x: collect(signsimp( _osbornei(x, d).xreplace(dict(reps))), S.ImaginaryUnit) def sincos_to_sum(expr): """Convert products and powers of sin and cos to sums. Explanation =========== Applied power reduction TRpower first, then expands products, and converts products to sums with TR8. Examples ======== >>> from sympy.simplify.fu import sincos_to_sum >>> from sympy.abc import x >>> from sympy import cos, sin >>> sincos_to_sum(16*sin(x)**3*cos(2*x)**2) 7*sin(x) - 5*sin(3*x) + 3*sin(5*x) - sin(7*x) """ if not expr.has(cos, sin): return expr else: return TR8(expand_mul(TRpower(expr)))
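# --- Illustrative usage sketch (an addition, not part of the original
# module): it relies only on names defined above (fu, FU, TR10i) and on
# results already shown in their doctests, and demonstrates that each rule
# can be reached either directly or through the ``FU`` name-to-function map.
#
#     >>> from sympy.simplify.fu import fu, FU, TR10i
#     >>> from sympy import cos, sin, sqrt
#     >>> from sympy.abc import x
#     >>> TR10i(cos(1)*cos(3) + sin(1)*sin(3))          # a single rule
#     cos(2)
#     >>> FU['TR10i'](cos(1)*cos(3) + sin(1)*sin(3))    # same rule via FU
#     cos(2)
#     >>> fu(sqrt(3)*cos(x)/2 + sin(x)/2)               # the full pipeline
#     sin(x + pi/3)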
c69f6180e361d5de0fc6b46da8c69261676b0ea3f0643354f51b01cd7f781c0a
r""" This module contains the functionality to arrange the nodes of a diagram on an abstract grid, and then to produce a graphical representation of the grid. The currently supported back-ends are Xy-pic [Xypic]. Layout Algorithm ================ This section provides an overview of the algorithms implemented in :class:`DiagramGrid` to lay out diagrams. The first step of the algorithm is the removal composite and identity morphisms which do not have properties in the supplied diagram. The premises and conclusions of the diagram are then merged. The generic layout algorithm begins with the construction of the "skeleton" of the diagram. The skeleton is an undirected graph which has the objects of the diagram as vertices and has an (undirected) edge between each pair of objects between which there exist morphisms. The direction of the morphisms does not matter at this stage. The skeleton also includes an edge between each pair of vertices `A` and `C` such that there exists an object `B` which is connected via a morphism to `A`, and via a morphism to `C`. The skeleton constructed in this way has the property that every object is a vertex of a triangle formed by three edges of the skeleton. This property lies at the base of the generic layout algorithm. After the skeleton has been constructed, the algorithm lists all triangles which can be formed. Note that some triangles will not have all edges corresponding to morphisms which will actually be drawn. Triangles which have only one edge or less which will actually be drawn are immediately discarded. The list of triangles is sorted according to the number of edges which correspond to morphisms, then the triangle with the least number of such edges is selected. One of such edges is picked and the corresponding objects are placed horizontally, on a grid. This edge is recorded to be in the fringe. The algorithm then finds a "welding" of a triangle to the fringe. A welding is an edge in the fringe where a triangle could be attached. If the algorithm succeeds in finding such a welding, it adds to the grid that vertex of the triangle which was not yet included in any edge in the fringe and records the two new edges in the fringe. This process continues iteratively until all objects of the diagram has been placed or until no more weldings can be found. An edge is only removed from the fringe when a welding to this edge has been found, and there is no room around this edge to place another vertex. When no more weldings can be found, but there are still triangles left, the algorithm searches for a possibility of attaching one of the remaining triangles to the existing structure by a vertex. If such a possibility is found, the corresponding edge of the found triangle is placed in the found space and the iterative process of welding triangles restarts. When logical groups are supplied, each of these groups is laid out independently. Then a diagram is constructed in which groups are objects and any two logical groups between which there exist morphisms are connected via a morphism. This diagram is laid out. Finally, the grid which includes all objects of the initial diagram is constructed by replacing the cells which contain logical groups with the corresponding laid out grids, and by correspondingly expanding the rows and columns. The sequential layout algorithm begins by constructing the underlying undirected graph defined by the morphisms obtained after simplifying premises and conclusions and merging them (see above). 
The vertex with the minimal degree is then picked up and depth-first search is started from it. All objects which are located at distance `n` from the root in the depth-first search tree, are positioned in the `n`-th column of the resulting grid. The sequential layout will therefore attempt to lay the objects out along a line. References ========== [Xypic] http://xy-pic.sourceforge.net/ """ from sympy.categories import (CompositeMorphism, IdentityMorphism, NamedMorphism, Diagram) from sympy.core import Dict, Symbol from sympy.core.compatibility import iterable from sympy.printing import latex from sympy.sets import FiniteSet from sympy.utilities import default_sort_key from sympy.utilities.decorator import doctest_depends_on from itertools import chain __doctest_requires__ = {('preview_diagram',): 'pyglet'} class _GrowableGrid: """ Holds a growable grid of objects. Explanation =========== It is possible to append or prepend a row or a column to the grid using the corresponding methods. Prepending rows or columns has the effect of changing the coordinates of the already existing elements. This class currently represents a naive implementation of the functionality with little attempt at optimisation. """ def __init__(self, width, height): self._width = width self._height = height self._array = [[None for j in range(width)] for i in range(height)] @property def width(self): return self._width @property def height(self): return self._height def __getitem__(self, i_j): """ Returns the element located at in the i-th line and j-th column. """ i, j = i_j return self._array[i][j] def __setitem__(self, i_j, newvalue): """ Sets the element located at in the i-th line and j-th column. """ i, j = i_j self._array[i][j] = newvalue def append_row(self): """ Appends an empty row to the grid. """ self._height += 1 self._array.append([None for j in range(self._width)]) def append_column(self): """ Appends an empty column to the grid. """ self._width += 1 for i in range(self._height): self._array[i].append(None) def prepend_row(self): """ Prepends the grid with an empty row. """ self._height += 1 self._array.insert(0, [None for j in range(self._width)]) def prepend_column(self): """ Prepends the grid with an empty column. """ self._width += 1 for i in range(self._height): self._array[i].insert(0, None) class DiagramGrid: r""" Constructs and holds the fitting of the diagram into a grid. Explanation =========== The mission of this class is to analyse the structure of the supplied diagram and to place its objects on a grid such that, when the objects and the morphisms are actually drawn, the diagram would be "readable", in the sense that there will not be many intersections of moprhisms. This class does not perform any actual drawing. It does strive nevertheless to offer sufficient metadata to draw a diagram. Consider the following simple diagram. >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> from sympy import pprint >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) The simplest way to have a diagram laid out is the following: >>> grid = DiagramGrid(diagram) >>> (grid.width, grid.height) (2, 2) >>> pprint(grid) A B <BLANKLINE> C Sometimes one sees the diagram as consisting of logical groups. One can advise ``DiagramGrid`` as to such groups by employing the ``groups`` keyword argument. 
Consider the following diagram: >>> D = Object("D") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> h = NamedMorphism(D, A, "h") >>> k = NamedMorphism(D, B, "k") >>> diagram = Diagram([f, g, h, k]) Lay it out with generic layout: >>> grid = DiagramGrid(diagram) >>> pprint(grid) A B D <BLANKLINE> C Now, we can group the objects `A` and `D` to have them near one another: >>> grid = DiagramGrid(diagram, groups=[[A, D], B, C]) >>> pprint(grid) B C <BLANKLINE> A D Note how the positioning of the other objects changes. Further indications can be supplied to the constructor of :class:`DiagramGrid` using keyword arguments. The currently supported hints are explained in the following paragraphs. :class:`DiagramGrid` does not automatically guess which layout would suit the supplied diagram better. Consider, for example, the following linear diagram: >>> E = Object("E") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> h = NamedMorphism(C, D, "h") >>> i = NamedMorphism(D, E, "i") >>> diagram = Diagram([f, g, h, i]) When laid out with the generic layout, it does not get to look linear: >>> grid = DiagramGrid(diagram) >>> pprint(grid) A B <BLANKLINE> C D <BLANKLINE> E To get it laid out in a line, use ``layout="sequential"``: >>> grid = DiagramGrid(diagram, layout="sequential") >>> pprint(grid) A B C D E One may sometimes need to transpose the resulting layout. While this can always be done by hand, :class:`DiagramGrid` provides a hint for that purpose: >>> grid = DiagramGrid(diagram, layout="sequential", transpose=True) >>> pprint(grid) A <BLANKLINE> B <BLANKLINE> C <BLANKLINE> D <BLANKLINE> E Separate hints can also be provided for each group. For an example, refer to ``tests/test_drawing.py``, and see the different ways in which the five lemma [FiveLemma] can be laid out. See Also ======== Diagram References ========== .. [FiveLemma] https://en.wikipedia.org/wiki/Five_lemma """ @staticmethod def _simplify_morphisms(morphisms): """ Given a dictionary mapping morphisms to their properties, returns a new dictionary in which there are no morphisms which do not have properties, and which are compositions of other morphisms included in the dictionary. Identities are dropped as well. """ newmorphisms = {} for morphism, props in morphisms.items(): if isinstance(morphism, CompositeMorphism) and not props: continue elif isinstance(morphism, IdentityMorphism): continue else: newmorphisms[morphism] = props return newmorphisms @staticmethod def _merge_premises_conclusions(premises, conclusions): """ Given two dictionaries of morphisms and their properties, produces a single dictionary which includes elements from both dictionaries. If a morphism has some properties in premises and also in conclusions, the properties in conclusions take priority. """ return dict(chain(premises.items(), conclusions.items())) @staticmethod def _juxtapose_edges(edge1, edge2): """ If ``edge1`` and ``edge2`` have precisely one common endpoint, returns an edge which would form a triangle with ``edge1`` and ``edge2``. If ``edge1`` and ``edge2`` don't have a common endpoint, returns ``None``. If ``edge1`` and ``edge`` are the same edge, returns ``None``. """ intersection = edge1 & edge2 if len(intersection) != 1: # The edges either have no common points or are equal. return None # The edges have a common endpoint. Extract the different # endpoints and set up the new edge. 
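# For example, frozenset({A, B}) and frozenset({B, C}) intersect in {B},
# so frozenset({A, C}) is returned -- the edge that closes the triangle
# A-B-C.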
return (edge1 - intersection) | (edge2 - intersection) @staticmethod def _add_edge_append(dictionary, edge, elem): """ If ``edge`` is not in ``dictionary``, adds ``edge`` to the dictionary and sets its value to ``[elem]``. Otherwise appends ``elem`` to the value of existing entry. Note that edges are undirected, thus `(A, B) = (B, A)`. """ if edge in dictionary: dictionary[edge].append(elem) else: dictionary[edge] = [elem] @staticmethod def _build_skeleton(morphisms): """ Creates a dictionary which maps edges to corresponding morphisms. Thus for a morphism `f:A\rightarrow B`, the edge `(A, B)` will be associated with `f`. This function also adds to the list those edges which are formed by juxtaposition of two edges already in the list. These new edges are not associated with any morphism and are only added to assure that the diagram can be decomposed into triangles. """ edges = {} # Create edges for morphisms. for morphism in morphisms: DiagramGrid._add_edge_append( edges, frozenset([morphism.domain, morphism.codomain]), morphism) # Create new edges by juxtaposing existing edges. edges1 = dict(edges) for w in edges1: for v in edges1: wv = DiagramGrid._juxtapose_edges(w, v) if wv and wv not in edges: edges[wv] = [] return edges @staticmethod def _list_triangles(edges): """ Builds the set of triangles formed by the supplied edges. The triangles are arbitrary and need not be commutative. A triangle is a set that contains all three of its sides. """ triangles = set() for w in edges: for v in edges: wv = DiagramGrid._juxtapose_edges(w, v) if wv and wv in edges: triangles.add(frozenset([w, v, wv])) return triangles @staticmethod def _drop_redundant_triangles(triangles, skeleton): """ Returns a list which contains only those triangles who have morphisms associated with at least two edges. """ return [tri for tri in triangles if len([e for e in tri if skeleton[e]]) >= 2] @staticmethod def _morphism_length(morphism): """ Returns the length of a morphism. The length of a morphism is the number of components it consists of. A non-composite morphism is of length 1. """ if isinstance(morphism, CompositeMorphism): return len(morphism.components) else: return 1 @staticmethod def _compute_triangle_min_sizes(triangles, edges): r""" Returns a dictionary mapping triangles to their minimal sizes. The minimal size of a triangle is the sum of maximal lengths of morphisms associated to the sides of the triangle. The length of a morphism is the number of components it consists of. A non-composite morphism is of length 1. Sorting triangles by this metric attempts to address two aspects of layout. For triangles with only simple morphisms in the edge, this assures that triangles with all three edges visible will get typeset after triangles with less visible edges, which sometimes minimizes the necessity in diagonal arrows. For triangles with composite morphisms in the edges, this assures that objects connected with shorter morphisms will be laid out first, resulting the visual proximity of those objects which are connected by shorter morphisms. """ triangle_sizes = {} for triangle in triangles: size = 0 for e in triangle: morphisms = edges[e] if morphisms: size += max(DiagramGrid._morphism_length(m) for m in morphisms) triangle_sizes[triangle] = size return triangle_sizes @staticmethod def _triangle_objects(triangle): """ Given a triangle, returns the objects included in it. """ # A triangle is a frozenset of three two-element frozensets # (the edges). 
This chains the three edges together and # creates a frozenset from the iterator, thus producing a # frozenset of objects of the triangle. return frozenset(chain(*tuple(triangle))) @staticmethod def _other_vertex(triangle, edge): """ Given a triangle and an edge of it, returns the vertex which opposes the edge. """ # This gets the set of objects of the triangle and then # subtracts the set of objects employed in ``edge`` to get the # vertex opposite to ``edge``. return list(DiagramGrid._triangle_objects(triangle) - set(edge))[0] @staticmethod def _empty_point(pt, grid): """ Checks if the cell at coordinates ``pt`` is either empty or out of the bounds of the grid. """ if (pt[0] < 0) or (pt[1] < 0) or \ (pt[0] >= grid.height) or (pt[1] >= grid.width): return True return grid[pt] is None @staticmethod def _put_object(coords, obj, grid, fringe): """ Places an object at the coordinate ``cords`` in ``grid``, growing the grid and updating ``fringe``, if necessary. Returns (0, 0) if no row or column has been prepended, (1, 0) if a row was prepended, (0, 1) if a column was prepended and (1, 1) if both a column and a row were prepended. """ (i, j) = coords offset = (0, 0) if i == -1: grid.prepend_row() i = 0 offset = (1, 0) for k in range(len(fringe)): ((i1, j1), (i2, j2)) = fringe[k] fringe[k] = ((i1 + 1, j1), (i2 + 1, j2)) elif i == grid.height: grid.append_row() if j == -1: j = 0 offset = (offset[0], 1) grid.prepend_column() for k in range(len(fringe)): ((i1, j1), (i2, j2)) = fringe[k] fringe[k] = ((i1, j1 + 1), (i2, j2 + 1)) elif j == grid.width: grid.append_column() grid[i, j] = obj return offset @staticmethod def _choose_target_cell(pt1, pt2, edge, obj, skeleton, grid): """ Given two points, ``pt1`` and ``pt2``, and the welding edge ``edge``, chooses one of the two points to place the opposing vertex ``obj`` of the triangle. If neither of this points fits, returns ``None``. """ pt1_empty = DiagramGrid._empty_point(pt1, grid) pt2_empty = DiagramGrid._empty_point(pt2, grid) if pt1_empty and pt2_empty: # Both cells are empty. Of these two, choose that cell # which will assure that a visible edge of the triangle # will be drawn perpendicularly to the current welding # edge. A = grid[edge[0]] if skeleton.get(frozenset([A, obj])): return pt1 else: return pt2 if pt1_empty: return pt1 elif pt2_empty: return pt2 else: return None @staticmethod def _find_triangle_to_weld(triangles, fringe, grid): """ Finds, if possible, a triangle and an edge in the ``fringe`` to which the triangle could be attached. Returns the tuple containing the triangle and the index of the corresponding edge in the ``fringe``. This function relies on the fact that objects are unique in the diagram. """ for triangle in triangles: for (a, b) in fringe: if frozenset([grid[a], grid[b]]) in triangle: return (triangle, (a, b)) return None @staticmethod def _weld_triangle(tri, welding_edge, fringe, grid, skeleton): """ If possible, welds the triangle ``tri`` to ``fringe`` and returns ``False``. If this method encounters a degenerate situation in the fringe and corrects it such that a restart of the search is required, it returns ``True`` (which means that a restart in finding triangle weldings is required). A degenerate situation is a situation when an edge listed in the fringe does not belong to the visual boundary of the diagram. """ a, b = welding_edge target_cell = None obj = DiagramGrid._other_vertex(tri, (grid[a], grid[b])) # We now have a triangle and an edge where it can be welded to # the fringe. 
Decide where to place the other vertex of the # triangle and check for degenerate situations en route. if (abs(a[0] - b[0]) == 1) and (abs(a[1] - b[1]) == 1): # A diagonal edge. target_cell = (a[0], b[1]) if grid[target_cell]: # That cell is already occupied. target_cell = (b[0], a[1]) if grid[target_cell]: # Degenerate situation, this edge is not # on the actual fringe. Correct the # fringe and go on. fringe.remove((a, b)) return True elif a[0] == b[0]: # A horizontal edge. We first attempt to build the # triangle in the downward direction. down_left = a[0] + 1, a[1] down_right = a[0] + 1, b[1] target_cell = DiagramGrid._choose_target_cell( down_left, down_right, (a, b), obj, skeleton, grid) if not target_cell: # No room below this edge. Check above. up_left = a[0] - 1, a[1] up_right = a[0] - 1, b[1] target_cell = DiagramGrid._choose_target_cell( up_left, up_right, (a, b), obj, skeleton, grid) if not target_cell: # This edge is not in the fringe, remove it # and restart. fringe.remove((a, b)) return True elif a[1] == b[1]: # A vertical edge. We will attempt to place the other # vertex of the triangle to the right of this edge. right_up = a[0], a[1] + 1 right_down = b[0], a[1] + 1 target_cell = DiagramGrid._choose_target_cell( right_up, right_down, (a, b), obj, skeleton, grid) if not target_cell: # No room to the left. See what's to the right. left_up = a[0], a[1] - 1 left_down = b[0], a[1] - 1 target_cell = DiagramGrid._choose_target_cell( left_up, left_down, (a, b), obj, skeleton, grid) if not target_cell: # This edge is not in the fringe, remove it # and restart. fringe.remove((a, b)) return True # We now know where to place the other vertex of the # triangle. offset = DiagramGrid._put_object(target_cell, obj, grid, fringe) # Take care of the displacement of coordinates if a row or # a column was prepended. target_cell = (target_cell[0] + offset[0], target_cell[1] + offset[1]) a = (a[0] + offset[0], a[1] + offset[1]) b = (b[0] + offset[0], b[1] + offset[1]) fringe.extend([(a, target_cell), (b, target_cell)]) # No restart is required. return False @staticmethod def _triangle_key(tri, triangle_sizes): """ Returns a key for the supplied triangle. It should be the same independently of the hash randomisation. """ objects = sorted( DiagramGrid._triangle_objects(tri), key=default_sort_key) return (triangle_sizes[tri], default_sort_key(objects)) @staticmethod def _pick_root_edge(tri, skeleton): """ For a given triangle always picks the same root edge. The root edge is the edge that will be placed first on the grid. """ candidates = [sorted(e, key=default_sort_key) for e in tri if skeleton[e]] sorted_candidates = sorted(candidates, key=default_sort_key) # Don't forget to assure the proper ordering of the vertices # in this edge. return tuple(sorted(sorted_candidates[0], key=default_sort_key)) @staticmethod def _drop_irrelevant_triangles(triangles, placed_objects): """ Returns only those triangles whose set of objects is not completely included in ``placed_objects``. """ return [tri for tri in triangles if not placed_objects.issuperset( DiagramGrid._triangle_objects(tri))] @staticmethod def _grow_pseudopod(triangles, fringe, grid, skeleton, placed_objects): """ Starting from an object in the existing structure on the ``grid``, adds an edge to which a triangle from ``triangles`` could be welded. If this method has found a way to do so, it returns the object it has just added. This method should be applied when ``_weld_triangle`` cannot find weldings any more. 
""" for i in range(grid.height): for j in range(grid.width): obj = grid[i, j] if not obj: continue # Here we need to choose a triangle which has only # ``obj`` in common with the existing structure. The # situations when this is not possible should be # handled elsewhere. def good_triangle(tri): objs = DiagramGrid._triangle_objects(tri) return obj in objs and \ placed_objects & (objs - {obj}) == set() tris = [tri for tri in triangles if good_triangle(tri)] if not tris: # This object is not interesting. continue # Pick the "simplest" of the triangles which could be # attached. Remember that the list of triangles is # sorted according to their "simplicity" (see # _compute_triangle_min_sizes for the metric). # # Note that ``tris`` are sequentially built from # ``triangles``, so we don't have to worry about hash # randomisation. tri = tris[0] # We have found a triangle which could be attached to # the existing structure by a vertex. candidates = sorted([e for e in tri if skeleton[e]], key=lambda e: FiniteSet(*e).sort_key()) edges = [e for e in candidates if obj in e] # Note that a meaningful edge (i.e., and edge that is # associated with a morphism) containing ``obj`` # always exists. That's because all triangles are # guaranteed to have at least two meaningful edges. # See _drop_redundant_triangles. # Get the object at the other end of the edge. edge = edges[0] other_obj = tuple(edge - frozenset([obj]))[0] # Now check for free directions. When checking for # free directions, prefer the horizontal and vertical # directions. neighbours = [(i - 1, j), (i, j + 1), (i + 1, j), (i, j - 1), (i - 1, j - 1), (i - 1, j + 1), (i + 1, j - 1), (i + 1, j + 1)] for pt in neighbours: if DiagramGrid._empty_point(pt, grid): # We have a found a place to grow the # pseudopod into. offset = DiagramGrid._put_object( pt, other_obj, grid, fringe) i += offset[0] j += offset[1] pt = (pt[0] + offset[0], pt[1] + offset[1]) fringe.append(((i, j), pt)) return other_obj # This diagram is actually cooler that I can handle. Fail cowardly. return None @staticmethod def _handle_groups(diagram, groups, merged_morphisms, hints): """ Given the slightly preprocessed morphisms of the diagram, produces a grid laid out according to ``groups``. If a group has hints, it is laid out with those hints only, without any influence from ``hints``. Otherwise, it is laid out with ``hints``. """ def lay_out_group(group, local_hints): """ If ``group`` is a set of objects, uses a ``DiagramGrid`` to lay it out and returns the grid. Otherwise returns the object (i.e., ``group``). If ``local_hints`` is not empty, it is supplied to ``DiagramGrid`` as the dictionary of hints. Otherwise, the ``hints`` argument of ``_handle_groups`` is used. """ if isinstance(group, FiniteSet): # Set up the corresponding object-to-group # mappings. for obj in group: obj_groups[obj] = group # Lay out the current group. if local_hints: groups_grids[group] = DiagramGrid( diagram.subdiagram_from_objects(group), **local_hints) else: groups_grids[group] = DiagramGrid( diagram.subdiagram_from_objects(group), **hints) else: obj_groups[group] = group def group_to_finiteset(group): """ Converts ``group`` to a :class:``FiniteSet`` if it is an iterable. """ if iterable(group): return FiniteSet(*group) else: return group obj_groups = {} groups_grids = {} # We would like to support various containers to represent # groups. To achieve that, before laying each group out, it # should be converted to a FiniteSet, because that is what the # following code expects. 
if isinstance(groups, dict) or isinstance(groups, Dict): finiteset_groups = {} for group, local_hints in groups.items(): finiteset_group = group_to_finiteset(group) finiteset_groups[finiteset_group] = local_hints lay_out_group(group, local_hints) groups = finiteset_groups else: finiteset_groups = [] for group in groups: finiteset_group = group_to_finiteset(group) finiteset_groups.append(finiteset_group) lay_out_group(finiteset_group, None) groups = finiteset_groups new_morphisms = [] for morphism in merged_morphisms: dom = obj_groups[morphism.domain] cod = obj_groups[morphism.codomain] # Note that we are not really interested in morphisms # which do not employ two different groups, because # these do not influence the layout. if dom != cod: # These are essentially unnamed morphisms; they are # not going to mess in the final layout. By giving # them the same names, we avoid unnecessary # duplicates. new_morphisms.append(NamedMorphism(dom, cod, "dummy")) # Lay out the new diagram. Since these are dummy morphisms, # properties and conclusions are irrelevant. top_grid = DiagramGrid(Diagram(new_morphisms)) # We now have to substitute the groups with the corresponding # grids, laid out at the beginning of this function. Compute # the size of each row and column in the grid, so that all # nested grids fit. def group_size(group): """ For the supplied group (or object, eventually), returns the size of the cell that will hold this group (object). """ if group in groups_grids: grid = groups_grids[group] return (grid.height, grid.width) else: return (1, 1) row_heights = [max(group_size(top_grid[i, j])[0] for j in range(top_grid.width)) for i in range(top_grid.height)] column_widths = [max(group_size(top_grid[i, j])[1] for i in range(top_grid.height)) for j in range(top_grid.width)] grid = _GrowableGrid(sum(column_widths), sum(row_heights)) real_row = 0 real_column = 0 for logical_row in range(top_grid.height): for logical_column in range(top_grid.width): obj = top_grid[logical_row, logical_column] if obj in groups_grids: # This is a group. Copy the corresponding grid in # place. local_grid = groups_grids[obj] for i in range(local_grid.height): for j in range(local_grid.width): grid[real_row + i, real_column + j] = local_grid[i, j] else: # This is an object. Just put it there. grid[real_row, real_column] = obj real_column += column_widths[logical_column] real_column = 0 real_row += row_heights[logical_row] return grid @staticmethod def _generic_layout(diagram, merged_morphisms): """ Produces the generic layout for the supplied diagram. """ all_objects = set(diagram.objects) if len(all_objects) == 1: # There only one object in the diagram, just put in on 1x1 # grid. grid = _GrowableGrid(1, 1) grid[0, 0] = tuple(all_objects)[0] return grid skeleton = DiagramGrid._build_skeleton(merged_morphisms) grid = _GrowableGrid(2, 1) if len(skeleton) == 1: # This diagram contains only one morphism. Draw it # horizontally. objects = sorted(all_objects, key=default_sort_key) grid[0, 0] = objects[0] grid[0, 1] = objects[1] return grid triangles = DiagramGrid._list_triangles(skeleton) triangles = DiagramGrid._drop_redundant_triangles(triangles, skeleton) triangle_sizes = DiagramGrid._compute_triangle_min_sizes( triangles, skeleton) triangles = sorted(triangles, key=lambda tri: DiagramGrid._triangle_key(tri, triangle_sizes)) # Place the first edge on the grid. 
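# The root edge comes from the "simplest" triangle (the first one after
# sorting by _triangle_key); its two objects occupy cells (0, 0) and (0, 1)
# and the edge between them seeds the fringe.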
root_edge = DiagramGrid._pick_root_edge(triangles[0], skeleton) grid[0, 0], grid[0, 1] = root_edge fringe = [((0, 0), (0, 1))] # Record which objects we now have on the grid. placed_objects = set(root_edge) while placed_objects != all_objects: welding = DiagramGrid._find_triangle_to_weld( triangles, fringe, grid) if welding: (triangle, welding_edge) = welding restart_required = DiagramGrid._weld_triangle( triangle, welding_edge, fringe, grid, skeleton) if restart_required: continue placed_objects.update( DiagramGrid._triangle_objects(triangle)) else: # No more weldings found. Try to attach triangles by # vertices. new_obj = DiagramGrid._grow_pseudopod( triangles, fringe, grid, skeleton, placed_objects) if not new_obj: # No more triangles can be attached, not even by # the edge. We will set up a new diagram out of # what has been left, laid it out independently, # and then attach it to this one. remaining_objects = all_objects - placed_objects remaining_diagram = diagram.subdiagram_from_objects( FiniteSet(*remaining_objects)) remaining_grid = DiagramGrid(remaining_diagram) # Now, let's glue ``remaining_grid`` to ``grid``. final_width = grid.width + remaining_grid.width final_height = max(grid.height, remaining_grid.height) final_grid = _GrowableGrid(final_width, final_height) for i in range(grid.width): for j in range(grid.height): final_grid[i, j] = grid[i, j] start_j = grid.width for i in range(remaining_grid.height): for j in range(remaining_grid.width): final_grid[i, start_j + j] = remaining_grid[i, j] return final_grid placed_objects.add(new_obj) triangles = DiagramGrid._drop_irrelevant_triangles( triangles, placed_objects) return grid @staticmethod def _get_undirected_graph(objects, merged_morphisms): """ Given the objects and the relevant morphisms of a diagram, returns the adjacency lists of the underlying undirected graph. """ adjlists = {} for obj in objects: adjlists[obj] = [] for morphism in merged_morphisms: adjlists[morphism.domain].append(morphism.codomain) adjlists[morphism.codomain].append(morphism.domain) # Assure that the objects in the adjacency list are always in # the same order. for obj in adjlists.keys(): adjlists[obj].sort(key=default_sort_key) return adjlists @staticmethod def _sequential_layout(diagram, merged_morphisms): r""" Lays out the diagram in "sequential" layout. This method will attempt to produce a result as close to a line as possible. For linear diagrams, the result will actually be a line. """ objects = diagram.objects sorted_objects = sorted(objects, key=default_sort_key) # Set up the adjacency lists of the underlying undirected # graph of ``merged_morphisms``. adjlists = DiagramGrid._get_undirected_graph(objects, merged_morphisms) # Find an object with the minimal degree. This is going to be # the root. root = sorted_objects[0] mindegree = len(adjlists[root]) for obj in sorted_objects: current_degree = len(adjlists[obj]) if current_degree < mindegree: root = obj mindegree = current_degree grid = _GrowableGrid(1, 1) grid[0, 0] = root placed_objects = {root} def place_objects(pt, placed_objects): """ Does depth-first search in the underlying graph of the diagram and places the objects en route. """ # We will start placing new objects from here. new_pt = (pt[0], pt[1] + 1) for adjacent_obj in adjlists[grid[pt]]: if adjacent_obj in placed_objects: # This object has already been placed. 
continue DiagramGrid._put_object(new_pt, adjacent_obj, grid, []) placed_objects.add(adjacent_obj) placed_objects.update(place_objects(new_pt, placed_objects)) new_pt = (new_pt[0] + 1, new_pt[1]) return placed_objects place_objects((0, 0), placed_objects) return grid @staticmethod def _drop_inessential_morphisms(merged_morphisms): r""" Removes those morphisms which should appear in the diagram, but which have no relevance to object layout. Currently this removes "loop" morphisms: the non-identity morphisms with the same domains and codomains. """ morphisms = [m for m in merged_morphisms if m.domain != m.codomain] return morphisms @staticmethod def _get_connected_components(objects, merged_morphisms): """ Given a container of morphisms, returns a list of connected components formed by these morphisms. A connected component is represented by a diagram consisting of the corresponding morphisms. """ component_index = {} for o in objects: component_index[o] = None # Get the underlying undirected graph of the diagram. adjlist = DiagramGrid._get_undirected_graph(objects, merged_morphisms) def traverse_component(object, current_index): """ Does a depth-first search traversal of the component containing ``object``. """ component_index[object] = current_index for o in adjlist[object]: if component_index[o] is None: traverse_component(o, current_index) # Traverse all components. current_index = 0 for o in adjlist: if component_index[o] is None: traverse_component(o, current_index) current_index += 1 # List the objects of the components. component_objects = [[] for i in range(current_index)] for o, idx in component_index.items(): component_objects[idx].append(o) # Finally, list the morphisms belonging to each component. # # Note: If some objects are isolated, they will not get any # morphisms at this stage, and since the layout algorithm # relies, we are essentially going to lose this object. # Therefore, check if there are isolated objects and, for each # of them, provide the trivial identity morphism. It will get # discarded later, but the object will be there. component_morphisms = [] for component in component_objects: current_morphisms = {} for m in merged_morphisms: if (m.domain in component) and (m.codomain in component): current_morphisms[m] = merged_morphisms[m] if len(component) == 1: # Let's add an identity morphism, for the sake of # surely having morphisms in this component. current_morphisms[IdentityMorphism(component[0])] = FiniteSet() component_morphisms.append(Diagram(current_morphisms)) return component_morphisms def __init__(self, diagram, groups=None, **hints): premises = DiagramGrid._simplify_morphisms(diagram.premises) conclusions = DiagramGrid._simplify_morphisms(diagram.conclusions) all_merged_morphisms = DiagramGrid._merge_premises_conclusions( premises, conclusions) merged_morphisms = DiagramGrid._drop_inessential_morphisms( all_merged_morphisms) # Store the merged morphisms for later use. self._morphisms = all_merged_morphisms components = DiagramGrid._get_connected_components( diagram.objects, all_merged_morphisms) if groups and (groups != diagram.objects): # Lay out the diagram according to the groups. self._grid = DiagramGrid._handle_groups( diagram, groups, merged_morphisms, hints) elif len(components) > 1: # Note that we check for connectedness _before_ checking # the layout hints because the layout strategies don't # know how to deal with disconnected diagrams. # The diagram is disconnected. Lay out the components # independently. 
grids = [] # Sort the components to eventually get the grids arranged # in a fixed, hash-independent order. components = sorted(components, key=default_sort_key) for component in components: grid = DiagramGrid(component, **hints) grids.append(grid) # Throw the grids together, in a line. total_width = sum(g.width for g in grids) total_height = max(g.height for g in grids) grid = _GrowableGrid(total_width, total_height) start_j = 0 for g in grids: for i in range(g.height): for j in range(g.width): grid[i, start_j + j] = g[i, j] start_j += g.width self._grid = grid elif "layout" in hints: if hints["layout"] == "sequential": self._grid = DiagramGrid._sequential_layout( diagram, merged_morphisms) else: self._grid = DiagramGrid._generic_layout(diagram, merged_morphisms) if hints.get("transpose"): # Transpose the resulting grid. grid = _GrowableGrid(self._grid.height, self._grid.width) for i in range(self._grid.height): for j in range(self._grid.width): grid[j, i] = self._grid[i, j] self._grid = grid @property def width(self): """ Returns the number of columns in this diagram layout. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> grid.width 2 """ return self._grid.width @property def height(self): """ Returns the number of rows in this diagram layout. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> grid.height 2 """ return self._grid.height def __getitem__(self, i_j): """ Returns the object placed in the row ``i`` and column ``j``. The indices are 0-based. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> (grid[0, 0], grid[0, 1]) (Object("A"), Object("B")) >>> (grid[1, 0], grid[1, 1]) (None, Object("C")) """ i, j = i_j return self._grid[i, j] @property def morphisms(self): """ Returns those morphisms (and their properties) which are sufficiently meaningful to be drawn. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> grid.morphisms {NamedMorphism(Object("A"), Object("B"), "f"): EmptySet, NamedMorphism(Object("B"), Object("C"), "g"): EmptySet} """ return self._morphisms def __str__(self): """ Produces a string representation of this class. This method returns a string representation of the underlying list of lists of objects. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import Diagram, DiagramGrid >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g]) >>> grid = DiagramGrid(diagram) >>> print(grid) [[Object("A"), Object("B")], [None, Object("C")]] """ return repr(self._grid._array) class ArrowStringDescription: r""" Stores the information necessary for producing an Xy-pic description of an arrow. The principal goal of this class is to abstract away the string representation of an arrow and to also provide the functionality to produce the actual Xy-pic string. ``unit`` sets the unit which will be used to specify the amount of curving and other distances. ``horizontal_direction`` should be a string of ``"r"`` or ``"l"`` specifying the horizontal offset of the target cell of the arrow relatively to the current one. ``vertical_direction`` should specify the vertical offset using a series of either ``"d"`` or ``"u"``. ``label_position`` should be either ``"^"``, ``"_"``, or ``"|"`` to specify that the label should be positioned above the arrow, below the arrow or just over the arrow, in a break. Note that the notions "above" and "below" are relative to arrow direction. ``label`` stores the morphism label. This works as follows (disregard the yet unexplained arguments): >>> from sympy.categories.diagram_drawing import ArrowStringDescription >>> astr = ArrowStringDescription( ... unit="mm", curving=None, curving_amount=None, ... looping_start=None, looping_end=None, horizontal_direction="d", ... vertical_direction="r", label_position="_", label="f") >>> print(str(astr)) \ar[dr]_{f} ``curving`` should be one of ``"^"``, ``"_"`` to specify in which direction the arrow is going to curve. ``curving_amount`` is a number describing how many ``unit``'s the morphism is going to curve: >>> astr = ArrowStringDescription( ... unit="mm", curving="^", curving_amount=12, ... looping_start=None, looping_end=None, horizontal_direction="d", ... vertical_direction="r", label_position="_", label="f") >>> print(str(astr)) \ar@/^12mm/[dr]_{f} ``looping_start`` and ``looping_end`` are currently only used for loop morphisms, those which have the same domain and codomain. These two attributes should store a valid Xy-pic direction and specify, correspondingly, the direction the arrow gets out into and the direction the arrow gets back from: >>> astr = ArrowStringDescription( ... unit="mm", curving=None, curving_amount=None, ... looping_start="u", looping_end="l", horizontal_direction="", ... vertical_direction="", label_position="_", label="f") >>> print(str(astr)) \ar@(u,l)[]_{f} ``label_displacement`` controls how far the arrow label is from the ends of the arrow. For example, to position the arrow label near the arrow head, use ">": >>> astr = ArrowStringDescription( ... unit="mm", curving="^", curving_amount=12, ... looping_start=None, looping_end=None, horizontal_direction="d", ... vertical_direction="r", label_position="_", label="f") >>> astr.label_displacement = ">" >>> print(str(astr)) \ar@/^12mm/[dr]_>{f} Finally, ``arrow_style`` is used to specify the arrow style. To get a dashed arrow, for example, use "{-->}" as arrow style: >>> astr = ArrowStringDescription( ... unit="mm", curving="^", curving_amount=12, ... looping_start=None, looping_end=None, horizontal_direction="d", ... 
vertical_direction="r", label_position="_", label="f") >>> astr.arrow_style = "{-->}" >>> print(str(astr)) \ar@/^12mm/@{-->}[dr]_{f} Notes ===== Instances of :class:`ArrowStringDescription` will be constructed by :class:`XypicDiagramDrawer` and provided for further use in formatters. The user is not expected to construct instances of :class:`ArrowStringDescription` themselves. To be able to properly utilise this class, the reader is encouraged to checkout the Xy-pic user guide, available at [Xypic]. See Also ======== XypicDiagramDrawer References ========== [Xypic] http://xy-pic.sourceforge.net/ """ def __init__(self, unit, curving, curving_amount, looping_start, looping_end, horizontal_direction, vertical_direction, label_position, label): self.unit = unit self.curving = curving self.curving_amount = curving_amount self.looping_start = looping_start self.looping_end = looping_end self.horizontal_direction = horizontal_direction self.vertical_direction = vertical_direction self.label_position = label_position self.label = label self.label_displacement = "" self.arrow_style = "" # This flag shows that the position of the label of this # morphism was set while typesetting a curved morphism and # should not be modified later. self.forced_label_position = False def __str__(self): if self.curving: curving_str = "@/%s%d%s/" % (self.curving, self.curving_amount, self.unit) else: curving_str = "" if self.looping_start and self.looping_end: looping_str = "@(%s,%s)" % (self.looping_start, self.looping_end) else: looping_str = "" if self.arrow_style: style_str = "@" + self.arrow_style else: style_str = "" return "\\ar%s%s%s[%s%s]%s%s{%s}" % \ (curving_str, looping_str, style_str, self.horizontal_direction, self.vertical_direction, self.label_position, self.label_displacement, self.label) class XypicDiagramDrawer: r""" Given a :class:`~.Diagram` and the corresponding :class:`DiagramGrid`, produces the Xy-pic representation of the diagram. The most important method in this class is ``draw``. Consider the following triangle diagram: >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import DiagramGrid, XypicDiagramDrawer >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g], {g * f: "unique"}) To draw this diagram, its objects need to be laid out with a :class:`DiagramGrid`:: >>> grid = DiagramGrid(diagram) Finally, the drawing: >>> drawer = XypicDiagramDrawer() >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } For further details see the docstring of this method. To control the appearance of the arrows, formatters are used. The dictionary ``arrow_formatters`` maps morphisms to formatter functions. A formatter is accepts an :class:`ArrowStringDescription` and is allowed to modify any of the arrow properties exposed thereby. For example, to have all morphisms with the property ``unique`` appear as dashed arrows, and to have their names prepended with `\exists !`, the following should be done: >>> def formatter(astr): ... astr.label = r"\exists !" + astr.label ... astr.arrow_style = "{-->}" >>> drawer.arrow_formatters["unique"] = formatter >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar@{-->}[d]_{\exists !g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } To modify the appearance of all arrows in the diagram, set ``default_arrow_formatter``. 
For example, to place all morphism labels a little bit farther from the arrow head so that they look more centred, do as follows: >>> def default_formatter(astr): ... astr.label_displacement = "(0.45)" >>> drawer.default_arrow_formatter = default_formatter >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar@{-->}[d]_(0.45){\exists !g\circ f} \ar[r]^(0.45){f} & B \ar[ld]^(0.45){g} \\ C & } In some diagrams some morphisms are drawn as curved arrows. Consider the following diagram: >>> D = Object("D") >>> E = Object("E") >>> h = NamedMorphism(D, A, "h") >>> k = NamedMorphism(D, B, "k") >>> diagram = Diagram([f, g, h, k]) >>> grid = DiagramGrid(diagram) >>> drawer = XypicDiagramDrawer() >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[r]_{f} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_3mm/[ll]_{h} \\ & C & } To control how far the morphisms are curved by default, one can use the ``unit`` and ``default_curving_amount`` attributes: >>> drawer.unit = "cm" >>> drawer.default_curving_amount = 1 >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[r]_{f} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_1cm/[ll]_{h} \\ & C & } In some diagrams, there are multiple curved morphisms between the same two objects. To control by how much the curving changes between two such successive morphisms, use ``default_curving_step``: >>> drawer.default_curving_step = 1 >>> h1 = NamedMorphism(A, D, "h1") >>> diagram = Diagram([f, g, h, k, h1]) >>> grid = DiagramGrid(diagram) >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[r]_{f} \ar@/^1cm/[rr]^{h_{1}} & B \ar[d]^{g} & D \ar[l]^{k} \ar@/_2cm/[ll]_{h} \\ & C & } The default value of ``default_curving_step`` is 4 units. See Also ======== draw, ArrowStringDescription """ def __init__(self): self.unit = "mm" self.default_curving_amount = 3 self.default_curving_step = 4 # This dictionary maps properties to the corresponding arrow # formatters. self.arrow_formatters = {} # This is the default arrow formatter which will be applied to # each arrow independently of its properties. self.default_arrow_formatter = None @staticmethod def _process_loop_morphism(i, j, grid, morphisms_str_info, object_coords): """ Produces the information required for constructing the string representation of a loop morphism. This function is invoked from ``_process_morphism``. See Also ======== _process_morphism """ curving = "" label_pos = "^" looping_start = "" looping_end = "" # This is a loop morphism. Count how many morphisms stick # in each of the four quadrants. Note that straight # vertical and horizontal morphisms count in two quadrants # at the same time (i.e., a morphism going up counts both # in the first and the second quadrants). # The usual numbering (counterclockwise) of quadrants # applies. quadrant = [0, 0, 0, 0] obj = grid[i, j] for m, m_str_info in morphisms_str_info.items(): if (m.domain == obj) and (m.codomain == obj): # That's another loop morphism. Check how it # loops and mark the corresponding quadrants as # busy. (l_s, l_e) = (m_str_info.looping_start, m_str_info.looping_end) if (l_s, l_e) == ("r", "u"): quadrant[0] += 1 elif (l_s, l_e) == ("u", "l"): quadrant[1] += 1 elif (l_s, l_e) == ("l", "d"): quadrant[2] += 1 elif (l_s, l_e) == ("d", "r"): quadrant[3] += 1 continue if m.domain == obj: (end_i, end_j) = object_coords[m.codomain] goes_out = True elif m.codomain == obj: (end_i, end_j) = object_coords[m.domain] goes_out = False else: continue d_i = end_i - i d_j = end_j - j m_curving = m_str_info.curving if (d_i != 0) and (d_j != 0): # This is really a diagonal morphism. 
Detect the # quadrant. if (d_i > 0) and (d_j > 0): quadrant[0] += 1 elif (d_i > 0) and (d_j < 0): quadrant[1] += 1 elif (d_i < 0) and (d_j < 0): quadrant[2] += 1 elif (d_i < 0) and (d_j > 0): quadrant[3] += 1 elif d_i == 0: # Knowing where the other end of the morphism is # and which way it goes, we now have to decide # which quadrant is now the upper one and which is # the lower one. if d_j > 0: if goes_out: upper_quadrant = 0 lower_quadrant = 3 else: upper_quadrant = 3 lower_quadrant = 0 else: if goes_out: upper_quadrant = 2 lower_quadrant = 1 else: upper_quadrant = 1 lower_quadrant = 2 if m_curving: if m_curving == "^": quadrant[upper_quadrant] += 1 elif m_curving == "_": quadrant[lower_quadrant] += 1 else: # This morphism counts in both upper and lower # quadrants. quadrant[upper_quadrant] += 1 quadrant[lower_quadrant] += 1 elif d_j == 0: # Knowing where the other end of the morphism is # and which way it goes, we now have to decide # which quadrant is now the left one and which is # the right one. if d_i < 0: if goes_out: left_quadrant = 1 right_quadrant = 0 else: left_quadrant = 0 right_quadrant = 1 else: if goes_out: left_quadrant = 3 right_quadrant = 2 else: left_quadrant = 2 right_quadrant = 3 if m_curving: if m_curving == "^": quadrant[left_quadrant] += 1 elif m_curving == "_": quadrant[right_quadrant] += 1 else: # This morphism counts in both upper and lower # quadrants. quadrant[left_quadrant] += 1 quadrant[right_quadrant] += 1 # Pick the freest quadrant to curve our morphism into. freest_quadrant = 0 for i in range(4): if quadrant[i] < quadrant[freest_quadrant]: freest_quadrant = i # Now set up proper looping. (looping_start, looping_end) = [("r", "u"), ("u", "l"), ("l", "d"), ("d", "r")][freest_quadrant] return (curving, label_pos, looping_start, looping_end) @staticmethod def _process_horizontal_morphism(i, j, target_j, grid, morphisms_str_info, object_coords): """ Produces the information required for constructing the string representation of a horizontal morphism. This function is invoked from ``_process_morphism``. See Also ======== _process_morphism """ # The arrow is horizontal. Check if it goes from left to # right (``backwards == False``) or from right to left # (``backwards == True``). backwards = False start = j end = target_j if end < start: (start, end) = (end, start) backwards = True # Let's see which objects are there between ``start`` and # ``end``, and then count how many morphisms stick out # upwards, and how many stick out downwards. # # For example, consider the situation: # # B1 C1 # | | # A--B--C--D # | # B2 # # Between the objects `A` and `D` there are two objects: # `B` and `C`. Further, there are two morphisms which # stick out upward (the ones between `B1` and `B` and # between `C` and `C1`) and one morphism which sticks out # downward (the one between `B and `B2`). # # We need this information to decide how to curve the # arrow between `A` and `D`. First of all, since there # are two objects between `A` and `D``, we must curve the # arrow. Then, we will have it curve downward, because # there is more space (less morphisms stick out downward # than upward). 
up = [] down = [] straight_horizontal = [] for k in range(start + 1, end): obj = grid[i, k] if not obj: continue for m in morphisms_str_info: if m.domain == obj: (end_i, end_j) = object_coords[m.codomain] elif m.codomain == obj: (end_i, end_j) = object_coords[m.domain] else: continue if end_i > i: down.append(m) elif end_i < i: up.append(m) elif not morphisms_str_info[m].curving: # This is a straight horizontal morphism, # because it has no curving. straight_horizontal.append(m) if len(up) < len(down): # More morphisms stick out downward than upward, let's # curve the morphism up. if backwards: curving = "_" label_pos = "_" else: curving = "^" label_pos = "^" # Assure that the straight horizontal morphisms have # their labels on the lower side of the arrow. for m in straight_horizontal: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if j1 < j2: m_str_info.label_position = "_" else: m_str_info.label_position = "^" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True else: # More morphisms stick out downward than upward, let's # curve the morphism up. if backwards: curving = "^" label_pos = "^" else: curving = "_" label_pos = "_" # Assure that the straight horizontal morphisms have # their labels on the upper side of the arrow. for m in straight_horizontal: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if j1 < j2: m_str_info.label_position = "^" else: m_str_info.label_position = "_" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True return (curving, label_pos) @staticmethod def _process_vertical_morphism(i, j, target_i, grid, morphisms_str_info, object_coords): """ Produces the information required for constructing the string representation of a vertical morphism. This function is invoked from ``_process_morphism``. See Also ======== _process_morphism """ # This arrow is vertical. Check if it goes from top to # bottom (``backwards == False``) or from bottom to top # (``backwards == True``). backwards = False start = i end = target_i if end < start: (start, end) = (end, start) backwards = True # Let's see which objects are there between ``start`` and # ``end``, and then count how many morphisms stick out to # the left, and how many stick out to the right. # # See the corresponding comment in the previous branch of # this if-statement for more details. left = [] right = [] straight_vertical = [] for k in range(start + 1, end): obj = grid[k, j] if not obj: continue for m in morphisms_str_info: if m.domain == obj: (end_i, end_j) = object_coords[m.codomain] elif m.codomain == obj: (end_i, end_j) = object_coords[m.domain] else: continue if end_j > j: right.append(m) elif end_j < j: left.append(m) elif not morphisms_str_info[m].curving: # This is a straight vertical morphism, # because it has no curving. straight_vertical.append(m) if len(left) < len(right): # More morphisms stick out to the left than to the # right, let's curve the morphism to the right. if backwards: curving = "^" label_pos = "^" else: curving = "_" label_pos = "_" # Assure that the straight vertical morphisms have # their labels on the left side of the arrow. 
for m in straight_vertical: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if i1 < i2: m_str_info.label_position = "^" else: m_str_info.label_position = "_" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True else: # More morphisms stick out to the right than to the # left, let's curve the morphism to the left. if backwards: curving = "_" label_pos = "_" else: curving = "^" label_pos = "^" # Assure that the straight vertical morphisms have # their labels on the right side of the arrow. for m in straight_vertical: (i1, j1) = object_coords[m.domain] (i2, j2) = object_coords[m.codomain] m_str_info = morphisms_str_info[m] if i1 < i2: m_str_info.label_position = "_" else: m_str_info.label_position = "^" # Don't allow any further modifications of the # position of this label. m_str_info.forced_label_position = True return (curving, label_pos) def _process_morphism(self, diagram, grid, morphism, object_coords, morphisms, morphisms_str_info): """ Given the required information, produces the string representation of ``morphism``. """ def repeat_string_cond(times, str_gt, str_lt): """ If ``times > 0``, repeats ``str_gt`` ``times`` times. Otherwise, repeats ``str_lt`` ``-times`` times. """ if times > 0: return str_gt * times else: return str_lt * (-times) def count_morphisms_undirected(A, B): """ Counts how many processed morphisms there are between the two supplied objects. """ return len([m for m in morphisms_str_info if {m.domain, m.codomain} == {A, B}]) def count_morphisms_filtered(dom, cod, curving): """ Counts the processed morphisms which go out of ``dom`` into ``cod`` with curving ``curving``. """ return len([m for m, m_str_info in morphisms_str_info.items() if (m.domain, m.codomain) == (dom, cod) and (m_str_info.curving == curving)]) (i, j) = object_coords[morphism.domain] (target_i, target_j) = object_coords[morphism.codomain] # We now need to determine the direction of # the arrow. delta_i = target_i - i delta_j = target_j - j vertical_direction = repeat_string_cond(delta_i, "d", "u") horizontal_direction = repeat_string_cond(delta_j, "r", "l") curving = "" label_pos = "^" looping_start = "" looping_end = "" if (delta_i == 0) and (delta_j == 0): # This is a loop morphism. (curving, label_pos, looping_start, looping_end) = XypicDiagramDrawer._process_loop_morphism( i, j, grid, morphisms_str_info, object_coords) elif (delta_i == 0) and (abs(j - target_j) > 1): # This is a horizontal morphism. (curving, label_pos) = XypicDiagramDrawer._process_horizontal_morphism( i, j, target_j, grid, morphisms_str_info, object_coords) elif (delta_j == 0) and (abs(i - target_i) > 1): # This is a vertical morphism. (curving, label_pos) = XypicDiagramDrawer._process_vertical_morphism( i, j, target_i, grid, morphisms_str_info, object_coords) count = count_morphisms_undirected(morphism.domain, morphism.codomain) curving_amount = "" if curving: # This morphisms should be curved anyway. curving_amount = self.default_curving_amount + count * \ self.default_curving_step elif count: # There are no objects between the domain and codomain of # the current morphism, but this is not there already are # some morphisms with the same domain and codomain, so we # have to curve this one. 
curving = "^" filtered_morphisms = count_morphisms_filtered( morphism.domain, morphism.codomain, curving) curving_amount = self.default_curving_amount + \ filtered_morphisms * \ self.default_curving_step # Let's now get the name of the morphism. morphism_name = "" if isinstance(morphism, IdentityMorphism): morphism_name = "id_{%s}" + latex(grid[i, j]) elif isinstance(morphism, CompositeMorphism): component_names = [latex(Symbol(component.name)) for component in morphism.components] component_names.reverse() morphism_name = "\\circ ".join(component_names) elif isinstance(morphism, NamedMorphism): morphism_name = latex(Symbol(morphism.name)) return ArrowStringDescription( self.unit, curving, curving_amount, looping_start, looping_end, horizontal_direction, vertical_direction, label_pos, morphism_name) @staticmethod def _check_free_space_horizontal(dom_i, dom_j, cod_j, grid): """ For a horizontal morphism, checks whether there is free space (i.e., space not occupied by any objects) above the morphism or below it. """ if dom_j < cod_j: (start, end) = (dom_j, cod_j) backwards = False else: (start, end) = (cod_j, dom_j) backwards = True # Check for free space above. if dom_i == 0: free_up = True else: free_up = all([grid[dom_i - 1, j] for j in range(start, end + 1)]) # Check for free space below. if dom_i == grid.height - 1: free_down = True else: free_down = all([not grid[dom_i + 1, j] for j in range(start, end + 1)]) return (free_up, free_down, backwards) @staticmethod def _check_free_space_vertical(dom_i, cod_i, dom_j, grid): """ For a vertical morphism, checks whether there is free space (i.e., space not occupied by any objects) to the left of the morphism or to the right of it. """ if dom_i < cod_i: (start, end) = (dom_i, cod_i) backwards = False else: (start, end) = (cod_i, dom_i) backwards = True # Check if there's space to the left. if dom_j == 0: free_left = True else: free_left = all([not grid[i, dom_j - 1] for i in range(start, end + 1)]) if dom_j == grid.width - 1: free_right = True else: free_right = all([not grid[i, dom_j + 1] for i in range(start, end + 1)]) return (free_left, free_right, backwards) @staticmethod def _check_free_space_diagonal(dom_i, cod_i, dom_j, cod_j, grid): """ For a diagonal morphism, checks whether there is free space (i.e., space not occupied by any objects) above the morphism or below it. """ def abs_xrange(start, end): if start < end: return range(start, end + 1) else: return range(end, start + 1) if dom_i < cod_i and dom_j < cod_j: # This morphism goes from top-left to # bottom-right. (start_i, start_j) = (dom_i, dom_j) (end_i, end_j) = (cod_i, cod_j) backwards = False elif dom_i > cod_i and dom_j > cod_j: # This morphism goes from bottom-right to # top-left. (start_i, start_j) = (cod_i, cod_j) (end_i, end_j) = (dom_i, dom_j) backwards = True if dom_i < cod_i and dom_j > cod_j: # This morphism goes from top-right to # bottom-left. (start_i, start_j) = (dom_i, dom_j) (end_i, end_j) = (cod_i, cod_j) backwards = True elif dom_i > cod_i and dom_j < cod_j: # This morphism goes from bottom-left to # top-right. (start_i, start_j) = (cod_i, cod_j) (end_i, end_j) = (dom_i, dom_j) backwards = False # This is an attempt at a fast and furious strategy to # decide where there is free space on the two sides of # a diagonal morphism. For a diagonal morphism # starting at ``(start_i, start_j)`` and ending at # ``(end_i, end_j)`` the rectangle defined by these # two points is considered. The slope of the diagonal # ``alpha`` is then computed. 
Then, for every cell # ``(i, j)`` within the rectangle, the slope # ``alpha1`` of the line through ``(start_i, # start_j)`` and ``(i, j)`` is considered. If # ``alpha1`` is between 0 and ``alpha``, the point # ``(i, j)`` is above the diagonal, if ``alpha1`` is # between ``alpha`` and infinity, the point is below # the diagonal. Also note that, with some beforehand # precautions, this trick works for both the main and # the secondary diagonals of the rectangle. # I have considered the possibility to only follow the # shorter diagonals immediately above and below the # main (or secondary) diagonal. This, however, # wouldn't have resulted in much performance gain or # better detection of outer edges, because of # relatively small sizes of diagram grids, while the # code would have become harder to understand. alpha = float(end_i - start_i)/(end_j - start_j) free_up = True free_down = True for i in abs_xrange(start_i, end_i): if not free_up and not free_down: break for j in abs_xrange(start_j, end_j): if not free_up and not free_down: break if (i, j) == (start_i, start_j): continue if j == start_j: alpha1 = "inf" else: alpha1 = float(i - start_i)/(j - start_j) if grid[i, j]: if (alpha1 == "inf") or (abs(alpha1) > abs(alpha)): free_down = False elif abs(alpha1) < abs(alpha): free_up = False return (free_up, free_down, backwards) def _push_labels_out(self, morphisms_str_info, grid, object_coords): """ For all straight morphisms which form the visual boundary of the laid out diagram, puts their labels on their outer sides. """ def set_label_position(free1, free2, pos1, pos2, backwards, m_str_info): """ Given the information about room available to one side and to the other side of a morphism (``free1`` and ``free2``), sets the position of the morphism label in such a way that it is on the freer side. This latter operations involves choice between ``pos1`` and ``pos2``, taking ``backwards`` in consideration. Thus this function will do nothing if either both ``free1 == True`` and ``free2 == True`` or both ``free1 == False`` and ``free2 == False``. In either case, choosing one side over the other presents no advantage. """ if backwards: (pos1, pos2) = (pos2, pos1) if free1 and not free2: m_str_info.label_position = pos1 elif free2 and not free1: m_str_info.label_position = pos2 for m, m_str_info in morphisms_str_info.items(): if m_str_info.curving or m_str_info.forced_label_position: # This is either a curved morphism, and curved # morphisms have other magic, or the position of this # label has already been fixed. continue if m.domain == m.codomain: # This is a loop morphism, their labels, again have a # different magic. continue (dom_i, dom_j) = object_coords[m.domain] (cod_i, cod_j) = object_coords[m.codomain] if dom_i == cod_i: # Horizontal morphism. (free_up, free_down, backwards) = XypicDiagramDrawer._check_free_space_horizontal( dom_i, dom_j, cod_j, grid) set_label_position(free_up, free_down, "^", "_", backwards, m_str_info) elif dom_j == cod_j: # Vertical morphism. (free_left, free_right, backwards) = XypicDiagramDrawer._check_free_space_vertical( dom_i, cod_i, dom_j, grid) set_label_position(free_left, free_right, "_", "^", backwards, m_str_info) else: # A diagonal morphism. 
(free_up, free_down, backwards) = XypicDiagramDrawer._check_free_space_diagonal( dom_i, cod_i, dom_j, cod_j, grid) set_label_position(free_up, free_down, "^", "_", backwards, m_str_info) @staticmethod def _morphism_sort_key(morphism, object_coords): """ Provides a morphism sorting key such that horizontal or vertical morphisms between neighbouring objects come first, then horizontal or vertical morphisms between more far away objects, and finally, all other morphisms. """ (i, j) = object_coords[morphism.domain] (target_i, target_j) = object_coords[morphism.codomain] if morphism.domain == morphism.codomain: # Loop morphisms should get after diagonal morphisms # so that the proper direction in which to curve the # loop can be determined. return (3, 0, default_sort_key(morphism)) if target_i == i: return (1, abs(target_j - j), default_sort_key(morphism)) if target_j == j: return (1, abs(target_i - i), default_sort_key(morphism)) # Diagonal morphism. return (2, 0, default_sort_key(morphism)) @staticmethod def _build_xypic_string(diagram, grid, morphisms, morphisms_str_info, diagram_format): """ Given a collection of :class:`ArrowStringDescription` describing the morphisms of a diagram and the object layout information of a diagram, produces the final Xy-pic picture. """ # Build the mapping between objects and morphisms which have # them as domains. object_morphisms = {} for obj in diagram.objects: object_morphisms[obj] = [] for morphism in morphisms: object_morphisms[morphism.domain].append(morphism) result = "\\xymatrix%s{\n" % diagram_format for i in range(grid.height): for j in range(grid.width): obj = grid[i, j] if obj: result += latex(obj) + " " morphisms_to_draw = object_morphisms[obj] for morphism in morphisms_to_draw: result += str(morphisms_str_info[morphism]) + " " # Don't put the & after the last column. if j < grid.width - 1: result += "& " # Don't put the line break after the last row. if i < grid.height - 1: result += "\\\\" result += "\n" result += "}\n" return result def draw(self, diagram, grid, masked=None, diagram_format=""): r""" Returns the Xy-pic representation of ``diagram`` laid out in ``grid``. Consider the following simple triangle diagram. >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import DiagramGrid, XypicDiagramDrawer >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g], {g * f: "unique"}) To draw this diagram, its objects need to be laid out with a :class:`DiagramGrid`:: >>> grid = DiagramGrid(diagram) Finally, the drawing: >>> drawer = XypicDiagramDrawer() >>> print(drawer.draw(diagram, grid)) \xymatrix{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } The argument ``masked`` can be used to skip morphisms in the presentation of the diagram: >>> print(drawer.draw(diagram, grid, masked=[g * f])) \xymatrix{ A \ar[r]^{f} & B \ar[ld]^{g} \\ C & } Finally, the ``diagram_format`` argument can be used to specify the format string of the diagram. For example, to increase the spacing by 1 cm, proceeding as follows: >>> print(drawer.draw(diagram, grid, diagram_format="@+1cm")) \xymatrix@+1cm{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } """ # This method works in several steps. It starts by removing # the masked morphisms, if necessary, and then maps objects to # their positions in the grid (coordinate tuples). 
Remember # that objects are unique in ``Diagram`` and in the layout # produced by ``DiagramGrid``, so every object is mapped to a # single coordinate pair. # # The next step is the central step and is concerned with # analysing the morphisms of the diagram and deciding how to # draw them. For example, how to curve the arrows is decided # at this step. The bulk of the analysis is implemented in # ``_process_morphism``, to the result of which the # appropriate formatters are applied. # # The result of the previous step is a list of # ``ArrowStringDescription``. After the analysis and # application of formatters, some extra logic tries to assure # better positioning of morphism labels (for example, an # attempt is made to avoid the situations when arrows cross # labels). This functionality constitutes the next step and # is implemented in ``_push_labels_out``. Note that label # positions which have been set via a formatter are not # affected in this step. # # Finally, at the closing step, the array of # ``ArrowStringDescription`` and the layout information # incorporated in ``DiagramGrid`` are combined to produce the # resulting Xy-pic picture. This part of code lies in # ``_build_xypic_string``. if not masked: morphisms_props = grid.morphisms else: morphisms_props = {} for m, props in grid.morphisms.items(): if m in masked: continue morphisms_props[m] = props # Build the mapping between objects and their position in the # grid. object_coords = {} for i in range(grid.height): for j in range(grid.width): if grid[i, j]: object_coords[grid[i, j]] = (i, j) morphisms = sorted(morphisms_props, key=lambda m: XypicDiagramDrawer._morphism_sort_key( m, object_coords)) # Build the tuples defining the string representations of # morphisms. morphisms_str_info = {} for morphism in morphisms: string_description = self._process_morphism( diagram, grid, morphism, object_coords, morphisms, morphisms_str_info) if self.default_arrow_formatter: self.default_arrow_formatter(string_description) for prop in morphisms_props[morphism]: # prop is a Symbol. TODO: Find out why. if prop.name in self.arrow_formatters: formatter = self.arrow_formatters[prop.name] formatter(string_description) morphisms_str_info[morphism] = string_description # Reposition the labels a bit. self._push_labels_out(morphisms_str_info, grid, object_coords) return XypicDiagramDrawer._build_xypic_string( diagram, grid, morphisms, morphisms_str_info, diagram_format) def xypic_draw_diagram(diagram, masked=None, diagram_format="", groups=None, **hints): r""" Provides a shortcut combining :class:`DiagramGrid` and :class:`XypicDiagramDrawer`. Returns an Xy-pic presentation of ``diagram``. The argument ``masked`` is a list of morphisms which will be not be drawn. The argument ``diagram_format`` is the format string inserted after "\xymatrix". ``groups`` should be a set of logical groups. The ``hints`` will be passed directly to the constructor of :class:`DiagramGrid`. For more information about the arguments, see the docstrings of :class:`DiagramGrid` and ``XypicDiagramDrawer.draw``. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import xypic_draw_diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> diagram = Diagram([f, g], {g * f: "unique"}) >>> print(xypic_draw_diagram(diagram)) \xymatrix{ A \ar[d]_{g\circ f} \ar[r]^{f} & B \ar[ld]^{g} \\ C & } See Also ======== XypicDiagramDrawer, DiagramGrid """ grid = DiagramGrid(diagram, groups, **hints) drawer = XypicDiagramDrawer() return drawer.draw(diagram, grid, masked, diagram_format) @doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',)) def preview_diagram(diagram, masked=None, diagram_format="", groups=None, output='png', viewer=None, euler=True, **hints): """ Combines the functionality of ``xypic_draw_diagram`` and ``sympy.printing.preview``. The arguments ``masked``, ``diagram_format``, ``groups``, and ``hints`` are passed to ``xypic_draw_diagram``, while ``output``, ``viewer, and ``euler`` are passed to ``preview``. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy.categories import preview_diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {g * f: "unique"}) >>> preview_diagram(d) See Also ======== XypicDiagramDrawer """ from sympy.printing import preview latex_output = xypic_draw_diagram(diagram, masked, diagram_format, groups, **hints) preview(latex_output, output, viewer, euler, ("xypic",))
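

# The doctest examples above use the default layout hints.  What follows is a
# minimal, illustrative sketch (not part of the library proper) of how the
# "layout" and "transpose" hints accepted by ``DiagramGrid`` combine with
# ``XypicDiagramDrawer``.  The objects A, B, C and morphisms f, g are
# placeholder names introduced only for this demonstration.
if __name__ == "__main__":  # pragma: no cover
    from sympy.categories import Object, NamedMorphism, Diagram

    A, B, C = Object("A"), Object("B"), Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")
    diagram = Diagram([f, g])

    # "sequential" asks for a line-like arrangement of the objects;
    # "transpose" then swaps rows and columns of the resulting grid.
    grid = DiagramGrid(diagram, layout="sequential", transpose=True)
    print("grid size:", grid.height, "x", grid.width)

    # The drawer only needs the diagram and its grid to emit the Xy-pic code.
    drawer = XypicDiagramDrawer()
    print(drawer.draw(diagram, grid))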
from sympy.core import S, Basic, Dict, Symbol, Tuple, sympify from sympy.core.compatibility import iterable from sympy.core.symbol import Str from sympy.sets import Set, FiniteSet, EmptySet class Class(Set): r""" The base class for any kind of class in the set-theoretic sense. Explanation =========== In axiomatic set theories, everything is a class. A class which can be a member of another class is a set. A class which is not a member of another class is a proper class. The class `\{1, 2\}` is a set; the class of all sets is a proper class. This class is essentially a synonym for :class:`sympy.core.Set`. The goal of this class is to assure easier migration to the eventual proper implementation of set theory. """ is_proper = False class Object(Symbol): """ The base class for any kind of object in an abstract category. Explanation =========== While technically any instance of :class:`~.Basic` will do, this class is the recommended way to create abstract objects in abstract categories. """ class Morphism(Basic): """ The base class for any morphism in an abstract category. Explanation =========== In abstract categories, a morphism is an arrow between two category objects. The object where the arrow starts is called the domain, while the object where the arrow ends is called the codomain. Two morphisms between the same pair of objects are considered to be the same morphisms. To distinguish between morphisms between the same objects use :class:`NamedMorphism`. It is prohibited to instantiate this class. Use one of the derived classes instead. See Also ======== IdentityMorphism, NamedMorphism, CompositeMorphism """ def __new__(cls, domain, codomain): raise(NotImplementedError( "Cannot instantiate Morphism. Use derived classes instead.")) @property def domain(self): """ Returns the domain of the morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f.domain Object("A") """ return self.args[0] @property def codomain(self): """ Returns the codomain of the morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f.codomain Object("B") """ return self.args[1] def compose(self, other): r""" Composes self with the supplied morphism. The order of elements in the composition is the usual order, i.e., to construct `g\circ f` use ``g.compose(f)``. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> g * f CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"), NamedMorphism(Object("B"), Object("C"), "g"))) >>> (g * f).domain Object("A") >>> (g * f).codomain Object("C") """ return CompositeMorphism(other, self) def __mul__(self, other): r""" Composes self with the supplied morphism. The semantics of this operation is given by the following equation: ``g * f == g.compose(f)`` for composable morphisms ``g`` and ``f``. See Also ======== compose """ return self.compose(other) class IdentityMorphism(Morphism): """ Represents an identity morphism. Explanation =========== An identity morphism is a morphism with equal domain and codomain, which acts as an identity with respect to composition. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, IdentityMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> id_A = IdentityMorphism(A) >>> id_B = IdentityMorphism(B) >>> f * id_A == f True >>> id_B * f == f True See Also ======== Morphism """ def __new__(cls, domain): return Basic.__new__(cls, domain) @property def codomain(self): return self.domain class NamedMorphism(Morphism): """ Represents a morphism which has a name. Explanation =========== Names are used to distinguish between morphisms which have the same domain and codomain: two named morphisms are equal if they have the same domains, codomains, and names. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f NamedMorphism(Object("A"), Object("B"), "f") >>> f.name 'f' See Also ======== Morphism """ def __new__(cls, domain, codomain, name): if not name: raise ValueError("Empty morphism names not allowed.") if not isinstance(name, Str): name = Str(name) return Basic.__new__(cls, domain, codomain, name) @property def name(self): """ Returns the name of the morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> f.name 'f' """ return self.args[2].name class CompositeMorphism(Morphism): r""" Represents a morphism which is a composition of other morphisms. Explanation =========== Two composite morphisms are equal if the morphisms they were obtained from (components) are the same and were listed in the same order. The arguments to the constructor for this class should be listed in diagram order: to obtain the composition `g\circ f` from the instances of :class:`Morphism` ``g`` and ``f`` use ``CompositeMorphism(f, g)``. Examples ======== >>> from sympy.categories import Object, NamedMorphism, CompositeMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> g * f CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"), NamedMorphism(Object("B"), Object("C"), "g"))) >>> CompositeMorphism(f, g) == g * f True """ @staticmethod def _add_morphism(t, morphism): """ Intelligently adds ``morphism`` to tuple ``t``. Explanation =========== If ``morphism`` is a composite morphism, its components are added to the tuple. If ``morphism`` is an identity, nothing is added to the tuple. No composability checks are performed. """ if isinstance(morphism, CompositeMorphism): # ``morphism`` is a composite morphism; we have to # denest its components. return t + morphism.components elif isinstance(morphism, IdentityMorphism): # ``morphism`` is an identity. Nothing happens. return t else: return t + Tuple(morphism) def __new__(cls, *components): if components and not isinstance(components[0], Morphism): # Maybe the user has explicitly supplied a list of # morphisms. return CompositeMorphism.__new__(cls, *components[0]) normalised_components = Tuple() for current, following in zip(components, components[1:]): if not isinstance(current, Morphism) or \ not isinstance(following, Morphism): raise TypeError("All components must be morphisms.") if current.codomain != following.domain: raise ValueError("Uncomposable morphisms.") normalised_components = CompositeMorphism._add_morphism( normalised_components, current) # We haven't added the last morphism to the list of normalised # components. Add it now. 
normalised_components = CompositeMorphism._add_morphism( normalised_components, components[-1]) if not normalised_components: # If ``normalised_components`` is empty, only identities # were supplied. Since they all were composable, they are # all the same identities. return components[0] elif len(normalised_components) == 1: # No sense to construct a whole CompositeMorphism. return normalised_components[0] return Basic.__new__(cls, normalised_components) @property def components(self): """ Returns the components of this composite morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).components (NamedMorphism(Object("A"), Object("B"), "f"), NamedMorphism(Object("B"), Object("C"), "g")) """ return self.args[0] @property def domain(self): """ Returns the domain of this composite morphism. The domain of the composite morphism is the domain of its first component. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).domain Object("A") """ return self.components[0].domain @property def codomain(self): """ Returns the codomain of this composite morphism. The codomain of the composite morphism is the codomain of its last component. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).codomain Object("C") """ return self.components[-1].codomain def flatten(self, new_name): """ Forgets the composite structure of this morphism. Explanation =========== If ``new_name`` is not empty, returns a :class:`NamedMorphism` with the supplied name, otherwise returns a :class:`Morphism`. In both cases the domain of the new morphism is the domain of this composite morphism and the codomain of the new morphism is the codomain of this composite morphism. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> (g * f).flatten("h") NamedMorphism(Object("A"), Object("C"), "h") """ return NamedMorphism(self.domain, self.codomain, new_name) class Category(Basic): r""" An (abstract) category. Explanation =========== A category [JoyOfCats] is a quadruple `\mbox{K} = (O, \hom, id, \circ)` consisting of * a (set-theoretical) class `O`, whose members are called `K`-objects, * for each pair `(A, B)` of `K`-objects, a set `\hom(A, B)` whose members are called `K`-morphisms from `A` to `B`, * for a each `K`-object `A`, a morphism `id:A\rightarrow A`, called the `K`-identity of `A`, * a composition law `\circ` associating with every `K`-morphisms `f:A\rightarrow B` and `g:B\rightarrow C` a `K`-morphism `g\circ f:A\rightarrow C`, called the composite of `f` and `g`. Composition is associative, `K`-identities are identities with respect to composition, and the sets `\hom(A, B)` are pairwise disjoint. This class knows nothing about its objects and morphisms. Concrete cases of (abstract) categories should be implemented as classes derived from this one. Certain instances of :class:`Diagram` can be asserted to be commutative in a :class:`Category` by supplying the argument ``commutative_diagrams`` in the constructor. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> K = Category("K", commutative_diagrams=[d]) >>> K.commutative_diagrams == FiniteSet(d) True See Also ======== Diagram """ def __new__(cls, name, objects=EmptySet, commutative_diagrams=EmptySet): if not name: raise ValueError("A Category cannot have an empty name.") if not isinstance(name, Str): name = Str(name) if not isinstance(objects, Class): objects = Class(objects) new_category = Basic.__new__(cls, name, objects, FiniteSet(*commutative_diagrams)) return new_category @property def name(self): """ Returns the name of this category. Examples ======== >>> from sympy.categories import Category >>> K = Category("K") >>> K.name 'K' """ return self.args[0].name @property def objects(self): """ Returns the class of objects of this category. Examples ======== >>> from sympy.categories import Object, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> K = Category("K", FiniteSet(A, B)) >>> K.objects Class(FiniteSet(Object("A"), Object("B"))) """ return self.args[1] @property def commutative_diagrams(self): """ Returns the :class:`~.FiniteSet` of diagrams which are known to be commutative in this category. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram, Category >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> K = Category("K", commutative_diagrams=[d]) >>> K.commutative_diagrams == FiniteSet(d) True """ return self.args[2] def hom(self, A, B): raise NotImplementedError( "hom-sets are not implemented in Category.") def all_morphisms(self): raise NotImplementedError( "Obtaining the class of morphisms is not implemented in Category.") class Diagram(Basic): r""" Represents a diagram in a certain category. Explanation =========== Informally, a diagram is a collection of objects of a category and certain morphisms between them. A diagram is still a monoid with respect to morphism composition; i.e., identity morphisms, as well as all composites of morphisms included in the diagram belong to the diagram. For a more formal approach to this notion see [Pare1970]. The components of composite morphisms are also added to the diagram. No properties are assigned to such morphisms by default. A commutative diagram is often accompanied by a statement of the following kind: "if such morphisms with such properties exist, then such morphisms which such properties exist and the diagram is commutative". To represent this, an instance of :class:`Diagram` includes a collection of morphisms which are the premises and another collection of conclusions. ``premises`` and ``conclusions`` associate morphisms belonging to the corresponding categories with the :class:`~.FiniteSet`'s of their properties. The set of properties of a composite morphism is the intersection of the sets of properties of its components. The domain and codomain of a conclusion morphism should be among the domains and codomains of the morphisms listed as the premises of a diagram. No checks are carried out of whether the supplied object and morphisms do belong to one and the same category. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy import pprint, default_sort_key >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> premises_keys = sorted(d.premises.keys(), key=default_sort_key) >>> pprint(premises_keys, use_unicode=False) [g*f:A-->C, id:A-->A, id:B-->B, id:C-->C, f:A-->B, g:B-->C] >>> pprint(d.premises, use_unicode=False) {g*f:A-->C: EmptySet, id:A-->A: EmptySet, id:B-->B: EmptySet, id:C-->C: EmptyS et, f:A-->B: EmptySet, g:B-->C: EmptySet} >>> d = Diagram([f, g], {g * f: "unique"}) >>> pprint(d.conclusions) {g*f:A-->C: {unique}} References ========== [Pare1970] B. Pareigis: Categories and functors. Academic Press, 1970. """ @staticmethod def _set_dict_union(dictionary, key, value): """ If ``key`` is in ``dictionary``, set the new value of ``key`` to be the union between the old value and ``value``. Otherwise, set the value of ``key`` to ``value. Returns ``True`` if the key already was in the dictionary and ``False`` otherwise. """ if key in dictionary: dictionary[key] = dictionary[key] | value return True else: dictionary[key] = value return False @staticmethod def _add_morphism_closure(morphisms, morphism, props, add_identities=True, recurse_composites=True): """ Adds a morphism and its attributes to the supplied dictionary ``morphisms``. If ``add_identities`` is True, also adds the identity morphisms for the domain and the codomain of ``morphism``. """ if not Diagram._set_dict_union(morphisms, morphism, props): # We have just added a new morphism. if isinstance(morphism, IdentityMorphism): if props: # Properties for identity morphisms don't really # make sense, because very much is known about # identity morphisms already, so much that they # are trivial. Having properties for identity # morphisms would only be confusing. raise ValueError( "Instances of IdentityMorphism cannot have properties.") return if add_identities: empty = EmptySet id_dom = IdentityMorphism(morphism.domain) id_cod = IdentityMorphism(morphism.codomain) Diagram._set_dict_union(morphisms, id_dom, empty) Diagram._set_dict_union(morphisms, id_cod, empty) for existing_morphism, existing_props in list(morphisms.items()): new_props = existing_props & props if morphism.domain == existing_morphism.codomain: left = morphism * existing_morphism Diagram._set_dict_union(morphisms, left, new_props) if morphism.codomain == existing_morphism.domain: right = existing_morphism * morphism Diagram._set_dict_union(morphisms, right, new_props) if isinstance(morphism, CompositeMorphism) and recurse_composites: # This is a composite morphism, add its components as # well. empty = EmptySet for component in morphism.components: Diagram._add_morphism_closure(morphisms, component, empty, add_identities) def __new__(cls, *args): """ Construct a new instance of Diagram. Explanation =========== If no arguments are supplied, an empty diagram is created. If at least an argument is supplied, ``args[0]`` is interpreted as the premises of the diagram. If ``args[0]`` is a list, it is interpreted as a list of :class:`Morphism`'s, in which each :class:`Morphism` has an empty set of properties. If ``args[0]`` is a Python dictionary or a :class:`Dict`, it is interpreted as a dictionary associating to some :class:`Morphism`'s some properties. If at least two arguments are supplied ``args[1]`` is interpreted as the conclusions of the diagram. 
The type of ``args[1]`` is interpreted in exactly the same way as the type of ``args[0]``. If only one argument is supplied, the diagram has no conclusions. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import IdentityMorphism, Diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> IdentityMorphism(A) in d.premises.keys() True >>> g * f in d.premises.keys() True >>> d = Diagram([f, g], {g * f: "unique"}) >>> d.conclusions[g * f] FiniteSet(unique) """ premises = {} conclusions = {} # Here we will keep track of the objects which appear in the # premises. objects = EmptySet if len(args) >= 1: # We've got some premises in the arguments. premises_arg = args[0] if isinstance(premises_arg, list): # The user has supplied a list of morphisms, none of # which have any attributes. empty = EmptySet for morphism in premises_arg: objects |= FiniteSet(morphism.domain, morphism.codomain) Diagram._add_morphism_closure(premises, morphism, empty) elif isinstance(premises_arg, dict) or isinstance(premises_arg, Dict): # The user has supplied a dictionary of morphisms and # their properties. for morphism, props in premises_arg.items(): objects |= FiniteSet(morphism.domain, morphism.codomain) Diagram._add_morphism_closure( premises, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props)) if len(args) >= 2: # We also have some conclusions. conclusions_arg = args[1] if isinstance(conclusions_arg, list): # The user has supplied a list of morphisms, none of # which have any attributes. empty = EmptySet for morphism in conclusions_arg: # Check that no new objects appear in conclusions. if ((sympify(objects.contains(morphism.domain)) is S.true) and (sympify(objects.contains(morphism.codomain)) is S.true)): # No need to add identities and recurse # composites this time. Diagram._add_morphism_closure( conclusions, morphism, empty, add_identities=False, recurse_composites=False) elif isinstance(conclusions_arg, dict) or \ isinstance(conclusions_arg, Dict): # The user has supplied a dictionary of morphisms and # their properties. for morphism, props in conclusions_arg.items(): # Check that no new objects appear in conclusions. if (morphism.domain in objects) and \ (morphism.codomain in objects): # No need to add identities and recurse # composites this time. Diagram._add_morphism_closure( conclusions, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props), add_identities=False, recurse_composites=False) return Basic.__new__(cls, Dict(premises), Dict(conclusions), objects) @property def premises(self): """ Returns the premises of this diagram. Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import IdentityMorphism, Diagram >>> from sympy import pretty >>> A = Object("A") >>> B = Object("B") >>> f = NamedMorphism(A, B, "f") >>> id_A = IdentityMorphism(A) >>> id_B = IdentityMorphism(B) >>> d = Diagram([f]) >>> print(pretty(d.premises, use_unicode=False)) {id:A-->A: EmptySet, id:B-->B: EmptySet, f:A-->B: EmptySet} """ return self.args[0] @property def conclusions(self): """ Returns the conclusions of this diagram. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism >>> from sympy.categories import IdentityMorphism, Diagram >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> IdentityMorphism(A) in d.premises.keys() True >>> g * f in d.premises.keys() True >>> d = Diagram([f, g], {g * f: "unique"}) >>> d.conclusions[g * f] == FiniteSet("unique") True """ return self.args[1] @property def objects(self): """ Returns the :class:`~.FiniteSet` of objects that appear in this diagram. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g]) >>> d.objects FiniteSet(Object("A"), Object("B"), Object("C")) """ return self.args[2] def hom(self, A, B): """ Returns a 2-tuple of sets of morphisms between objects ``A`` and ``B``: one set of morphisms listed as premises, and the other set of morphisms listed as conclusions. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy import pretty >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {g * f: "unique"}) >>> print(pretty(d.hom(A, C), use_unicode=False)) ({g*f:A-->C}, {g*f:A-->C}) See Also ======== Object, Morphism """ premises = EmptySet conclusions = EmptySet for morphism in self.premises.keys(): if (morphism.domain == A) and (morphism.codomain == B): premises |= FiniteSet(morphism) for morphism in self.conclusions.keys(): if (morphism.domain == A) and (morphism.codomain == B): conclusions |= FiniteSet(morphism) return (premises, conclusions) def is_subdiagram(self, diagram): """ Checks whether ``diagram`` is a subdiagram of ``self``. Diagram `D'` is a subdiagram of `D` if all premises (conclusions) of `D'` are contained in the premises (conclusions) of `D`. The morphisms contained both in `D'` and `D` should have the same properties for `D'` to be a subdiagram of `D`. Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {g * f: "unique"}) >>> d1 = Diagram([f]) >>> d.is_subdiagram(d1) True >>> d1.is_subdiagram(d) False """ premises = all([(m in self.premises) and (diagram.premises[m] == self.premises[m]) for m in diagram.premises]) if not premises: return False conclusions = all([(m in self.conclusions) and (diagram.conclusions[m] == self.conclusions[m]) for m in diagram.conclusions]) # Premises is surely ``True`` here. return conclusions def subdiagram_from_objects(self, objects): """ If ``objects`` is a subset of the objects of ``self``, returns a diagram which has as premises all those premises of ``self`` which have a domains and codomains in ``objects``, likewise for conclusions. Properties are preserved. 
Examples ======== >>> from sympy.categories import Object, NamedMorphism, Diagram >>> from sympy import FiniteSet >>> A = Object("A") >>> B = Object("B") >>> C = Object("C") >>> f = NamedMorphism(A, B, "f") >>> g = NamedMorphism(B, C, "g") >>> d = Diagram([f, g], {f: "unique", g*f: "veryunique"}) >>> d1 = d.subdiagram_from_objects(FiniteSet(A, B)) >>> d1 == Diagram([f], {f: "unique"}) True """ if not objects.is_subset(self.objects): raise ValueError( "Supplied objects should all belong to the diagram.") new_premises = {} for morphism, props in self.premises.items(): if ((sympify(objects.contains(morphism.domain)) is S.true) and (sympify(objects.contains(morphism.codomain)) is S.true)): new_premises[morphism] = props new_conclusions = {} for morphism, props in self.conclusions.items(): if ((sympify(objects.contains(morphism.domain)) is S.true) and (sympify(objects.contains(morphism.codomain)) is S.true)): new_conclusions[morphism] = props return Diagram(new_premises, new_conclusions)
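
# An illustrative, self-contained sketch (not part of the original module):
# how Diagram, Category and subdiagram_from_objects fit together.  The helper
# name ``_demo_commutative_triangle`` is ours and purely hypothetical.
def _demo_commutative_triangle():
    """Build a commutative triangle, wrap it in a category, and cut out
    the A-B edge as a subdiagram."""
    from sympy import FiniteSet
    from sympy.categories import Object, NamedMorphism, Diagram, Category

    A, B, C = Object("A"), Object("B"), Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")

    # Premises close over identities and composites; the conclusion gives
    # g*f the "unique" property.
    d = Diagram([f, g], {g * f: "unique"})
    K = Category("K", objects=d.objects, commutative_diagrams=[d])

    # Restricting to {A, B} keeps only the morphisms living on those objects,
    # with their properties preserved.
    sub = d.subdiagram_from_objects(FiniteSet(A, B))
    return K, sub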
from typing import Any, Set from functools import reduce from itertools import permutations from sympy.combinatorics import Permutation from sympy.core import ( Basic, Expr, Function, diff, Pow, Mul, Add, Atom, Lambda, S, Tuple, Dict ) from sympy.core.cache import cacheit from sympy.core.symbol import Symbol, Dummy from sympy.core.symbol import Str from sympy.core.sympify import _sympify from sympy.functions import factorial from sympy.matrices import ImmutableDenseMatrix as Matrix from sympy.simplify import simplify from sympy.solvers import solve from sympy.utilities.exceptions import SymPyDeprecationWarning # TODO you are a bit excessive in the use of Dummies # TODO dummy point, literal field # TODO too often one needs to call doit or simplify on the output, check the # tests and find out why from sympy.tensor.array import ImmutableDenseNDimArray class Manifold(Atom): """A mathematical manifold. Explanation =========== A manifold is a topological space that locally resembles Euclidean space near each point [1]. This class does not provide any means to study the topological characteristics of the manifold that it represents, though. Parameters ========== name : str The name of the manifold. dim : int The dimension of the manifold. Examples ======== >>> from sympy.diffgeom import Manifold >>> m = Manifold('M', 2) >>> m M >>> m.dim 2 References ========== .. [1] https://en.wikipedia.org/wiki/Manifold """ def __new__(cls, name, dim, **kwargs): if not isinstance(name, Str): name = Str(name) dim = _sympify(dim) obj = super().__new__(cls, name, dim) obj.patches = _deprecated_list( "Manifold.patches", "external container for registry", 19321, "1.7", [] ) return obj @property def name(self): return self.args[0] @property def dim(self): return self.args[1] class Patch(Atom): """A patch on a manifold. Explanation =========== Coordinate patch, or patch in short, is a simply-connected open set around a point in the manifold [1]. On a manifold one can have many patches that do not always include the whole manifold. On these patches coordinate charts can be defined that permit the parameterization of any point on the patch in terms of a tuple of real numbers (the coordinates). This class does not provide any means to study the topological characteristics of the patch that it represents. Parameters ========== name : str The name of the patch. manifold : Manifold The manifold on which the patch is defined. Examples ======== >>> from sympy.diffgeom import Manifold, Patch >>> m = Manifold('M', 2) >>> p = Patch('P', m) >>> p P >>> p.dim 2 References ========== .. [1] G. Sussman, J. Wisdom, W. Farr, Functional Differential Geometry (2013) """ def __new__(cls, name, manifold, **kwargs): if not isinstance(name, Str): name = Str(name) obj = super().__new__(cls, name, manifold) obj.manifold.patches.append(obj) # deprecated obj.coord_systems = _deprecated_list( "Patch.coord_systems", "external container for registry", 19321, "1.7", [] ) return obj @property def name(self): return self.args[0] @property def manifold(self): return self.args[1] @property def dim(self): return self.manifold.dim class CoordSystem(Atom): """A coordinate system defined on the patch. Explanation =========== Coordinate system is a system that uses one or more coordinates to uniquely determine the position of the points or other geometric elements on a manifold [1]. By passing Symbols to *symbols* parameter, user can define the name and assumptions of coordinate symbols of the coordinate system. 
If not passed, these symbols are generated automatically and are assumed to be real valued. By passing *relations* parameter, user can define the tranform relations of coordinate systems. Inverse transformation and indirect transformation can be found automatically. If this parameter is not passed, coordinate transformation cannot be done. Parameters ========== name : str The name of the coordinate system. patch : Patch The patch where the coordinate system is defined. symbols : list of Symbols, optional Defines the names and assumptions of coordinate symbols. relations : dict, optional - key : tuple of two strings, who are the names of systems where the coordinates transform from and transform to. - value : Lambda returning the transformed coordinates. Examples ======== >>> from sympy import symbols, pi, Lambda, Matrix, sqrt, atan2, cos, sin >>> from sympy.diffgeom import Manifold, Patch, CoordSystem >>> m = Manifold('M', 2) >>> p = Patch('P', m) >>> x, y = symbols('x y', real=True) >>> r, theta = symbols('r theta', nonnegative=True) >>> relation_dict = { ... ('Car2D', 'Pol'): Lambda((x, y), Matrix([sqrt(x**2 + y**2), atan2(y, x)])), ... ('Pol', 'Car2D'): Lambda((r, theta), Matrix([r*cos(theta), r*sin(theta)])) ... } >>> Car2D = CoordSystem('Car2D', p, [x, y], relation_dict) >>> Pol = CoordSystem('Pol', p, [r, theta], relation_dict) >>> Car2D Car2D >>> Car2D.dim 2 >>> Car2D.symbols [x, y] >>> Car2D.transformation(Pol) Lambda((x, y), Matrix([ [sqrt(x**2 + y**2)], [ atan2(y, x)]])) >>> Car2D.transform(Pol) Matrix([ [sqrt(x**2 + y**2)], [ atan2(y, x)]]) >>> Car2D.transform(Pol, [1, 2]) Matrix([ [sqrt(5)], [atan(2)]]) >>> Pol.jacobian(Car2D) Matrix([ [cos(theta), -r*sin(theta)], [sin(theta), r*cos(theta)]]) >>> Pol.jacobian(Car2D, [1, pi/2]) Matrix([ [0, -1], [1, 0]]) References ========== .. 
[1] https://en.wikipedia.org/wiki/Coordinate_system """ def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): if not isinstance(name, Str): name = Str(name) # canonicallize the symbols if symbols is None: names = kwargs.get('names', None) if names is None: symbols = Tuple( *[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)] ) else: SymPyDeprecationWarning( feature="Class signature 'names' of CoordSystem", useinstead="class signature 'symbols'", issue=19321, deprecated_since_version="1.7" ).warn() symbols = Tuple( *[Symbol(n, real=True) for n in names] ) else: syms = [] for s in symbols: if isinstance(s, Symbol): syms.append(Symbol(s.name, **s._assumptions.generator)) elif isinstance(s, str): SymPyDeprecationWarning( feature="Passing str as coordinate symbol's name", useinstead="Symbol which contains the name and assumption for coordinate symbol", issue=19321, deprecated_since_version="1.7" ).warn() syms.append(Symbol(s, real=True)) symbols = Tuple(*syms) # canonicallize the relations rel_temp = {} for k,v in relations.items(): s1, s2 = k if not isinstance(s1, Str): s1 = Str(s1) if not isinstance(s2, Str): s2 = Str(s2) key = Tuple(s1, s2) rel_temp[key] = v relations = Dict(rel_temp) # construct the object obj = super().__new__(cls, name, patch, symbols, relations) # Add deprecated attributes obj.transforms = _deprecated_dict( "Mutable CoordSystem.transforms", "'relations' parameter in class signature", 19321, "1.7", {} ) obj._names = [str(n) for n in symbols] obj.patch.coord_systems.append(obj) # deprecated obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated obj._dummy = Dummy() return obj @property def name(self): return self.args[0] @property def patch(self): return self.args[1] @property def manifold(self): return self.patch.manifold @property def symbols(self): return [ CoordinateSymbol( self, i, **s._assumptions.generator ) for i,s in enumerate(self.args[2]) ] @property def relations(self): return self.args[3] @property def dim(self): return self.patch.dim ########################################################################## # Finding transformation relation ########################################################################## def transformation(self, sys): """ Return coordinate transform relation from *self* to *sys* as Lambda. 
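
Examples
========

A small, hedged illustration using the predefined plane coordinate systems
from ``sympy.diffgeom.rn`` (the variable name ``rect_to_polar`` is ours):

>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> rect_to_polar = R2_r.transformation(R2_p)
>>> rect_to_polar(1, 0)
Matrix([
[1],
[0]])
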
""" if self.relations != sys.relations: raise TypeError( "Two coordinate systems have different relations") key = Tuple(self.name, sys.name) if key in self.relations: return self.relations[key] elif key[::-1] in self.relations: return self._inverse_transformation(sys, self) else: return self._indirect_transformation(self, sys) @staticmethod def _inverse_transformation(sys1, sys2): # Find the transformation relation from sys2 to sys1 forward_transform = sys1.transform(sys2) forward_syms, forward_results = forward_transform.args inv_syms = [i.as_dummy() for i in forward_syms] inv_results = solve( [t[0] - t[1] for t in zip(inv_syms, forward_results)], list(forward_syms), dict=True)[0] inv_results = [inv_results[s] for s in forward_syms] signature = tuple(inv_syms) expr = Matrix(inv_results) return Lambda(signature, expr) @classmethod @cacheit def _indirect_transformation(cls, sys1, sys2): # Find the transformation relation between two indirectly connected coordinate systems path = cls._dijkstra(sys1, sys2) Lambdas = [] for i in range(len(path) - 1): s1, s2 = path[i], path[i + 1] Lambdas.append(s1.transformation(s2)) syms = Lambdas[-1].signature expr = syms for l in reversed(Lambdas): expr = l(*expr) return Lambda(syms, expr) @staticmethod def _dijkstra(sys1, sys2): # Use Dijkstra algorithm to find the shortest path between two indirectly-connected # coordinate systems relations = sys1.relations graph = {} for s1, s2 in relations.keys(): if s1 not in graph: graph[s1] = {s2} else: graph[s1].add(s2) if s2 not in graph: graph[s2] = {s1} else: graph[s2].add(s1) path_dict = {sys:[0, [], 0] for sys in graph} # minimum distance, path, times of visited def visit(sys): path_dict[sys][2] = 1 for newsys in graph[sys]: distance = path_dict[sys][0] + 1 if path_dict[newsys][0] >= distance or not path_dict[newsys][1]: path_dict[newsys][0] = distance path_dict[newsys][1] = [i for i in path_dict[sys][1]] path_dict[newsys][1].append(sys) visit(sys1) while True: min_distance = max(path_dict.values(), key=lambda x:x[0])[0] newsys = None for sys, lst in path_dict.items(): if 0 < lst[0] <= min_distance and not lst[2]: min_distance = lst[0] newsys = sys if newsys is None: break visit(newsys) result = path_dict[sys2][1] result.append(sys2) if result == [sys2]: raise KeyError("Two coordinate systems are not connected.") return result def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False): SymPyDeprecationWarning( feature="CoordSystem.connect_to", useinstead="new instance generated with new 'transforms' parameter", issue=19321, deprecated_since_version="1.7" ).warn() from_coords, to_exprs = dummyfy(from_coords, to_exprs) self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs) if inverse: to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs) if fill_in_gaps: self._fill_gaps_in_transformations() @staticmethod def _inv_transf(from_coords, to_exprs): # Will be removed when connect_to is removed inv_from = [i.as_dummy() for i in from_coords] inv_to = solve( [t[0] - t[1] for t in zip(inv_from, to_exprs)], list(from_coords), dict=True)[0] inv_to = [inv_to[fc] for fc in from_coords] return Matrix(inv_from), Matrix(inv_to) @staticmethod def _fill_gaps_in_transformations(): # Will be removed when connect_to is removed raise NotImplementedError ########################################################################## # Coordinate transformations ########################################################################## def transform(self, sys, coordinates=None): """ Return the 
result of coordinate transformation from *self* to *sys*. If coordinates are not given, coordinate symbols of *self* are used. """ if coordinates is None: coordinates = Matrix(self.symbols) else: coordinates = Matrix(coordinates) if self != sys: transf = self.transformation(sys) coordinates = transf(*coordinates) return coordinates def coord_tuple_transform_to(self, to_sys, coords): """Transform ``coords`` to coord system ``to_sys``.""" SymPyDeprecationWarning( feature="CoordSystem.coord_tuple_transform_to", useinstead="CoordSystem.transform", issue=19321, deprecated_since_version="1.7" ).warn() coords = Matrix(coords) if self != to_sys: transf = self.transforms[to_sys] coords = transf[1].subs(list(zip(transf[0], coords))) return coords def jacobian(self, sys, coordinates=None): """ Return the jacobian matrix of a transformation. """ result = self.transform(sys).jacobian(self.symbols) if coordinates is not None: result = result.subs(list(zip(self.symbols, coordinates))) return result jacobian_matrix = jacobian def jacobian_determinant(self, sys, coordinates=None): """Return the jacobian determinant of a transformation.""" return self.jacobian(sys, coordinates).det() ########################################################################## # Points ########################################################################## def point(self, coords): """Create a ``Point`` with coordinates given in this coord system.""" return Point(self, coords) def point_to_coords(self, point): """Calculate the coordinates of a point in this coord system.""" return point.coords(self) ########################################################################## # Base fields. ########################################################################## def base_scalar(self, coord_index): """Return ``BaseScalarField`` that takes a point and returns one of the coordinates.""" return BaseScalarField(self, coord_index) coord_function = base_scalar def base_scalars(self): """Returns a list of all coordinate functions. For more details see the ``base_scalar`` method of this class.""" return [self.base_scalar(i) for i in range(self.dim)] coord_functions = base_scalars def base_vector(self, coord_index): """Return a basis vector field. The basis vector field for this coordinate system. It is also an operator on scalar fields.""" return BaseVectorField(self, coord_index) def base_vectors(self): """Returns a list of all base vectors. For more details see the ``base_vector`` method of this class.""" return [self.base_vector(i) for i in range(self.dim)] def base_oneform(self, coord_index): """Return a basis 1-form field. The basis one-form field for this coordinate system. It is also an operator on vector fields.""" return Differential(self.coord_function(coord_index)) def base_oneforms(self): """Returns a list of all base oneforms. For more details see the ``base_oneform`` method of this class.""" return [self.base_oneform(i) for i in range(self.dim)] class CoordinateSymbol(Symbol): """A symbol which denotes an abstract value of i-th coordinate of the coordinate system with given context. Explanation =========== Each coordinates in coordinate system are represented by unique symbol, such as x, y, z in Cartesian coordinate system. You may not construct this class directly. Instead, use `symbols` method of CoordSystem. 
Parameters ========== coord_sys : CoordSystem index : integer Examples ======== >>> from sympy import symbols >>> from sympy.diffgeom import Manifold, Patch, CoordSystem >>> m = Manifold('M', 2) >>> p = Patch('P', m) >>> _x, _y = symbols('x y', nonnegative=True) >>> C = CoordSystem('C', p, [_x, _y]) >>> x, y = C.symbols >>> x.name 'x' >>> x.coord_sys == C True >>> x.index 0 >>> x.is_nonnegative True """ def __new__(cls, coord_sys, index, **assumptions): name = coord_sys.args[2][index].name obj = super().__new__(cls, name, **assumptions) obj.coord_sys = coord_sys obj.index = index return obj def __getnewargs__(self): return (self.coord_sys, self.index) def _hashable_content(self): return ( self.coord_sys, self.index ) + tuple(sorted(self.assumptions0.items())) class Point(Basic): """Point defined in a coordinate system. Explanation =========== Mathematically, point is defined in the manifold and does not have any coordinates by itself. Coordinate system is what imbues the coordinates to the point by coordinate chart. However, due to the difficulty of realizing such logic, you must supply a coordinate system and coordinates to define a Point here. The usage of this object after its definition is independent of the coordinate system that was used in order to define it, however due to limitations in the simplification routines you can arrive at complicated expressions if you use inappropriate coordinate systems. Parameters ========== coord_sys : CoordSystem coords : list The coordinates of the point. Examples ======== >>> from sympy import pi >>> from sympy.diffgeom import Point >>> from sympy.diffgeom.rn import R2, R2_r, R2_p >>> rho, theta = R2_p.symbols >>> p = Point(R2_p, [rho, 3*pi/4]) >>> p.manifold == R2 True >>> p.coords() Matrix([ [ rho], [3*pi/4]]) >>> p.coords(R2_r) Matrix([ [-sqrt(2)*rho/2], [ sqrt(2)*rho/2]]) """ def __new__(cls, coord_sys, coords, **kwargs): coords = Matrix(coords) obj = super().__new__(cls, coord_sys, coords) obj._coord_sys = coord_sys obj._coords = coords return obj @property def patch(self): return self._coord_sys.patch @property def manifold(self): return self._coord_sys.manifold @property def dim(self): return self.manifold.dim def coords(self, sys=None): """ Coordinates of the point in given coordinate system. If coordinate system is not passed, it returns the coordinates in the coordinate system in which the poin was defined. """ if sys is None: return self._coords else: return self._coord_sys.transform(sys, self._coords) @property def free_symbols(self): return self._coords.free_symbols class BaseScalarField(Expr): """Base scalar field over a manifold for a given coordinate system. Explanation =========== A scalar field takes a point as an argument and returns a scalar. A base scalar field of a coordinate system takes a point and returns one of the coordinates of that point in the coordinate system in question. To define a scalar field you need to choose the coordinate system and the index of the coordinate. The use of the scalar field after its definition is independent of the coordinate system in which it was defined, however due to limitations in the simplification routines you may arrive at more complicated expression if you use unappropriate coordinate systems. You can build complicated scalar fields by just building up SymPy expressions containing ``BaseScalarField`` instances. 
Parameters ========== coord_sys : CoordSystem index : integer Examples ======== >>> from sympy import Function, pi >>> from sympy.diffgeom import BaseScalarField >>> from sympy.diffgeom.rn import R2_r, R2_p >>> rho, _ = R2_p.symbols >>> point = R2_p.point([rho, 0]) >>> fx, fy = R2_r.base_scalars() >>> ftheta = BaseScalarField(R2_r, 1) >>> fx(point) rho >>> fy(point) 0 >>> (fx**2+fy**2).rcall(point) rho**2 >>> g = Function('g') >>> fg = g(ftheta-pi) >>> fg.rcall(point) g(-pi) """ is_commutative = True def __new__(cls, coord_sys, index, **kwargs): index = _sympify(index) obj = super().__new__(cls, coord_sys, index) obj._coord_sys = coord_sys obj._index = index return obj @property def coord_sys(self): return self.args[0] @property def index(self): return self.args[1] @property def patch(self): return self.coord_sys.patch @property def manifold(self): return self.coord_sys.manifold @property def dim(self): return self.manifold.dim def __call__(self, *args): """Evaluating the field at a point or doing nothing. If the argument is a ``Point`` instance, the field is evaluated at that point. The field is returned itself if the argument is any other object. It is so in order to have working recursive calling mechanics for all fields (check the ``__call__`` method of ``Expr``). """ point = args[0] if len(args) != 1 or not isinstance(point, Point): return self coords = point.coords(self._coord_sys) # XXX Calling doit is necessary with all the Subs expressions # XXX Calling simplify is necessary with all the trig expressions return simplify(coords[self._index]).doit() # XXX Workaround for limitations on the content of args free_symbols = set() # type: Set[Any] def doit(self): return self class BaseVectorField(Expr): r"""Base vector field over a manifold for a given coordinate system. Explanation =========== A vector field is an operator taking a scalar field and returning a directional derivative (which is also a scalar field). A base vector field is the same type of operator, however the derivation is specifically done with respect to a chosen coordinate. To define a base vector field you need to choose the coordinate system and the index of the coordinate. The use of the vector field after its definition is independent of the coordinate system in which it was defined, however due to limitations in the simplification routines you may arrive at more complicated expression if you use unappropriate coordinate systems. 
Parameters ========== coord_sys : CoordSystem index : integer Examples ======== >>> from sympy import Function >>> from sympy.diffgeom.rn import R2_p, R2_r >>> from sympy.diffgeom import BaseVectorField >>> from sympy import pprint >>> x, y = R2_r.symbols >>> rho, theta = R2_p.symbols >>> fx, fy = R2_r.base_scalars() >>> point_p = R2_p.point([rho, theta]) >>> point_r = R2_r.point([x, y]) >>> g = Function('g') >>> s_field = g(fx, fy) >>> v = BaseVectorField(R2_r, 1) >>> pprint(v(s_field)) / d \| |---(g(x, xi))|| \dxi /|xi=y >>> pprint(v(s_field).rcall(point_r).doit()) d --(g(x, y)) dy >>> pprint(v(s_field).rcall(point_p)) / d \| |---(g(rho*cos(theta), xi))|| \dxi /|xi=rho*sin(theta) """ is_commutative = False def __new__(cls, coord_sys, index, **kwargs): index = _sympify(index) obj = super().__new__(cls, coord_sys, index) obj._coord_sys = coord_sys obj._index = index return obj @property def coord_sys(self): return self.args[0] @property def index(self): return self.args[1] @property def patch(self): return self.coord_sys.patch @property def manifold(self): return self.coord_sys.manifold @property def dim(self): return self.manifold.dim def __call__(self, scalar_field): """Apply on a scalar field. The action of a vector field on a scalar field is a directional differentiation. If the argument is not a scalar field an error is raised. """ if covariant_order(scalar_field) or contravariant_order(scalar_field): raise ValueError('Only scalar fields can be supplied as arguments to vector fields.') if scalar_field is None: return self base_scalars = list(scalar_field.atoms(BaseScalarField)) # First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r) d_var = self._coord_sys._dummy # TODO: you need a real dummy function for the next line d_funcs = [Function('_#_%s' % i)(d_var) for i, b in enumerate(base_scalars)] d_result = scalar_field.subs(list(zip(base_scalars, d_funcs))) d_result = d_result.diff(d_var) # Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y)) coords = self._coord_sys.symbols d_funcs_deriv = [f.diff(d_var) for f in d_funcs] d_funcs_deriv_sub = [] for b in base_scalars: jac = self._coord_sys.jacobian(b._coord_sys, coords) d_funcs_deriv_sub.append(jac[b._index, self._index]) d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub))) # Remove the dummies result = d_result.subs(list(zip(d_funcs, base_scalars))) result = result.subs(list(zip(coords, self._coord_sys.coord_functions()))) return result.doit() def _find_coords(expr): # Finds CoordinateSystems existing in expr fields = expr.atoms(BaseScalarField, BaseVectorField) result = set() for f in fields: result.add(f._coord_sys) return result class Commutator(Expr): r"""Commutator of two vector fields. Explanation =========== The commutator of two vector fields `v_1` and `v_2` is defined as the vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal to `v_1(v_2(f)) - v_2(v_1(f))`. 
Examples ======== >>> from sympy.diffgeom.rn import R2_p, R2_r >>> from sympy.diffgeom import Commutator >>> from sympy.simplify import simplify >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> e_r = R2_p.base_vector(0) >>> c_xy = Commutator(e_x, e_y) >>> c_xr = Commutator(e_x, e_r) >>> c_xy 0 Unfortunately, the current code is not able to compute everything: >>> c_xr Commutator(e_x, e_rho) >>> simplify(c_xr(fy**2)) -2*cos(theta)*y**2/(x**2 + y**2) """ def __new__(cls, v1, v2): if (covariant_order(v1) or contravariant_order(v1) != 1 or covariant_order(v2) or contravariant_order(v2) != 1): raise ValueError( 'Only commutators of vector fields are supported.') if v1 == v2: return S.Zero coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)]) if len(coord_sys) == 1: # Only one coordinate systems is used, hence it is easy enough to # actually evaluate the commutator. if all(isinstance(v, BaseVectorField) for v in (v1, v2)): return S.Zero bases_1, bases_2 = [list(v.atoms(BaseVectorField)) for v in (v1, v2)] coeffs_1 = [v1.expand().coeff(b) for b in bases_1] coeffs_2 = [v2.expand().coeff(b) for b in bases_2] res = 0 for c1, b1 in zip(coeffs_1, bases_1): for c2, b2 in zip(coeffs_2, bases_2): res += c1*b1(c2)*b2 - c2*b2(c1)*b1 return res else: obj = super().__new__(cls, v1, v2) obj._v1 = v1 # deprecated assignment obj._v2 = v2 # deprecated assignment return obj @property def v1(self): return self.args[0] @property def v2(self): return self.args[1] def __call__(self, scalar_field): """Apply on a scalar field. If the argument is not a scalar field an error is raised. """ return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field)) class Differential(Expr): r"""Return the differential (exterior derivative) of a form field. Explanation =========== The differential of a form (i.e. the exterior derivative) has a complicated definition in the general case. The differential `df` of the 0-form `f` is defined for any vector field `v` as `df(v) = v(f)`. Examples ======== >>> from sympy import Function >>> from sympy.diffgeom.rn import R2_r >>> from sympy.diffgeom import Differential >>> from sympy import pprint >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> g = Function('g') >>> s_field = g(fx, fy) >>> dg = Differential(s_field) >>> dg d(g(x, y)) >>> pprint(dg(e_x)) / d \| |---(g(xi, y))|| \dxi /|xi=x >>> pprint(dg(e_y)) / d \| |---(g(x, xi))|| \dxi /|xi=y Applying the exterior derivative operator twice always results in: >>> Differential(dg) 0 """ is_commutative = False def __new__(cls, form_field): if contravariant_order(form_field): raise ValueError( 'A vector field was supplied as an argument to Differential.') if isinstance(form_field, Differential): return S.Zero else: obj = super().__new__(cls, form_field) obj._form_field = form_field # deprecated assignment return obj @property def form_field(self): return self.args[0] def __call__(self, *vector_fields): """Apply on a list of vector_fields. Explanation =========== If the number of vector fields supplied is not equal to 1 + the order of the form field inside the differential the result is undefined. For 1-forms (i.e. differentials of scalar fields) the evaluation is done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector field, the differential is returned unchanged. This is done in order to permit partial contractions for higher forms. 
In the general case the evaluation is done by applying the form field inside the differential on a list with one less elements than the number of elements in the original list. Lowering the number of vector fields is achieved through replacing each pair of fields by their commutator. If the arguments are not vectors or ``None``s an error is raised. """ if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None for a in vector_fields): raise ValueError('The arguments supplied to Differential should be vector fields or Nones.') k = len(vector_fields) if k == 1: if vector_fields[0]: return vector_fields[0].rcall(self._form_field) return self else: # For higher form it is more complicated: # Invariant formula: # https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula # df(v1, ... vn) = +/- vi(f(v1..no i..vn)) # +/- f([vi,vj],v1..no i, no j..vn) f = self._form_field v = vector_fields ret = 0 for i in range(k): t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:])) ret += (-1)**i*t for j in range(i + 1, k): c = Commutator(v[i], v[j]) if c: # TODO this is ugly - the Commutator can be Zero and # this causes the next line to fail t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:]) ret += (-1)**(i + j)*t return ret class TensorProduct(Expr): """Tensor product of forms. Explanation =========== The tensor product permits the creation of multilinear functionals (i.e. higher order tensors) out of lower order fields (e.g. 1-forms and vector fields). However, the higher tensors thus created lack the interesting features provided by the other type of product, the wedge product, namely they are not antisymmetric and hence are not form fields. Examples ======== >>> from sympy.diffgeom.rn import R2_r >>> from sympy.diffgeom import TensorProduct >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> dx, dy = R2_r.base_oneforms() >>> TensorProduct(dx, dy)(e_x, e_y) 1 >>> TensorProduct(dx, dy)(e_y, e_x) 0 >>> TensorProduct(dx, fx*dy)(fx*e_x, e_y) x**2 >>> TensorProduct(e_x, e_y)(fx**2, fy**2) 4*x*y >>> TensorProduct(e_y, dx)(fy) dx You can nest tensor products. >>> tp1 = TensorProduct(dx, dy) >>> TensorProduct(tp1, dx)(e_x, e_y, e_x) 1 You can make partial contraction for instance when 'raising an index'. Putting ``None`` in the second argument of ``rcall`` means that the respective position in the tensor product is left as it is. >>> TP = TensorProduct >>> metric = TP(dx, dx) + 3*TP(dy, dy) >>> metric.rcall(e_y, None) 3*dy Or automatically pad the args with ``None`` without specifying them. >>> metric.rcall(e_y) 3*dy """ def __new__(cls, *args): scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0]) multifields = [m for m in args if covariant_order(m) + contravariant_order(m)] if multifields: if len(multifields) == 1: return scalar*multifields[0] return scalar*super().__new__(cls, *multifields) else: return scalar def __call__(self, *fields): """Apply on a list of fields. If the number of input fields supplied is not equal to the order of the tensor product field, the list of arguments is padded with ``None``'s. The list of arguments is divided in sublists depending on the order of the forms inside the tensor product. The sublists are provided as arguments to these forms and the resulting expressions are given to the constructor of ``TensorProduct``. 
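
As a brief, hedged illustration of the padding behaviour (using the
predefined plane coordinate system from ``sympy.diffgeom.rn``): calling a
second-order tensor product with a single vector field pads the missing
argument with ``None``, leaving the second slot uncontracted.

>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import TensorProduct
>>> dx, dy = R2_r.base_oneforms()
>>> e_x, e_y = R2_r.base_vectors()
>>> TensorProduct(dx, dy)(e_x)
dy
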
""" tot_order = covariant_order(self) + contravariant_order(self) tot_args = len(fields) if tot_args != tot_order: fields = list(fields) + [None]*(tot_order - tot_args) orders = [covariant_order(f) + contravariant_order(f) for f in self._args] indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)] fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])] multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)] return TensorProduct(*multipliers) class WedgeProduct(TensorProduct): """Wedge product of forms. Explanation =========== In the context of integration only completely antisymmetric forms make sense. The wedge product permits the creation of such forms. Examples ======== >>> from sympy.diffgeom.rn import R2_r >>> from sympy.diffgeom import WedgeProduct >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> dx, dy = R2_r.base_oneforms() >>> WedgeProduct(dx, dy)(e_x, e_y) 1 >>> WedgeProduct(dx, dy)(e_y, e_x) -1 >>> WedgeProduct(dx, fx*dy)(fx*e_x, e_y) x**2 >>> WedgeProduct(e_x, e_y)(fy, None) -e_x You can nest wedge products. >>> wp1 = WedgeProduct(dx, dy) >>> WedgeProduct(wp1, dx)(e_x, e_y, e_x) 0 """ # TODO the calculation of signatures is slow # TODO you do not need all these permutations (neither the prefactor) def __call__(self, *fields): """Apply on a list of vector_fields. The expression is rewritten internally in terms of tensor products and evaluated.""" orders = (covariant_order(e) + contravariant_order(e) for e in self.args) mul = 1/Mul(*(factorial(o) for o in orders)) perms = permutations(fields) perms_par = (Permutation( p).signature() for p in permutations(list(range(len(fields))))) tensor_prod = TensorProduct(*self.args) return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)]) class LieDerivative(Expr): """Lie derivative with respect to a vector field. Explanation =========== The transport operator that defines the Lie derivative is the pushforward of the field to be derived along the integral curve of the field with respect to which one derives. Examples ======== >>> from sympy.diffgeom.rn import R2_r, R2_p >>> from sympy.diffgeom import (LieDerivative, TensorProduct) >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> e_rho, e_theta = R2_p.base_vectors() >>> dx, dy = R2_r.base_oneforms() >>> LieDerivative(e_x, fy) 0 >>> LieDerivative(e_x, fx) 1 >>> LieDerivative(e_x, e_x) 0 The Lie derivative of a tensor field by another tensor field is equal to their commutator: >>> LieDerivative(e_x, e_rho) Commutator(e_x, e_rho) >>> LieDerivative(e_x + e_y, fx) 1 >>> tp = TensorProduct(dx, dy) >>> LieDerivative(e_x, tp) LieDerivative(e_x, TensorProduct(dx, dy)) >>> LieDerivative(e_x, tp) LieDerivative(e_x, TensorProduct(dx, dy)) """ def __new__(cls, v_field, expr): expr_form_ord = covariant_order(expr) if contravariant_order(v_field) != 1 or covariant_order(v_field): raise ValueError('Lie derivatives are defined only with respect to' ' vector fields. 
The supplied argument was not a ' 'vector field.') if expr_form_ord > 0: obj = super().__new__(cls, v_field, expr) # deprecated assignments obj._v_field = v_field obj._expr = expr return obj if expr.atoms(BaseVectorField): return Commutator(v_field, expr) else: return v_field.rcall(expr) @property def v_field(self): return self.args[0] @property def expr(self): return self.args[1] def __call__(self, *args): v = self.v_field expr = self.expr lead_term = v(expr(*args)) rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:]) for i in range(len(args))]) return lead_term - rest class BaseCovarDerivativeOp(Expr): """Covariant derivative operator with respect to a base vector. Examples ======== >>> from sympy.diffgeom.rn import R2_r >>> from sympy.diffgeom import BaseCovarDerivativeOp >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct >>> TP = TensorProduct >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> dx, dy = R2_r.base_oneforms() >>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy)) >>> ch [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch) >>> cvd(fx) 1 >>> cvd(fx*e_x) e_x """ def __new__(cls, coord_sys, index, christoffel): index = _sympify(index) christoffel = ImmutableDenseNDimArray(christoffel) obj = super().__new__(cls, coord_sys, index, christoffel) # deprecated assignments obj._coord_sys = coord_sys obj._index = index obj._christoffel = christoffel return obj @property def coord_sys(self): return self.args[0] @property def index(self): return self.args[1] @property def christoffel(self): return self.args[2] def __call__(self, field): """Apply on a scalar field. The action of a vector field on a scalar field is a directional differentiation. If the argument is not a scalar field the behaviour is undefined. """ if covariant_order(field) != 0: raise NotImplementedError() field = vectors_in_basis(field, self._coord_sys) wrt_vector = self._coord_sys.base_vector(self._index) wrt_scalar = self._coord_sys.coord_function(self._index) vectors = list(field.atoms(BaseVectorField)) # First step: replace all vectors with something susceptible to # derivation and do the derivation # TODO: you need a real dummy function for the next line d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i, b in enumerate(vectors)] d_result = field.subs(list(zip(vectors, d_funcs))) d_result = wrt_vector(d_result) # Second step: backsubstitute the vectors in d_result = d_result.subs(list(zip(d_funcs, vectors))) # Third step: evaluate the derivatives of the vectors derivs = [] for v in vectors: d = Add(*[(self._christoffel[k, wrt_vector._index, v._index] *v._coord_sys.base_vector(k)) for k in range(v._coord_sys.dim)]) derivs.append(d) to_subs = [wrt_vector(d) for d in d_funcs] # XXX: This substitution can fail when there are Dummy symbols and the # cache is disabled: https://github.com/sympy/sympy/issues/17794 result = d_result.subs(list(zip(to_subs, derivs))) # Remove the dummies result = result.subs(list(zip(d_funcs, vectors))) return result.doit() class CovarDerivativeOp(Expr): """Covariant derivative operator. 
Examples ======== >>> from sympy.diffgeom.rn import R2_r >>> from sympy.diffgeom import CovarDerivativeOp >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct >>> TP = TensorProduct >>> fx, fy = R2_r.base_scalars() >>> e_x, e_y = R2_r.base_vectors() >>> dx, dy = R2_r.base_oneforms() >>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy)) >>> ch [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> cvd = CovarDerivativeOp(fx*e_x, ch) >>> cvd(fx) x >>> cvd(fx*e_x) x*e_x """ def __new__(cls, wrt, christoffel): if len({v._coord_sys for v in wrt.atoms(BaseVectorField)}) > 1: raise NotImplementedError() if contravariant_order(wrt) != 1 or covariant_order(wrt): raise ValueError('Covariant derivatives are defined only with ' 'respect to vector fields. The supplied argument ' 'was not a vector field.') obj = super().__new__(cls, wrt, christoffel) # deprecated assigments obj._wrt = wrt obj._christoffel = christoffel return obj @property def wrt(self): return self.args[0] @property def christoffel(self): return self.args[1] def __call__(self, field): vectors = list(self._wrt.atoms(BaseVectorField)) base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel) for v in vectors] return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field) ############################################################################### # Integral curves on vector fields ############################################################################### def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False): r"""Return the series expansion for an integral curve of the field. Explanation =========== Integral curve is a function `\gamma` taking a parameter in `R` to a point in the manifold. It verifies the equation: `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` where the given ``vector_field`` is denoted as `V`. This holds for any value `t` for the parameter and any scalar field `f`. This equation can also be decomposed of a basis of coordinate functions `V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i` This function returns a series expansion of `\gamma(t)` in terms of the coordinate system ``coord_sys``. The equations and expansions are necessarily done in coordinate-system-dependent way as there is no other way to represent movement between points on the manifold (i.e. there is no such thing as a difference of points for a general manifold). Parameters ========== vector_field the vector field for which an integral curve will be given param the argument of the function `\gamma` from R to the curve start_point the point which corresponds to `\gamma(0)` n the order to which to expand coord_sys the coordinate system in which to expand coeffs (default False) - if True return a list of elements of the expansion Examples ======== Use the predefined R2 manifold: >>> from sympy.abc import t, x, y >>> from sympy.diffgeom.rn import R2_p, R2_r >>> from sympy.diffgeom import intcurve_series Specify a starting point and a vector field: >>> start_point = R2_r.point([x, y]) >>> vector_field = R2_r.e_x Calculate the series: >>> intcurve_series(vector_field, t, start_point, n=3) Matrix([ [t + x], [ y]]) Or get the elements of the expansion in a list: >>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True) >>> series[0] Matrix([ [x], [y]]) >>> series[1] Matrix([ [t], [0]]) >>> series[2] Matrix([ [0], [0]]) The series in the polar coordinate system: >>> series = intcurve_series(vector_field, t, start_point, ... 
n=3, coord_sys=R2_p, coeffs=True) >>> series[0] Matrix([ [sqrt(x**2 + y**2)], [ atan2(y, x)]]) >>> series[1] Matrix([ [t*x/sqrt(x**2 + y**2)], [ -t*y/(x**2 + y**2)]]) >>> series[2] Matrix([ [t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2], [ t**2*x*y/(x**2 + y**2)**2]]) See Also ======== intcurve_diffequ """ if contravariant_order(vector_field) != 1 or covariant_order(vector_field): raise ValueError('The supplied field was not a vector field.') def iter_vfield(scalar_field, i): """Return ``vector_field`` called `i` times on ``scalar_field``.""" return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field) def taylor_terms_per_coord(coord_function): """Return the series for one of the coordinates.""" return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i) for i in range(n)] coord_sys = coord_sys if coord_sys else start_point._coord_sys coord_functions = coord_sys.coord_functions() taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions] if coeffs: return [Matrix(t) for t in zip(*taylor_terms)] else: return Matrix([sum(c) for c in taylor_terms]) def intcurve_diffequ(vector_field, param, start_point, coord_sys=None): r"""Return the differential equation for an integral curve of the field. Explanation =========== Integral curve is a function `\gamma` taking a parameter in `R` to a point in the manifold. It verifies the equation: `V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)` where the given ``vector_field`` is denoted as `V`. This holds for any value `t` for the parameter and any scalar field `f`. This function returns the differential equation of `\gamma(t)` in terms of the coordinate system ``coord_sys``. The equations and expansions are necessarily done in coordinate-system-dependent way as there is no other way to represent movement between points on the manifold (i.e. there is no such thing as a difference of points for a general manifold). 
Parameters ========== vector_field the vector field for which an integral curve will be given param the argument of the function `\gamma` from R to the curve start_point the point which corresponds to `\gamma(0)` coord_sys the coordinate system in which to give the equations Returns ======= a tuple of (equations, initial conditions) Examples ======== Use the predefined R2 manifold: >>> from sympy.abc import t >>> from sympy.diffgeom.rn import R2, R2_p, R2_r >>> from sympy.diffgeom import intcurve_diffequ Specify a starting point and a vector field: >>> start_point = R2_r.point([0, 1]) >>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y Get the equation: >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point) >>> equations [f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)] >>> init_cond [f_0(0), f_1(0) - 1] The series in the polar coordinate system: >>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p) >>> equations [Derivative(f_0(t), t), Derivative(f_1(t), t) - 1] >>> init_cond [f_0(0) - 1, f_1(0) - pi/2] See Also ======== intcurve_series """ if contravariant_order(vector_field) != 1 or covariant_order(vector_field): raise ValueError('The supplied field was not a vector field.') coord_sys = coord_sys if coord_sys else start_point._coord_sys gammas = [Function('f_%d' % i)(param) for i in range( start_point._coord_sys.dim)] arbitrary_p = Point(coord_sys, gammas) coord_functions = coord_sys.coord_functions() equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p)) for cf in coord_functions] init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point)) for cf in coord_functions] return equations, init_cond ############################################################################### # Helpers ############################################################################### def dummyfy(args, exprs): # TODO Is this a good idea? d_args = Matrix([s.as_dummy() for s in args]) reps = dict(zip(args, d_args)) d_exprs = Matrix([_sympify(expr).subs(reps) for expr in exprs]) return d_args, d_exprs ############################################################################### # Helpers ############################################################################### def contravariant_order(expr, _strict=False): """Return the contravariant order of an expression. Examples ======== >>> from sympy.diffgeom import contravariant_order >>> from sympy.diffgeom.rn import R2 >>> from sympy.abc import a >>> contravariant_order(a) 0 >>> contravariant_order(a*R2.x + 2) 0 >>> contravariant_order(a*R2.x*R2.e_y + R2.e_x) 1 """ # TODO move some of this to class methods. 
# TODO rewrite using the .as_blah_blah methods if isinstance(expr, Add): orders = [contravariant_order(e) for e in expr.args] if len(set(orders)) != 1: raise ValueError('Misformed expression containing contravariant fields of varying order.') return orders[0] elif isinstance(expr, Mul): orders = [contravariant_order(e) for e in expr.args] not_zero = [o for o in orders if o != 0] if len(not_zero) > 1: raise ValueError('Misformed expression containing multiplication between vectors.') return 0 if not not_zero else not_zero[0] elif isinstance(expr, Pow): if covariant_order(expr.base) or covariant_order(expr.exp): raise ValueError( 'Misformed expression containing a power of a vector.') return 0 elif isinstance(expr, BaseVectorField): return 1 elif isinstance(expr, TensorProduct): return sum(contravariant_order(a) for a in expr.args) elif not _strict or expr.atoms(BaseScalarField): return 0 else: # If it does not contain anything related to the diffgeom module and it is _strict return -1 def covariant_order(expr, _strict=False): """Return the covariant order of an expression. Examples ======== >>> from sympy.diffgeom import covariant_order >>> from sympy.diffgeom.rn import R2 >>> from sympy.abc import a >>> covariant_order(a) 0 >>> covariant_order(a*R2.x + 2) 0 >>> covariant_order(a*R2.x*R2.dy + R2.dx) 1 """ # TODO move some of this to class methods. # TODO rewrite using the .as_blah_blah methods if isinstance(expr, Add): orders = [covariant_order(e) for e in expr.args] if len(set(orders)) != 1: raise ValueError('Misformed expression containing form fields of varying order.') return orders[0] elif isinstance(expr, Mul): orders = [covariant_order(e) for e in expr.args] not_zero = [o for o in orders if o != 0] if len(not_zero) > 1: raise ValueError('Misformed expression containing multiplication between forms.') return 0 if not not_zero else not_zero[0] elif isinstance(expr, Pow): if covariant_order(expr.base) or covariant_order(expr.exp): raise ValueError( 'Misformed expression containing a power of a form.') return 0 elif isinstance(expr, Differential): return covariant_order(*expr.args) + 1 elif isinstance(expr, TensorProduct): return sum(covariant_order(a) for a in expr.args) elif not _strict or expr.atoms(BaseScalarField): return 0 else: # If it does not contain anything related to the diffgeom module and it is _strict return -1 ############################################################################### # Coordinate transformation functions ############################################################################### def vectors_in_basis(expr, to_sys): """Transform all base vectors in base vectors of a specified coord basis. While the new base vectors are in the new coordinate system basis, any coefficients are kept in the old system. 
Examples ======== >>> from sympy.diffgeom import vectors_in_basis >>> from sympy.diffgeom.rn import R2_r, R2_p >>> vectors_in_basis(R2_r.e_x, R2_p) -y*e_theta/(x**2 + y**2) + x*e_rho/sqrt(x**2 + y**2) >>> vectors_in_basis(R2_p.e_r, R2_r) sin(theta)*e_y + cos(theta)*e_x """ vectors = list(expr.atoms(BaseVectorField)) new_vectors = [] for v in vectors: cs = v._coord_sys jac = cs.jacobian(to_sys, cs.coord_functions()) new = (jac.T*Matrix(to_sys.base_vectors()))[v._index] new_vectors.append(new) return expr.subs(list(zip(vectors, new_vectors))) ############################################################################### # Coordinate-dependent functions ############################################################################### def twoform_to_matrix(expr): """Return the matrix representing the twoform. For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`, where `e_i` is the i-th base vector field for the coordinate system in which the expression of `w` is given. Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import twoform_to_matrix, TensorProduct >>> TP = TensorProduct >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) Matrix([ [1, 0], [0, 1]]) >>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) Matrix([ [x, 0], [0, 1]]) >>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2) Matrix([ [ 1, 0], [-1/2, 1]]) """ if covariant_order(expr) != 2 or contravariant_order(expr): raise ValueError('The input expression is not a two-form.') coord_sys = _find_coords(expr) if len(coord_sys) != 1: raise ValueError('The input expression concerns more than one ' 'coordinate systems, hence there is no unambiguous ' 'way to choose a coordinate system for the matrix.') coord_sys = coord_sys.pop() vectors = coord_sys.base_vectors() expr = expr.expand() matrix_content = [[expr.rcall(v1, v2) for v1 in vectors] for v2 in vectors] return Matrix(matrix_content) def metric_to_Christoffel_1st(expr): """Return the nested list of Christoffel symbols for the given metric. This returns the Christoffel symbol of first kind that represents the Levi-Civita connection for the given metric. Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct >>> TP = TensorProduct >>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]] """ matrix = twoform_to_matrix(expr) if not matrix.is_symmetric(): raise ValueError( 'The two-form representing the metric is not symmetric.') coord_sys = _find_coords(expr).pop() deriv_matrices = [matrix.applyfunc(lambda a: d(a)) for d in coord_sys.base_vectors()] indices = list(range(coord_sys.dim)) christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2 for k in indices] for j in indices] for i in indices] return ImmutableDenseNDimArray(christoffel) def metric_to_Christoffel_2nd(expr): """Return the nested list of Christoffel symbols for the given metric. This returns the Christoffel symbol of second kind that represents the Levi-Civita connection for the given metric. 
Examples ======== >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct >>> TP = TensorProduct >>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[0, 0], [0, 0]], [[0, 0], [0, 0]]] >>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]] """ ch_1st = metric_to_Christoffel_1st(expr) coord_sys = _find_coords(expr).pop() indices = list(range(coord_sys.dim)) # XXX workaround, inverting a matrix does not work if it contains non # symbols #matrix = twoform_to_matrix(expr).inv() matrix = twoform_to_matrix(expr) s_fields = set() for e in matrix: s_fields.update(e.atoms(BaseScalarField)) s_fields = list(s_fields) dums = coord_sys.symbols matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields))) # XXX end of workaround christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices]) for k in indices] for j in indices] for i in indices] return ImmutableDenseNDimArray(christoffel) def metric_to_Riemann_components(expr): """Return the components of the Riemann tensor expressed in a given basis. Given a metric it calculates the components of the Riemann tensor in the canonical basis of the coordinate system in which the metric expression is given. Examples ======== >>> from sympy import exp >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct >>> TP = TensorProduct >>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]] >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ R2.r**2*TP(R2.dtheta, R2.dtheta) >>> non_trivial_metric exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta) >>> riemann = metric_to_Riemann_components(non_trivial_metric) >>> riemann[0, :, :, :] [[[0, 0], [0, 0]], [[0, exp(-2*rho)*rho], [-exp(-2*rho)*rho, 0]]] >>> riemann[1, :, :, :] [[[0, -1/rho], [1/rho, 0]], [[0, 0], [0, 0]]] """ ch_2nd = metric_to_Christoffel_2nd(expr) coord_sys = _find_coords(expr).pop() indices = list(range(coord_sys.dim)) deriv_ch = [[[[d(ch_2nd[i, j, k]) for d in coord_sys.base_vectors()] for k in indices] for j in indices] for i in indices] riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu] for nu in indices] for mu in indices] for sig in indices] for rho in indices] riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices]) for nu in indices] for mu in indices] for sig in indices] for rho in indices] riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu] for nu in indices] for mu in indices] for sig in indices] for rho in indices] return ImmutableDenseNDimArray(riemann) def metric_to_Ricci_components(expr): """Return the components of the Ricci tensor expressed in a given basis. Given a metric it calculates the components of the Ricci tensor in the canonical basis of the coordinate system in which the metric expression is given. 
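    Concretely, the components are obtained by contracting the first and third
    indices of the Riemann tensor computed by metric_to_Riemann_components,
    Ricci_{i j} = Sum_k Riemann^k_{i k j}.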
Examples ======== >>> from sympy import exp >>> from sympy.diffgeom.rn import R2 >>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct >>> TP = TensorProduct >>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) [[0, 0], [0, 0]] >>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \ R2.r**2*TP(R2.dtheta, R2.dtheta) >>> non_trivial_metric exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta) >>> metric_to_Ricci_components(non_trivial_metric) [[1/rho, 0], [0, exp(-2*rho)*rho]] """ riemann = metric_to_Riemann_components(expr) coord_sys = _find_coords(expr).pop() indices = list(range(coord_sys.dim)) ricci = [[Add(*[riemann[k, i, k, j] for k in indices]) for j in indices] for i in indices] return ImmutableDenseNDimArray(ricci) ############################################################################### # Classes for deprecation ############################################################################### class _deprecated_container: # This class gives deprecation warning. # When deprecated features are completely deleted, this should be removed as well. # See https://github.com/sympy/sympy/pull/19368 def __init__(self, feature, useinstead, issue, version, data): super().__init__(data) self.feature = feature self.useinstead = useinstead self.issue = issue self.version = version def warn(self): SymPyDeprecationWarning( feature=self.feature, useinstead=self.useinstead, issue=self.issue, deprecated_since_version=self.version).warn() def __iter__(self): self.warn() return super().__iter__() def __getitem__(self, key): self.warn() return super().__getitem__(key) def __contains__(self, key): self.warn() return super().__contains__(key) class _deprecated_list(_deprecated_container, list): pass class _deprecated_dict(_deprecated_container, dict): pass
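

# A minimal, hedged usage sketch tying the curvature helpers above together.
# It is not part of the original module; it reuses the metric from the
# docstring examples and only runs when this file is executed directly.
if __name__ == "__main__":
    from sympy import exp
    from sympy.diffgeom.rn import R2

    _metric = (exp(2*R2.r)*TensorProduct(R2.dr, R2.dr) +
               R2.r**2*TensorProduct(R2.dtheta, R2.dtheta))
    # Christoffel symbols of the second kind for the metric above.
    print(metric_to_Christoffel_2nd(_metric))
    # Ricci components; the docstring above reports
    # [[1/rho, 0], [0, exp(-2*rho)*rho]] for this metric.
    print(metric_to_Ricci_components(_metric))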
e53f7369ef27e3d71876029c0aa7e998298b1245c343b932e895e5369ad0c2a2
from sympy.printing.pycode import PythonCodePrinter """ This module collects utilities for rendering Python code. """ def render_as_module(content, standard='python3'): """Renders python code as a module (with the required imports). Parameters ========== standard : See the parameter ``standard`` in :meth:`sympy.printing.pycode.pycode` """ printer = PythonCodePrinter({'standard':standard}) pystr = printer.doprint(content) if printer._settings['fully_qualified_modules']: module_imports_str = '\n'.join('import %s' % k for k in printer.module_imports) else: module_imports_str = '\n'.join(['from %s import %s' % (k, ', '.join(v)) for k, v in printer.module_imports.items()]) return module_imports_str + '\n\n' + pystr
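

# A minimal, hedged usage sketch (not part of the original module): with the
# default ``fully_qualified_modules`` setting, rendering ``sin(x) + x**2``
# should yield an ``import math`` line followed by ``math.sin(x) + x**2``.
if __name__ == "__main__":
    from sympy import symbols, sin

    x = symbols('x')
    print(render_as_module(sin(x) + x**2))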
eace8ede3cbd305dffa63f7669c16eb3ddbfa2baf5639f4cf398849a2699fb4f
import math from sympy import Interval from sympy.calculus.singularities import is_increasing, is_decreasing from sympy.codegen.rewriting import Optimization from sympy.core.function import UndefinedFunction """ This module collects classes useful for approimate rewriting of expressions. This can be beneficial when generating numeric code for which performance is of greater importance than precision (e.g. for preconditioners used in iterative methods). """ class SumApprox(Optimization): """ Approximates sum by neglecting small terms. Explanation =========== If terms are expressions which can be determined to be monotonic, then bounds for those expressions are added. Parameters ========== bounds : dict Mapping expressions to length 2 tuple of bounds (low, high). reltol : number Threshold for when to ignore a term. Taken relative to the largest lower bound among bounds. Examples ======== >>> from sympy import exp >>> from sympy.abc import x, y, z >>> from sympy.codegen.rewriting import optimize >>> from sympy.codegen.approximations import SumApprox >>> bounds = {x: (-1, 1), y: (1000, 2000), z: (-10, 3)} >>> sum_approx3 = SumApprox(bounds, reltol=1e-3) >>> sum_approx2 = SumApprox(bounds, reltol=1e-2) >>> sum_approx1 = SumApprox(bounds, reltol=1e-1) >>> expr = 3*(x + y + exp(z)) >>> optimize(expr, [sum_approx3]) 3*(x + y + exp(z)) >>> optimize(expr, [sum_approx2]) 3*y + 3*exp(z) >>> optimize(expr, [sum_approx1]) 3*y """ def __init__(self, bounds, reltol, **kwargs): super().__init__(**kwargs) self.bounds = bounds self.reltol = reltol def __call__(self, expr): return expr.factor().replace(self.query, lambda arg: self.value(arg)) def query(self, expr): return expr.is_Add def value(self, add): for term in add.args: if term.is_number or term in self.bounds or len(term.free_symbols) != 1: continue fs, = term.free_symbols if fs not in self.bounds: continue intrvl = Interval(*self.bounds[fs]) if is_increasing(term, intrvl, fs): self.bounds[term] = ( term.subs({fs: self.bounds[fs][0]}), term.subs({fs: self.bounds[fs][1]}) ) elif is_decreasing(term, intrvl, fs): self.bounds[term] = ( term.subs({fs: self.bounds[fs][1]}), term.subs({fs: self.bounds[fs][0]}) ) else: return add if all(term.is_number or term in self.bounds for term in add.args): bounds = [(term, term) if term.is_number else self.bounds[term] for term in add.args] largest_abs_guarantee = 0 for lo, hi in bounds: if lo <= 0 <= hi: continue largest_abs_guarantee = max(largest_abs_guarantee, min(abs(lo), abs(hi))) new_terms = [] for term, (lo, hi) in zip(add.args, bounds): if max(abs(lo), abs(hi)) >= largest_abs_guarantee*self.reltol: new_terms.append(term) return add.func(*new_terms) else: return add class SeriesApprox(Optimization): """ Approximates functions by expanding them as a series. Parameters ========== bounds : dict Mapping expressions to length 2 tuple of bounds (low, high). reltol : number Threshold for when to ignore a term. Taken relative to the largest lower bound among bounds. max_order : int Largest order to include in series expansion n_point_checks : int (even) The validity of an expansion (with respect to reltol) is checked at discrete points (linearly spaced over the bounds of the variable). The number of points used in this numerical check is given by this number. 
Examples ======== >>> from sympy import sin, pi >>> from sympy.abc import x, y >>> from sympy.codegen.rewriting import optimize >>> from sympy.codegen.approximations import SeriesApprox >>> bounds = {x: (-.1, .1), y: (pi-1, pi+1)} >>> series_approx2 = SeriesApprox(bounds, reltol=1e-2) >>> series_approx3 = SeriesApprox(bounds, reltol=1e-3) >>> series_approx8 = SeriesApprox(bounds, reltol=1e-8) >>> expr = sin(x)*sin(y) >>> optimize(expr, [series_approx2]) x*(-y + (y - pi)**3/6 + pi) >>> optimize(expr, [series_approx3]) (-x**3/6 + x)*sin(y) >>> optimize(expr, [series_approx8]) sin(x)*sin(y) """ def __init__(self, bounds, reltol, max_order=4, n_point_checks=4, **kwargs): super().__init__(**kwargs) self.bounds = bounds self.reltol = reltol self.max_order = max_order if n_point_checks % 2 == 1: raise ValueError("Checking the solution at expansion point is not helpful") self.n_point_checks = n_point_checks self._prec = math.ceil(-math.log10(self.reltol)) def __call__(self, expr): return expr.factor().replace(self.query, lambda arg: self.value(arg)) def query(self, expr): return (expr.is_Function and not isinstance(expr, UndefinedFunction) and len(expr.args) == 1) def value(self, fexpr): free_symbols = fexpr.free_symbols if len(free_symbols) != 1: return fexpr symb, = free_symbols if symb not in self.bounds: return fexpr lo, hi = self.bounds[symb] x0 = (lo + hi)/2 cheapest = None for n in range(self.max_order+1, 0, -1): fseri = fexpr.series(symb, x0=x0, n=n).removeO() n_ok = True for idx in range(self.n_point_checks): x = lo + idx*(hi - lo)/(self.n_point_checks - 1) val = fseri.xreplace({symb: x}) ref = fexpr.xreplace({symb: x}) if abs((1 - val/ref).evalf(self._prec)) > self.reltol: n_ok = False break if n_ok: cheapest = fseri else: break if cheapest is None: return fexpr else: return cheapest
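

# A minimal, hedged usage sketch (not part of the original module) showing how
# the two passes above are applied through sympy.codegen.rewriting.optimize,
# reusing the data from the docstring examples.
if __name__ == "__main__":
    from sympy import exp, pi, sin, symbols
    from sympy.codegen.rewriting import optimize

    x, y, z = symbols('x y z')
    # SumApprox: drop terms that are negligible within the given bounds
    # (the docstring above reports 3*y + 3*exp(z) for reltol=1e-2).
    sum_bounds = {x: (-1, 1), y: (1000, 2000), z: (-10, 3)}
    print(optimize(3*(x + y + exp(z)), [SumApprox(sum_bounds, reltol=1e-2)]))
    # SeriesApprox: replace univariate functions by short series expansions
    # (the docstring above reports (-x**3/6 + x)*sin(y) for reltol=1e-3).
    series_bounds = {x: (-.1, .1), y: (pi - 1, pi + 1)}
    print(optimize(sin(x)*sin(y), [SeriesApprox(series_bounds, reltol=1e-3)]))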
1b726e7f0a6fc5b98b52b5cdffd136227438dc73fede2e1a5028956ea9017d50
""" AST nodes specific to Fortran. The functions defined in this module allows the user to express functions such as ``dsign`` as a SymPy function for symbolic manipulation. """ from sympy.codegen.ast import ( Attribute, CodeBlock, FunctionCall, Node, none, String, Token, _mk_Tuple, Variable ) from sympy.core.basic import Basic from sympy.core.containers import Tuple from sympy.core.expr import Expr from sympy.core.function import Function from sympy.core.numbers import Float, Integer from sympy.core.sympify import sympify from sympy.logic import true, false from sympy.utilities.iterables import iterable pure = Attribute('pure') elemental = Attribute('elemental') # (all elemental procedures are also pure) intent_in = Attribute('intent_in') intent_out = Attribute('intent_out') intent_inout = Attribute('intent_inout') allocatable = Attribute('allocatable') class Program(Token): """ Represents a 'program' block in Fortran. Examples ======== >>> from sympy.codegen.ast import Print >>> from sympy.codegen.fnodes import Program >>> prog = Program('myprogram', [Print([42])]) >>> from sympy.printing import fcode >>> print(fcode(prog, source_format='free')) program myprogram print *, 42 end program """ __slots__ = ('name', 'body') _construct_name = String _construct_body = staticmethod(lambda body: CodeBlock(*body)) class use_rename(Token): """ Represents a renaming in a use statement in Fortran. Examples ======== >>> from sympy.codegen.fnodes import use_rename, use >>> from sympy.printing import fcode >>> ren = use_rename("thingy", "convolution2d") >>> print(fcode(ren, source_format='free')) thingy => convolution2d >>> full = use('signallib', only=['snr', ren]) >>> print(fcode(full, source_format='free')) use signallib, only: snr, thingy => convolution2d """ __slots__ = ('local', 'original') _construct_local = String _construct_original = String def _name(arg): if hasattr(arg, 'name'): return arg.name else: return String(arg) class use(Token): """ Represents a use statement in Fortran. Examples ======== >>> from sympy.codegen.fnodes import use >>> from sympy.printing import fcode >>> fcode(use('signallib'), source_format='free') 'use signallib' >>> fcode(use('signallib', [('metric', 'snr')]), source_format='free') 'use signallib, metric => snr' >>> fcode(use('signallib', only=['snr', 'convolution2d']), source_format='free') 'use signallib, only: snr, convolution2d' """ __slots__ = ('namespace', 'rename', 'only') defaults = {'rename': none, 'only': none} _construct_namespace = staticmethod(_name) _construct_rename = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else use_rename(*arg) for arg in args])) _construct_only = staticmethod(lambda args: Tuple(*[arg if isinstance(arg, use_rename) else _name(arg) for arg in args])) class Module(Token): """ Represents a module in Fortran. Examples ======== >>> from sympy.codegen.fnodes import Module >>> from sympy.printing import fcode >>> print(fcode(Module('signallib', ['implicit none'], []), source_format='free')) module signallib implicit none <BLANKLINE> contains <BLANKLINE> <BLANKLINE> end module """ __slots__ = ('name', 'declarations', 'definitions') defaults = {'declarations': Tuple()} _construct_name = String _construct_declarations = staticmethod(lambda arg: CodeBlock(*arg)) _construct_definitions = staticmethod(lambda arg: CodeBlock(*arg)) class Subroutine(Node): """ Represents a subroutine in Fortran. 
Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import Print >>> from sympy.codegen.fnodes import Subroutine >>> from sympy.printing import fcode >>> x, y = symbols('x y', real=True) >>> sub = Subroutine('mysub', [x, y], [Print([x**2 + y**2, x*y])]) >>> print(fcode(sub, source_format='free', standard=2003)) subroutine mysub(x, y) real*8 :: x real*8 :: y print *, x**2 + y**2, x*y end subroutine """ __slots__ = ('name', 'parameters', 'body', 'attrs') _construct_name = String _construct_parameters = staticmethod(lambda params: Tuple(*map(Variable.deduced, params))) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) class SubroutineCall(Token): """ Represents a call to a subroutine in Fortran. Examples ======== >>> from sympy.codegen.fnodes import SubroutineCall >>> from sympy.printing import fcode >>> fcode(SubroutineCall('mysub', 'x y'.split())) ' call mysub(x, y)' """ __slots__ = ('name', 'subroutine_args') _construct_name = staticmethod(_name) _construct_subroutine_args = staticmethod(_mk_Tuple) class Do(Token): """ Represents a Do loop in in Fortran. Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import aug_assign, Print >>> from sympy.codegen.fnodes import Do >>> from sympy.printing import fcode >>> i, n = symbols('i n', integer=True) >>> r = symbols('r', real=True) >>> body = [aug_assign(r, '+', 1/i), Print([i, r])] >>> do1 = Do(body, i, 1, n) >>> print(fcode(do1, source_format='free')) do i = 1, n r = r + 1d0/i print *, i, r end do >>> do2 = Do(body, i, 1, n, 2) >>> print(fcode(do2, source_format='free')) do i = 1, n, 2 r = r + 1d0/i print *, i, r end do """ __slots__ = ('body', 'counter', 'first', 'last', 'step', 'concurrent') defaults = {'step': Integer(1), 'concurrent': false} _construct_body = staticmethod(lambda body: CodeBlock(*body)) _construct_counter = staticmethod(sympify) _construct_first = staticmethod(sympify) _construct_last = staticmethod(sympify) _construct_step = staticmethod(sympify) _construct_concurrent = staticmethod(lambda arg: true if arg else false) class ArrayConstructor(Token): """ Represents an array constructor. Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import ArrayConstructor >>> ac = ArrayConstructor([1, 2, 3]) >>> fcode(ac, standard=95, source_format='free') '(/1, 2, 3/)' >>> fcode(ac, standard=2003, source_format='free') '[1, 2, 3]' """ __slots__ = ('elements',) _construct_elements = staticmethod(_mk_Tuple) class ImpliedDoLoop(Token): """ Represents an implied do loop in Fortran. Examples ======== >>> from sympy import Symbol, fcode >>> from sympy.codegen.fnodes import ImpliedDoLoop, ArrayConstructor >>> i = Symbol('i', integer=True) >>> idl = ImpliedDoLoop(i**3, i, -3, 3, 2) # -27, -1, 1, 27 >>> ac = ArrayConstructor([-28, idl, 28]) # -28, -27, -1, 1, 27, 28 >>> fcode(ac, standard=2003, source_format='free') '[-28, (i**3, i = -3, 3, 2), 28]' """ __slots__ = ('expr', 'counter', 'first', 'last', 'step') defaults = {'step': Integer(1)} _construct_expr = staticmethod(sympify) _construct_counter = staticmethod(sympify) _construct_first = staticmethod(sympify) _construct_last = staticmethod(sympify) _construct_step = staticmethod(sympify) class Extent(Basic): """ Represents a dimension extent. 
Examples ======== >>> from sympy.codegen.fnodes import Extent >>> e = Extent(-3, 3) # -3, -2, -1, 0, 1, 2, 3 >>> from sympy.printing import fcode >>> fcode(e, source_format='free') '-3:3' >>> from sympy.codegen.ast import Variable, real >>> from sympy.codegen.fnodes import dimension, intent_out >>> dim = dimension(e, e) >>> arr = Variable('x', real, attrs=[dim, intent_out]) >>> fcode(arr.as_Declaration(), source_format='free', standard=2003) 'real*8, dimension(-3:3, -3:3), intent(out) :: x' """ def __new__(cls, *args): if len(args) == 2: low, high = args return Basic.__new__(cls, sympify(low), sympify(high)) elif len(args) == 0 or (len(args) == 1 and args[0] in (':', None)): return Basic.__new__(cls) # assumed shape else: raise ValueError("Expected 0 or 2 args (or one argument == None or ':')") def _sympystr(self, printer): if len(self.args) == 0: return ':' return '%d:%d' % self.args assumed_extent = Extent() # or Extent(':'), Extent(None) def dimension(*args): """ Creates a 'dimension' Attribute with (up to 7) extents. Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import dimension, intent_in >>> dim = dimension('2', ':') # 2 rows, runtime determined number of columns >>> from sympy.codegen.ast import Variable, integer >>> arr = Variable('a', integer, attrs=[dim, intent_in]) >>> fcode(arr.as_Declaration(), source_format='free', standard=2003) 'integer*4, dimension(2, :), intent(in) :: a' """ if len(args) > 7: raise ValueError("Fortran only supports up to 7 dimensional arrays") parameters = [] for arg in args: if isinstance(arg, Extent): parameters.append(arg) elif isinstance(arg, str): if arg == ':': parameters.append(Extent()) else: parameters.append(String(arg)) elif iterable(arg): parameters.append(Extent(*arg)) else: parameters.append(sympify(arg)) if len(args) == 0: raise ValueError("Need at least one dimension") return Attribute('dimension', parameters) assumed_size = dimension('*') def array(symbol, dim, intent=None, *, attrs=(), value=None, type=None): """ Convenience function for creating a Variable instance for a Fortran array. Parameters ========== symbol : symbol dim : Attribute or iterable If dim is an ``Attribute`` it need to have the name 'dimension'. 
If it is not an ``Attribute``, then it is passsed to :func:`dimension` as ``*dim`` intent : str One of: 'in', 'out', 'inout' or None \\*\\*kwargs: Keyword arguments for ``Variable`` ('type' & 'value') Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.ast import integer, real >>> from sympy.codegen.fnodes import array >>> arr = array('a', '*', 'in', type=integer) >>> print(fcode(arr.as_Declaration(), source_format='free', standard=2003)) integer*4, dimension(*), intent(in) :: a >>> x = array('x', [3, ':', ':'], intent='out', type=real) >>> print(fcode(x.as_Declaration(value=1), source_format='free', standard=2003)) real*8, dimension(3, :, :), intent(out) :: x = 1 """ if isinstance(dim, Attribute): if str(dim.name) != 'dimension': raise ValueError("Got an unexpected Attribute argument as dim: %s" % str(dim)) else: dim = dimension(*dim) attrs = list(attrs) + [dim] if intent is not None: if intent not in (intent_in, intent_out, intent_inout): intent = {'in': intent_in, 'out': intent_out, 'inout': intent_inout}[intent] attrs.append(intent) if type is None: return Variable.deduced(symbol, value=value, attrs=attrs) else: return Variable(symbol, type, value=value, attrs=attrs) def _printable(arg): return String(arg) if isinstance(arg, str) else sympify(arg) def allocated(array): """ Creates an AST node for a function call to Fortran's "allocated(...)" Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import allocated >>> alloc = allocated('x') >>> fcode(alloc, source_format='free') 'allocated(x)' """ return FunctionCall('allocated', [_printable(array)]) def lbound(array, dim=None, kind=None): """ Creates an AST node for a function call to Fortran's "lbound(...)" Parameters ========== array : Symbol or String dim : expr kind : expr Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import lbound >>> lb = lbound('arr', dim=2) >>> fcode(lb, source_format='free') 'lbound(arr, 2)' """ return FunctionCall( 'lbound', [_printable(array)] + ([_printable(dim)] if dim else []) + ([_printable(kind)] if kind else []) ) def ubound(array, dim=None, kind=None): return FunctionCall( 'ubound', [_printable(array)] + ([_printable(dim)] if dim else []) + ([_printable(kind)] if kind else []) ) def shape(source, kind=None): """ Creates an AST node for a function call to Fortran's "shape(...)" Parameters ========== source : Symbol or String kind : expr Examples ======== >>> from sympy.printing import fcode >>> from sympy.codegen.fnodes import shape >>> shp = shape('x') >>> fcode(shp, source_format='free') 'shape(x)' """ return FunctionCall( 'shape', [_printable(source)] + ([_printable(kind)] if kind else []) ) def size(array, dim=None, kind=None): """ Creates an AST node for a function call to Fortran's "size(...)" Examples ======== >>> from sympy import Symbol >>> from sympy.printing import fcode >>> from sympy.codegen.ast import FunctionDefinition, real, Return >>> from sympy.codegen.fnodes import array, sum_, size >>> a = Symbol('a', real=True) >>> body = [Return((sum_(a**2)/size(a))**.5)] >>> arr = array(a, dim=[':'], intent='in') >>> fd = FunctionDefinition(real, 'rms', [arr], body) >>> print(fcode(fd, source_format='free', standard=2003)) real*8 function rms(a) real*8, dimension(:), intent(in) :: a rms = sqrt(sum(a**2)*1d0/size(a)) end function """ return FunctionCall( 'size', [_printable(array)] + ([_printable(dim)] if dim else []) + ([_printable(kind)] if kind else []) ) def reshape(source, shape, pad=None, 
order=None): """ Creates an AST node for a function call to Fortran's "reshape(...)" Parameters ========== source : Symbol or String shape : ArrayExpr """ return FunctionCall( 'reshape', [_printable(source), _printable(shape)] + ([_printable(pad)] if pad else []) + ([_printable(order)] if pad else []) ) def bind_C(name=None): """ Creates an Attribute ``bind_C`` with a name. Parameters ========== name : str Examples ======== >>> from sympy import Symbol >>> from sympy.printing import fcode >>> from sympy.codegen.ast import FunctionDefinition, real, Return >>> from sympy.codegen.fnodes import array, sum_, bind_C >>> a = Symbol('a', real=True) >>> s = Symbol('s', integer=True) >>> arr = array(a, dim=[s], intent='in') >>> body = [Return((sum_(a**2)/s)**.5)] >>> fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')]) >>> print(fcode(fd, source_format='free', standard=2003)) real*8 function rms(a, s) bind(C, name="rms") real*8, dimension(s), intent(in) :: a integer*4 :: s rms = sqrt(sum(a**2)/s) end function """ return Attribute('bind_C', [String(name)] if name else []) class GoTo(Token): """ Represents a goto statement in Fortran Examples ======== >>> from sympy.codegen.fnodes import GoTo >>> go = GoTo([10, 20, 30], 'i') >>> from sympy.printing import fcode >>> fcode(go, source_format='free') 'go to (10, 20, 30), i' """ __slots__ = ('labels', 'expr') defaults = {'expr': none} _construct_labels = staticmethod(_mk_Tuple) _construct_expr = staticmethod(sympify) class FortranReturn(Token): """ AST node explicitly mapped to a fortran "return". Explanation =========== Because a return statement in fortran is different from C, and in order to aid reuse of our codegen ASTs the ordinary ``.codegen.ast.Return`` is interpreted as assignment to the result variable of the function. If one for some reason needs to generate a fortran RETURN statement, this node should be used. Examples ======== >>> from sympy.codegen.fnodes import FortranReturn >>> from sympy.printing import fcode >>> fcode(FortranReturn('x')) ' return x' """ __slots__ = ('return_value',) defaults = {'return_value': none} _construct_return_value = staticmethod(sympify) class FFunction(Function): _required_standard = 77 def _fcode(self, printer): name = self.__class__.__name__ if printer._settings['standard'] < self._required_standard: raise NotImplementedError("%s requires Fortran %d or newer" % (name, self._required_standard)) return '{}({})'.format(name, ', '.join(map(printer._print, self.args))) class F95Function(FFunction): _required_standard = 95 class isign(FFunction): """ Fortran sign intrinsic for integer arguments. """ nargs = 2 class dsign(FFunction): """ Fortran sign intrinsic for double precision arguments. """ nargs = 2 class cmplx(FFunction): """ Fortran complex conversion function. """ nargs = 2 # may be extended to (2, 3) at a later point class kind(FFunction): """ Fortran kind function. 
""" nargs = 1 class merge(F95Function): """ Fortran merge function """ nargs = 3 class _literal(Float): _token = None # type: str _decimals = None # type: int def _fcode(self, printer, *args, **kwargs): mantissa, sgnd_ex = ('%.{}e'.format(self._decimals) % self).split('e') mantissa = mantissa.strip('0').rstrip('.') ex_sgn, ex_num = sgnd_ex[0], sgnd_ex[1:].lstrip('0') ex_sgn = '' if ex_sgn == '+' else ex_sgn return (mantissa or '0') + self._token + ex_sgn + (ex_num or '0') class literal_sp(_literal): """ Fortran single precision real literal """ _token = 'e' _decimals = 9 class literal_dp(_literal): """ Fortran double precision real literal """ _token = 'd' _decimals = 17 class sum_(Token, Expr): __slots__ = ('array', 'dim', 'mask') defaults = {'dim': none, 'mask': none} _construct_array = staticmethod(sympify) _construct_dim = staticmethod(sympify) class product_(Token, Expr): __slots__ = ('array', 'dim', 'mask') defaults = {'dim': none, 'mask': none} _construct_array = staticmethod(sympify) _construct_dim = staticmethod(sympify)
926dd53369227fbbfcccda12b9f07fcf1f23366bd91e0849bd189d1056aeb90d
from sympy.core.function import Add, ArgumentIndexError, Function from sympy.core.power import Pow from sympy.core.singleton import S from sympy.functions.elementary.exponential import exp, log from sympy.utilities import default_sort_key def _logaddexp(x1, x2, *, evaluate=True): return log(Add(exp(x1, evaluate=evaluate), exp(x2, evaluate=evaluate), evaluate=evaluate)) _two = S.One*2 _ln2 = log(_two) def _lb(x, *, evaluate=True): return log(x, evaluate=evaluate)/_ln2 def _exp2(x, *, evaluate=True): return Pow(_two, x, evaluate=evaluate) def _logaddexp2(x1, x2, *, evaluate=True): return _lb(Add(_exp2(x1, evaluate=evaluate), _exp2(x2, evaluate=evaluate), evaluate=evaluate)) class logaddexp(Function): """ Logarithm of the sum of exponentiations of the inputs. Helper class for use with e.g. numpy.logaddexp See Also ======== https://numpy.org/doc/stable/reference/generated/numpy.logaddexp.html """ nargs = 2 def __new__(cls, *args): return Function.__new__(cls, *sorted(args, key=default_sort_key)) def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: wrt, other = self.args elif argindex == 2: other, wrt = self.args else: raise ArgumentIndexError(self, argindex) return S.One/(S.One + exp(other-wrt)) def _eval_rewrite_as_log(self, x1, x2, **kwargs): return _logaddexp(x1, x2) def _eval_evalf(self, *args, **kwargs): return self.rewrite(log).evalf(*args, **kwargs) def _eval_simplify(self, *args, **kwargs): a, b = map(lambda x: x.simplify(**kwargs), self.args) candidate = _logaddexp(a, b) if candidate != _logaddexp(a, b, evaluate=False): return candidate else: return logaddexp(a, b) class logaddexp2(Function): """ Logarithm of the sum of exponentiations of the inputs in base-2. Helper class for use with e.g. numpy.logaddexp2 See Also ======== https://numpy.org/doc/stable/reference/generated/numpy.logaddexp2.html """ nargs = 2 def __new__(cls, *args): return Function.__new__(cls, *sorted(args, key=default_sort_key)) def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: wrt, other = self.args elif argindex == 2: other, wrt = self.args else: raise ArgumentIndexError(self, argindex) return S.One/(S.One + _exp2(other-wrt)) def _eval_rewrite_as_log(self, x1, x2, **kwargs): return _logaddexp2(x1, x2) def _eval_evalf(self, *args, **kwargs): return self.rewrite(log).evalf(*args, **kwargs) def _eval_simplify(self, *args, **kwargs): a, b = map(lambda x: x.simplify(**kwargs).factor(), self.args) candidate = _logaddexp2(a, b) if candidate != _logaddexp2(a, b, evaluate=False): return candidate else: return logaddexp2(a, b)
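

# A minimal, hedged usage sketch (not part of the original module): rewriting
# in terms of ``log`` recovers the defining identities, and differentiation
# follows the ``fdiff`` rules above.
if __name__ == "__main__":
    from sympy import symbols

    x, y = symbols('x y')
    lse = logaddexp(x, y)
    print(lse.rewrite(log))   # log(exp(x) + exp(y))
    print(lse.diff(x))        # 1/(1 + exp(y - x)), up to the ordering of terms
    print(logaddexp2(x, y).rewrite(log))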
7852fe3c5e082e054762c3f7705af267aa51a60f023477d8809516a778e1a4cc
import bisect import itertools from functools import reduce from itertools import accumulate from collections import defaultdict from sympy import Indexed, IndexedBase, Tuple, Sum, Add, S, Integer, diagonalize_vector, DiagMatrix from sympy.combinatorics import Permutation from sympy.combinatorics.permutations import _af_invert from sympy.core.basic import Basic from sympy.core.compatibility import default_sort_key from sympy.core.mul import Mul from sympy.core.sympify import _sympify from sympy.functions.special.tensor_functions import KroneckerDelta from sympy.matrices.expressions import (MatAdd, MatMul, Trace, Transpose, MatrixSymbol) from sympy.matrices.expressions.matexpr import MatrixExpr, MatrixElement from sympy.tensor.array import NDimArray class _CodegenArrayAbstract(Basic): @property def subranks(self): """ Returns the ranks of the objects in the uppermost tensor product inside the current object. In case no tensor products are contained, return the atomic ranks. Examples ======== >>> from sympy.codegen.array_utils import CodegenArrayTensorProduct, CodegenArrayContraction >>> from sympy import MatrixSymbol >>> M = MatrixSymbol("M", 3, 3) >>> N = MatrixSymbol("N", 3, 3) >>> P = MatrixSymbol("P", 3, 3) Important: do not confuse the rank of the matrix with the rank of an array. >>> tp = CodegenArrayTensorProduct(M, N, P) >>> tp.subranks [2, 2, 2] >>> co = CodegenArrayContraction(tp, (1, 2), (3, 4)) >>> co.subranks [2, 2, 2] """ return self._subranks[:] def subrank(self): """ The sum of ``subranks``. """ return sum(self.subranks) @property def shape(self): return self._shape class CodegenArrayContraction(_CodegenArrayAbstract): r""" This class is meant to represent contractions of arrays in a form easily processable by the code printers. """ def __new__(cls, expr, *contraction_indices, **kwargs): contraction_indices = _sort_contraction_indices(contraction_indices) expr = _sympify(expr) if len(contraction_indices) == 0: return expr if isinstance(expr, CodegenArrayContraction): return cls._flatten(expr, *contraction_indices) if isinstance(expr, CodegenArrayPermuteDims): return cls._handle_nested_permute_dims(expr, *contraction_indices) if isinstance(expr, CodegenArrayTensorProduct): expr, contraction_indices = cls._sort_fully_contracted_args(expr, contraction_indices) if isinstance(expr, CodegenArrayDiagonal): return cls._handle_nested_diagonal(expr, *contraction_indices) obj = Basic.__new__(cls, expr, *contraction_indices) obj._subranks = _get_subranks(expr) obj._mapping = _get_mapping_from_subranks(obj._subranks) free_indices_to_position = {i: i for i in range(sum(obj._subranks)) if all([i not in cind for cind in contraction_indices])} obj._free_indices_to_position = free_indices_to_position shape = expr.shape cls._validate(expr, *contraction_indices) if shape: shape = tuple(shp for i, shp in enumerate(shape) if not any(i in j for j in contraction_indices)) obj._shape = shape return obj def __mul__(self, other): if other == 1: return self else: raise NotImplementedError("Product of N-dim arrays is not uniquely defined. Use another method.") def __rmul__(self, other): if other == 1: return self else: raise NotImplementedError("Product of N-dim arrays is not uniquely defined. 
Use another method.") @staticmethod def _validate(expr, *contraction_indices): shape = expr.shape if shape is None: return # Check that no contraction happens when the shape is mismatched: for i in contraction_indices: if len({shape[j] for j in i if shape[j] != -1}) != 1: raise ValueError("contracting indices of different dimensions") @classmethod def _push_indices_down(cls, contraction_indices, indices): flattened_contraction_indices = [j for i in contraction_indices for j in i] flattened_contraction_indices.sort() transform = _build_push_indices_down_func_transformation(flattened_contraction_indices) return _apply_recursively_over_nested_lists(transform, indices) @classmethod def _push_indices_up(cls, contraction_indices, indices): flattened_contraction_indices = [j for i in contraction_indices for j in i] flattened_contraction_indices.sort() transform = _build_push_indices_up_func_transformation(flattened_contraction_indices) return _apply_recursively_over_nested_lists(transform, indices) def split_multiple_contractions(self): """ Recognize multiple contractions and attempt at rewriting them as paired-contractions. """ from sympy import ask, Q contraction_indices = self.contraction_indices if isinstance(self.expr, CodegenArrayTensorProduct): args = list(self.expr.args) else: args = [self.expr] # TODO: unify API, best location in CodegenArrayTensorProduct subranks = [get_rank(i) for i in args] # TODO: unify API mapping = _get_mapping_from_subranks(subranks) reverse_mapping = {v:k for k, v in mapping.items()} new_contraction_indices = [] for indl, links in enumerate(contraction_indices): if len(links) <= 2: new_contraction_indices.append(links) continue # Check multiple contractions: # # Examples: # # * `A_ij b_j0 C_jk` ===> `A*DiagMatrix(b)*C` # # Care for: # - matrix being diagonalized (i.e. `A_ii`) # - vectors being diagonalized (i.e. `a_i0`) # Also consider the case of diagonal matrices being contracted: current_dimension = self.expr.shape[links[0]] tuple_links = [mapping[i] for i in links] arg_indices, arg_positions = zip(*tuple_links) args_updates = {} if len(arg_indices) != len(set(arg_indices)): # Maybe trace should be supported? 
raise NotImplementedError not_vectors = [] vectors = [] for arg_ind, arg_pos in tuple_links: mat = args[arg_ind] other_arg_pos = 1-arg_pos other_arg_abs = reverse_mapping[arg_ind, other_arg_pos] if (((1 not in mat.shape) and (not ask(Q.diagonal(mat)))) or ((current_dimension == 1) is True and mat.shape != (1, 1)) or any([other_arg_abs in l for li, l in enumerate(contraction_indices) if li != indl]) ): not_vectors.append((arg_ind, arg_pos)) continue args_updates[arg_ind] = diagonalize_vector(mat) vectors.append((arg_ind, arg_pos)) vectors.append((arg_ind, 1-arg_pos)) if len(not_vectors) > 2: new_contraction_indices.append(links) continue if len(not_vectors) == 0: new_sequence = vectors[:1] + vectors[2:] elif len(not_vectors) == 1: new_sequence = not_vectors[:1] + vectors[:-1] else: new_sequence = not_vectors[:1] + vectors + not_vectors[1:] for i in range(0, len(new_sequence) - 1, 2): arg1, pos1 = new_sequence[i] arg2, pos2 = new_sequence[i+1] if arg1 == arg2: raise NotImplementedError continue abspos1 = reverse_mapping[arg1, pos1] abspos2 = reverse_mapping[arg2, pos2] new_contraction_indices.append((abspos1, abspos2)) for ind, newarg in args_updates.items(): args[ind] = newarg return CodegenArrayContraction( CodegenArrayTensorProduct(*args), *new_contraction_indices ) def flatten_contraction_of_diagonal(self): if not isinstance(self.expr, CodegenArrayDiagonal): return self contraction_down = self.expr._push_indices_down(self.expr.diagonal_indices, self.contraction_indices) new_contraction_indices = [] diagonal_indices = self.expr.diagonal_indices[:] for i in contraction_down: contraction_group = list(i) for j in i: diagonal_with = [k for k in diagonal_indices if j in k] contraction_group.extend([l for k in diagonal_with for l in k]) diagonal_indices = [k for k in diagonal_indices if k not in diagonal_with] new_contraction_indices.append(sorted(set(contraction_group))) new_contraction_indices = CodegenArrayDiagonal._push_indices_up(diagonal_indices, new_contraction_indices) return CodegenArrayContraction( CodegenArrayDiagonal( self.expr.expr, *diagonal_indices ), *new_contraction_indices ) @staticmethod def _get_free_indices_to_position_map(free_indices, contraction_indices): free_indices_to_position = {} flattened_contraction_indices = [j for i in contraction_indices for j in i] counter = 0 for ind in free_indices: while counter in flattened_contraction_indices: counter += 1 free_indices_to_position[ind] = counter counter += 1 return free_indices_to_position @staticmethod def _get_index_shifts(expr): """ Get the mapping of indices at the positions before the contraction occurs. Examples ======== >>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct >>> from sympy import MatrixSymbol >>> M = MatrixSymbol("M", 3, 3) >>> N = MatrixSymbol("N", 3, 3) >>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), [1, 2]) >>> cg._get_index_shifts(cg) [0, 2] Indeed, ``cg`` after the contraction has two dimensions, 0 and 1. They need to be shifted by 0 and 2 to get the corresponding positions before the contraction (that is, 0 and 3). 
""" inner_contraction_indices = expr.contraction_indices all_inner = [j for i in inner_contraction_indices for j in i] all_inner.sort() # TODO: add API for total rank and cumulative rank: total_rank = _get_subrank(expr) inner_rank = len(all_inner) outer_rank = total_rank - inner_rank shifts = [0 for i in range(outer_rank)] counter = 0 pointer = 0 for i in range(outer_rank): while pointer < inner_rank and counter >= all_inner[pointer]: counter += 1 pointer += 1 shifts[i] += pointer counter += 1 return shifts @staticmethod def _convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices): shifts = CodegenArrayContraction._get_index_shifts(expr) outer_contraction_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_contraction_indices) return outer_contraction_indices @staticmethod def _flatten(expr, *outer_contraction_indices): inner_contraction_indices = expr.contraction_indices outer_contraction_indices = CodegenArrayContraction._convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices) contraction_indices = inner_contraction_indices + outer_contraction_indices return CodegenArrayContraction(expr.expr, *contraction_indices) @classmethod def _handle_nested_permute_dims(cls, expr, *contraction_indices): permutation = expr.permutation plist = permutation.array_form new_contraction_indices = [tuple(permutation(j) for j in i) for i in contraction_indices] new_plist = [i for i in plist if all(i not in j for j in new_contraction_indices)] new_plist = cls._push_indices_up(new_contraction_indices, new_plist) return CodegenArrayPermuteDims( CodegenArrayContraction(expr.expr, *new_contraction_indices), Permutation(new_plist) ) @classmethod def _handle_nested_diagonal(cls, expr: 'CodegenArrayDiagonal', *contraction_indices): diagonal_indices = list(expr.diagonal_indices) down_contraction_indices = expr._push_indices_down(expr.diagonal_indices, contraction_indices, get_rank(expr.expr)) # Flatten diagonally contracted indices: down_contraction_indices = [[k for j in i for k in (j if isinstance(j, (tuple, Tuple)) else [j])] for i in down_contraction_indices] new_contraction_indices = [] for contr_indgrp in down_contraction_indices: ind = contr_indgrp[:] for j, diag_indgrp in enumerate(diagonal_indices): if diag_indgrp is None: continue if any(i in diag_indgrp for i in contr_indgrp): ind.extend(diag_indgrp) diagonal_indices[j] = None new_contraction_indices.append(sorted(set(ind))) new_diagonal_indices_down = [i for i in diagonal_indices if i is not None] new_diagonal_indices = CodegenArrayContraction._push_indices_up(new_contraction_indices, new_diagonal_indices_down) return CodegenArrayDiagonal( CodegenArrayContraction(expr.expr, *new_contraction_indices), *new_diagonal_indices ) @classmethod def _sort_fully_contracted_args(cls, expr, contraction_indices): if expr.shape is None: return expr, contraction_indices cumul = list(accumulate([0] + expr.subranks)) index_blocks = [list(range(cumul[i], cumul[i+1])) for i in range(len(expr.args))] contraction_indices_flat = {j for i in contraction_indices for j in i} fully_contracted = [all(j in contraction_indices_flat for j in range(cumul[i], cumul[i+1])) for i, arg in enumerate(expr.args)] new_pos = sorted(range(len(expr.args)), key=lambda x: (0, default_sort_key(expr.args[x])) if fully_contracted[x] else (1,)) new_args = [expr.args[i] for i in new_pos] new_index_blocks_flat = [j for i in new_pos for j in index_blocks[i]] index_permutation_array_form = _af_invert(new_index_blocks_flat) new_contraction_indices = 
[tuple(index_permutation_array_form[j] for j in i) for i in contraction_indices] new_contraction_indices = _sort_contraction_indices(new_contraction_indices) return CodegenArrayTensorProduct(*new_args), new_contraction_indices def _get_contraction_tuples(self): r""" Return tuples containing the argument index and position within the argument of the index position. Examples ======== >>> from sympy import MatrixSymbol >>> from sympy.abc import N >>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct >>> A = MatrixSymbol("A", N, N) >>> B = MatrixSymbol("B", N, N) >>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2)) >>> cg._get_contraction_tuples() [[(0, 1), (1, 0)]] Notes ===== Here the contraction pair `(1, 2)` meaning that the 2nd and 3rd indices of the tensor product `A\otimes B` are contracted, has been transformed into `(0, 1)` and `(1, 0)`, identifying the same indices in a different notation. `(0, 1)` is the second index (1) of the first argument (i.e. 0 or `A`). `(1, 0)` is the first index (i.e. 0) of the second argument (i.e. 1 or `B`). """ mapping = self._mapping return [[mapping[j] for j in i] for i in self.contraction_indices] @staticmethod def _contraction_tuples_to_contraction_indices(expr, contraction_tuples): # TODO: check that `expr` has `.subranks`: ranks = expr.subranks cumulative_ranks = [0] + list(accumulate(ranks)) return [tuple(cumulative_ranks[j]+k for j, k in i) for i in contraction_tuples] @property def free_indices(self): return self._free_indices[:] @property def free_indices_to_position(self): return dict(self._free_indices_to_position) @property def expr(self): return self.args[0] @property def contraction_indices(self): return self.args[1:] def _contraction_indices_to_components(self): expr = self.expr if not isinstance(expr, CodegenArrayTensorProduct): raise NotImplementedError("only for contractions of tensor products") ranks = expr.subranks mapping = {} counter = 0 for i, rank in enumerate(ranks): for j in range(rank): mapping[counter] = (i, j) counter += 1 return mapping def sort_args_by_name(self): """ Sort arguments in the tensor product so that their order is lexicographical. Examples ======== >>> from sympy import MatrixSymbol >>> from sympy.abc import N >>> from sympy.codegen.array_utils import parse_matrix_expression >>> A = MatrixSymbol("A", N, N) >>> B = MatrixSymbol("B", N, N) >>> C = MatrixSymbol("C", N, N) >>> D = MatrixSymbol("D", N, N) >>> cg = parse_matrix_expression(C*D*A*B) >>> cg CodegenArrayContraction(CodegenArrayTensorProduct(A, D, C, B), (0, 3), (1, 6), (2, 5)) >>> cg.sort_args_by_name() CodegenArrayContraction(CodegenArrayTensorProduct(A, D, B, C), (0, 3), (1, 4), (2, 7)) """ expr = self.expr if not isinstance(expr, CodegenArrayTensorProduct): return self args = expr.args sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1])) pos_sorted, args_sorted = zip(*sorted_data) reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)} contraction_tuples = self._get_contraction_tuples() contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples] c_tp = CodegenArrayTensorProduct(*args_sorted) new_contr_indices = self._contraction_tuples_to_contraction_indices( c_tp, contraction_tuples ) return CodegenArrayContraction(c_tp, *new_contr_indices) def _get_contraction_links(self): r""" Returns a dictionary of links between arguments in the tensor product being contracted. See the example for an explanation of the values. 
Examples ======== >>> from sympy import MatrixSymbol >>> from sympy.abc import N >>> from sympy.codegen.array_utils import parse_matrix_expression >>> A = MatrixSymbol("A", N, N) >>> B = MatrixSymbol("B", N, N) >>> C = MatrixSymbol("C", N, N) >>> D = MatrixSymbol("D", N, N) Matrix multiplications are pairwise contractions between neighboring matrices: `A_{ij} B_{jk} C_{kl} D_{lm}` >>> cg = parse_matrix_expression(A*B*C*D) >>> cg CodegenArrayContraction(CodegenArrayTensorProduct(B, C, A, D), (0, 5), (1, 2), (3, 6)) >>> cg._get_contraction_links() {0: {0: (2, 1), 1: (1, 0)}, 1: {0: (0, 1), 1: (3, 0)}, 2: {1: (0, 0)}, 3: {0: (1, 1)}} This dictionary is interpreted as follows: argument in position 0 (i.e. matrix `A`) has its second index (i.e. 1) contracted to `(1, 0)`, that is argument in position 1 (matrix `B`) on the first index slot of `B`, this is the contraction provided by the index `j` from `A`. The argument in position 1 (that is, matrix `B`) has two contractions, the ones provided by the indices `j` and `k`, respectively the first and second indices (0 and 1 in the sub-dict). The link `(0, 1)` and `(2, 0)` respectively. `(0, 1)` is the index slot 1 (the 2nd) of argument in position 0 (that is, `A_{\ldot j}`), and so on. """ args, dlinks = _get_contraction_links([self], self.subranks, *self.contraction_indices) return dlinks def get_shape(expr): if hasattr(expr, "shape"): return expr.shape return () class CodegenArrayTensorProduct(_CodegenArrayAbstract): r""" Class to represent the tensor product of array-like objects. """ def __new__(cls, *args): args = [_sympify(arg) for arg in args] args = cls._flatten(args) ranks = [_get_subrank(arg) for arg in args] # Check if there are nested permutation and lift them up: permutation_cycles = [] for i, arg in enumerate(args): if not isinstance(arg, CodegenArrayPermuteDims): continue permutation_cycles.extend([[k + sum(ranks[:i]) for k in j] for j in arg.permutation.cyclic_form]) args[i] = arg.expr if permutation_cycles: return CodegenArrayPermuteDims(CodegenArrayTensorProduct(*args), Permutation(sum(ranks)-1)*Permutation(permutation_cycles)) if len(args) == 1: return args[0] # If there are contraction objects inside, transform the whole # expression into `CodegenArrayContraction`: contractions = {i: arg for i, arg in enumerate(args) if isinstance(arg, CodegenArrayContraction)} if contractions: cumulative_ranks = list(accumulate([0] + ranks))[:-1] tp = cls(*[arg.expr if isinstance(arg, CodegenArrayContraction) else arg for arg in args]) contraction_indices = [tuple(cumulative_ranks[i] + k for k in j) for i, arg in contractions.items() for j in arg.contraction_indices] return CodegenArrayContraction(tp, *contraction_indices) #newargs = [i for i in args if hasattr(i, "shape")] #coeff = reduce(lambda x, y: x*y, [i for i in args if not hasattr(i, "shape")], S.One) #newargs[0] *= coeff obj = Basic.__new__(cls, *args) obj._subranks = ranks shapes = [get_shape(i) for i in args] if any(i is None for i in shapes): obj._shape = None else: obj._shape = tuple(j for i in shapes for j in i) return obj @classmethod def _flatten(cls, args): args = [i for arg in args for i in (arg.args if isinstance(arg, cls) else [arg])] return args class CodegenArrayElementwiseAdd(_CodegenArrayAbstract): r""" Class for elementwise array additions. 
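
    Examples
    ========

    A minimal sketch mirroring the neighbouring classes' doctests; the last
    step assumes ``recognize_matrix_expression`` from this module (used in the
    ``CodegenArrayPermuteDims`` examples below) maps the sum back to a matrix
    addition:

    >>> from sympy import MatrixSymbol
    >>> from sympy.codegen.array_utils import (CodegenArrayElementwiseAdd,
    ...     recognize_matrix_expression)
    >>> M = MatrixSymbol("M", 3, 3)
    >>> N = MatrixSymbol("N", 3, 3)
    >>> CodegenArrayElementwiseAdd(M, N).shape
    (3, 3)
    >>> recognize_matrix_expression(CodegenArrayElementwiseAdd(M, N))
    M + N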
""" def __new__(cls, *args): args = [_sympify(arg) for arg in args] obj = Basic.__new__(cls, *args) ranks = [get_rank(arg) for arg in args] ranks = list(set(ranks)) if len(ranks) != 1: raise ValueError("summing arrays of different ranks") obj._subranks = ranks shapes = [arg.shape for arg in args] if len({i for i in shapes if i is not None}) > 1: raise ValueError("mismatching shapes in addition") if any(i is None for i in shapes): obj._shape = None else: obj._shape = shapes[0] return obj class CodegenArrayPermuteDims(_CodegenArrayAbstract): r""" Class to represent permutation of axes of arrays. Examples ======== >>> from sympy.codegen.array_utils import CodegenArrayPermuteDims >>> from sympy import MatrixSymbol >>> M = MatrixSymbol("M", 3, 3) >>> cg = CodegenArrayPermuteDims(M, [1, 0]) The object ``cg`` represents the transposition of ``M``, as the permutation ``[1, 0]`` will act on its indices by switching them: `M_{ij} \Rightarrow M_{ji}` This is evident when transforming back to matrix form: >>> from sympy.codegen.array_utils import recognize_matrix_expression >>> recognize_matrix_expression(cg) M.T >>> N = MatrixSymbol("N", 3, 2) >>> cg = CodegenArrayPermuteDims(N, [1, 0]) >>> cg.shape (2, 3) Permutations of tensor products are simplified in order to achieve a standard form: >>> from sympy.codegen.array_utils import CodegenArrayTensorProduct >>> M = MatrixSymbol("M", 4, 5) >>> tp = CodegenArrayTensorProduct(M, N) >>> tp.shape (4, 5, 3, 2) >>> perm1 = CodegenArrayPermuteDims(tp, [2, 3, 1, 0]) The args ``(M, N)`` have been sorted and the permutation has been simplified, the expression is equivalent: >>> perm1.expr.args (N, M) >>> perm1.shape (3, 2, 5, 4) >>> perm1.permutation (2 3) The permutation in its array form has been simplified from ``[2, 3, 1, 0]`` to ``[0, 1, 3, 2]``, as the arguments of the tensor product `M` and `N` have been switched: >>> perm1.permutation.array_form [0, 1, 3, 2] We can nest a second permutation: >>> perm2 = CodegenArrayPermuteDims(perm1, [1, 0, 2, 3]) >>> perm2.shape (2, 3, 5, 4) >>> perm2.permutation.array_form [1, 0, 3, 2] """ def __new__(cls, expr, permutation, nest_permutation=True): from sympy.combinatorics import Permutation expr = _sympify(expr) permutation = Permutation(permutation) if isinstance(expr, CodegenArrayPermuteDims): subexpr = expr.expr subperm = expr.permutation permutation = permutation * subperm expr = subexpr if isinstance(expr, CodegenArrayContraction): expr, permutation = cls._handle_nested_contraction(expr, permutation) if isinstance(expr, CodegenArrayTensorProduct): expr, permutation = cls._sort_components(expr, permutation) plist = permutation.array_form if plist == sorted(plist): return expr obj = Basic.__new__(cls, expr, permutation) obj._subranks = [get_rank(expr)] shape = expr.shape if shape is None: obj._shape = None else: obj._shape = tuple(shape[permutation(i)] for i in range(len(shape))) return obj @property def expr(self): return self.args[0] @property def permutation(self): return self.args[1] @classmethod def _sort_components(cls, expr, permutation): # Get the permutation in its image-form: perm_image_form = _af_invert(permutation.array_form) args = list(expr.args) # Starting index global position for every arg: cumul = list(accumulate([0] + expr.subranks)) # Split `perm_image_form` into a list of list corresponding to the indices # of every argument: perm_image_form_in_components = [perm_image_form[cumul[i]:cumul[i+1]] for i in range(len(args))] # Create an index, target-position-key array: ps = [(i, sorted(comp)) for 
i, comp in enumerate(perm_image_form_in_components)] # Sort the array according to the target-position-key: # In this way, we define a canonical way to sort the arguments according # to the permutation. ps.sort(key=lambda x: x[1]) # Read the inverse-permutation (i.e. image-form) of the args: perm_args_image_form = [i[0] for i in ps] # Apply the args-permutation to the `args`: args_sorted = [args[i] for i in perm_args_image_form] # Apply the args-permutation to the array-form of the permutation of the axes (of `expr`): perm_image_form_sorted_args = [perm_image_form_in_components[i] for i in perm_args_image_form] new_permutation = Permutation(_af_invert([j for i in perm_image_form_sorted_args for j in i])) return CodegenArrayTensorProduct(*args_sorted), new_permutation @classmethod def _handle_nested_contraction(cls, expr, permutation): if not isinstance(expr, CodegenArrayContraction): return expr, permutation if not isinstance(expr.expr, CodegenArrayTensorProduct): return expr, permutation args = expr.expr.args contraction_indices = expr.contraction_indices contraction_indices_flat = [j for i in contraction_indices for j in i] cumul = list(accumulate([0] + expr.subranks)) # Spread the permutation in its array form across the args in the corresponding # tensor-product arguments with free indices: permutation_array_blocks_up = [] image_form = _af_invert(permutation.array_form) counter = 0 for i, e in enumerate(expr.subranks): current = [] for j in range(cumul[i], cumul[i+1]): if j in contraction_indices_flat: continue current.append(image_form[counter]) counter += 1 permutation_array_blocks_up.append(current) # Get the map of axis repositioning for every argument of tensor-product: index_blocks = [[j for j in range(cumul[i], cumul[i+1])] for i, e in enumerate(expr.subranks)] index_blocks_up = expr._push_indices_up(expr.contraction_indices, index_blocks) inverse_permutation = permutation**(-1) index_blocks_up_permuted = [[inverse_permutation(j) for j in i if j is not None] for i in index_blocks_up] # Sorting key is a list of tuple, first element is the index of `args`, second element of # the tuple is the sorting key to sort `args` of the tensor product: sorting_keys = list(enumerate(index_blocks_up_permuted)) sorting_keys.sort(key=lambda x: x[1]) # Now we can get the permutation acting on the args in its image-form: new_perm_image_form = [i[0] for i in sorting_keys] # Apply the args-level permutation to various elements: new_index_blocks = [index_blocks[i] for i in new_perm_image_form] new_index_perm_array_form = _af_invert([j for i in new_index_blocks for j in i]) new_args = [args[i] for i in new_perm_image_form] new_contraction_indices = [tuple(new_index_perm_array_form[j] for j in i) for i in contraction_indices] new_expr = CodegenArrayContraction(CodegenArrayTensorProduct(*new_args), *new_contraction_indices) new_permutation = Permutation(_af_invert([j for i in [permutation_array_blocks_up[k] for k in new_perm_image_form] for j in i])) return new_expr, new_permutation @classmethod def _check_permutation_mapping(cls, expr, permutation): subranks = expr.subranks index2arg = [i for i, arg in enumerate(expr.args) for j in range(expr.subranks[i])] permuted_indices = [permutation(i) for i in range(expr.subrank())] new_args = list(expr.args) arg_candidate_index = index2arg[permuted_indices[0]] current_indices = [] new_permutation = [] inserted_arg_cand_indices = set([]) for i, idx in enumerate(permuted_indices): if index2arg[idx] != arg_candidate_index: new_permutation.extend(current_indices) 
current_indices = [] arg_candidate_index = index2arg[idx] current_indices.append(idx) arg_candidate_rank = subranks[arg_candidate_index] if len(current_indices) == arg_candidate_rank: new_permutation.extend(sorted(current_indices)) local_current_indices = [j - min(current_indices) for j in current_indices] i1 = index2arg[i] new_args[i1] = CodegenArrayPermuteDims(new_args[i1], Permutation(local_current_indices)) inserted_arg_cand_indices.add(arg_candidate_index) current_indices = [] new_permutation.extend(current_indices) # TODO: swap args positions in order to simplify the expression: # TODO: this should be in a function args_positions = list(range(len(new_args))) # Get possible shifts: maps = {} cumulative_subranks = [0] + list(accumulate(subranks)) for i in range(0, len(subranks)): s = set([index2arg[new_permutation[j]] for j in range(cumulative_subranks[i], cumulative_subranks[i+1])]) if len(s) != 1: continue elem = next(iter(s)) if i != elem: maps[i] = elem # Find cycles in the map: lines = [] current_line = [] while maps: if len(current_line) == 0: k, v = maps.popitem() current_line.append(k) else: k = current_line[-1] if k not in maps: current_line = [] continue v = maps.pop(k) if v in current_line: lines.append(current_line) current_line = [] continue current_line.append(v) for line in lines: for i, e in enumerate(line): args_positions[line[(i + 1) % len(line)]] = e # TODO: function in order to permute the args: permutation_blocks = [[new_permutation[cumulative_subranks[i] + j] for j in range(e)] for i, e in enumerate(subranks)] new_args = [new_args[i] for i in args_positions] new_permutation_blocks = [permutation_blocks[i] for i in args_positions] new_permutation2 = [j for i in new_permutation_blocks for j in i] return CodegenArrayTensorProduct(*new_args), Permutation(new_permutation2) # **(-1) @classmethod def _check_if_there_are_closed_cycles(cls, expr, permutation): args = list(expr.args) subranks = expr.subranks cyclic_form = permutation.cyclic_form cumulative_subranks = [0] + list(accumulate(subranks)) cyclic_min = [min(i) for i in cyclic_form] cyclic_max = [max(i) for i in cyclic_form] cyclic_keep = [] for i, cycle in enumerate(cyclic_form): flag = True for j in range(0, len(cumulative_subranks) - 1): if cyclic_min[i] >= cumulative_subranks[j] and cyclic_max[i] < cumulative_subranks[j+1]: # Found a sinkable cycle. args[j] = CodegenArrayPermuteDims(args[j], Permutation([[k - cumulative_subranks[j] for k in cyclic_form[i]]])) flag = False break if flag: cyclic_keep.append(cyclic_form[i]) return CodegenArrayTensorProduct(*args), Permutation(cyclic_keep, size=permutation.size) def nest_permutation(self): r""" DEPRECATED. """ ret = self._nest_permutation(self.expr, self.permutation) if ret is None: return self return ret @classmethod def _nest_permutation(cls, expr, permutation): if isinstance(expr, CodegenArrayTensorProduct): return CodegenArrayPermuteDims(*cls._check_if_there_are_closed_cycles(expr, permutation)) elif isinstance(expr, CodegenArrayContraction): # Invert tree hierarchy: put the contraction above. 
cycles = permutation.cyclic_form newcycles = CodegenArrayContraction._convert_outer_indices_to_inner_indices(expr, *cycles) newpermutation = Permutation(newcycles) new_contr_indices = [tuple(newpermutation(j) for j in i) for i in expr.contraction_indices] return CodegenArrayContraction(CodegenArrayPermuteDims(expr.expr, newpermutation), *new_contr_indices) elif isinstance(expr, CodegenArrayElementwiseAdd): return CodegenArrayElementwiseAdd(*[CodegenArrayPermuteDims(arg, permutation) for arg in expr.args]) return None def nest_permutation(expr): if isinstance(expr, CodegenArrayPermuteDims): return expr.nest_permutation() else: return expr class CodegenArrayDiagonal(_CodegenArrayAbstract): r""" Class to represent the diagonal operator. Explanation =========== In a 2-dimensional array it returns the diagonal, this looks like the operation: `A_{ij} \rightarrow A_{ii}` The diagonal over axes 1 and 2 (the second and third) of the tensor product of two 2-dimensional arrays `A \otimes B` is `\Big[ A_{ab} B_{cd} \Big]_{abcd} \rightarrow \Big[ A_{ai} B_{id} \Big]_{adi}` In this last example the array expression has been reduced from 4-dimensional to 3-dimensional. Notice that no contraction has occurred, rather there is a new index `i` for the diagonal, contraction would have reduced the array to 2 dimensions. Notice that the diagonalized out dimensions are added as new dimensions at the end of the indices. """ def __new__(cls, expr, *diagonal_indices): expr = _sympify(expr) diagonal_indices = [Tuple(*sorted(i)) for i in diagonal_indices] if isinstance(expr, CodegenArrayDiagonal): return cls._flatten(expr, *diagonal_indices) if isinstance(expr, CodegenArrayPermuteDims): return cls._handle_nested_permutedims_in_diag(expr, *diagonal_indices) shape = expr.shape if shape is not None: cls._validate(expr, *diagonal_indices) # Get new shape: positions, shape = cls._get_positions_shape(shape, diagonal_indices) else: positions = None if len(diagonal_indices) == 0: return expr obj = Basic.__new__(cls, expr, *diagonal_indices) obj._positions = positions obj._subranks = _get_subranks(expr) obj._shape = shape return obj @staticmethod def _validate(expr, *diagonal_indices): # Check that no diagonalization happens on indices with mismatched # dimensions: shape = expr.shape for i in diagonal_indices: if len({shape[j] for j in i}) != 1: raise ValueError("diagonalizing indices of different dimensions") if len(i) <= 1: raise ValueError("need at least two axes to diagonalize") @staticmethod def _remove_trivial_dimensions(shape, *diagonal_indices): return [tuple(j for j in i) for i in diagonal_indices if shape[i[0]] != 1] @property def expr(self): return self.args[0] @property def diagonal_indices(self): return self.args[1:] @staticmethod def _flatten(expr, *outer_diagonal_indices): inner_diagonal_indices = expr.diagonal_indices all_inner = [j for i in inner_diagonal_indices for j in i] all_inner.sort() # TODO: add API for total rank and cumulative rank: total_rank = _get_subrank(expr) inner_rank = len(all_inner) outer_rank = total_rank - inner_rank shifts = [0 for i in range(outer_rank)] counter = 0 pointer = 0 for i in range(outer_rank): while pointer < inner_rank and counter >= all_inner[pointer]: counter += 1 pointer += 1 shifts[i] += pointer counter += 1 outer_diagonal_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_diagonal_indices) diagonal_indices = inner_diagonal_indices + outer_diagonal_indices return CodegenArrayDiagonal(expr.expr, *diagonal_indices) @classmethod def 
_handle_nested_permutedims_in_diag(cls, expr: CodegenArrayPermuteDims, *diagonal_indices): back_diagonal_indices = [[expr.permutation(j) for j in i] for i in diagonal_indices] nondiag = [i for i in range(get_rank(expr)) if not any(i in j for j in diagonal_indices)] back_nondiag = [expr.permutation(i) for i in nondiag] remap = {e: i for i, e in enumerate(sorted(back_nondiag))} new_permutation1 = [remap[i] for i in back_nondiag] shift = len(new_permutation1) diag_block_perm = [i + shift for i in range(len(back_diagonal_indices))] new_permutation = new_permutation1 + diag_block_perm return CodegenArrayPermuteDims( CodegenArrayDiagonal( expr.expr, *back_diagonal_indices ), new_permutation ) def _push_indices_down_nonstatic(self, indices): transform = lambda x: self._positions[x] if x < len(self._positions) else None return _apply_recursively_over_nested_lists(transform, indices) def _push_indices_up_nonstatic(self, indices): def transform(x): for i, e in enumerate(self._positions): if (isinstance(e, int) and x == e) or (isinstance(e, tuple) and x in e): return i return _apply_recursively_over_nested_lists(transform, indices) @classmethod def _push_indices_down(cls, diagonal_indices, indices, rank): positions, shape = cls._get_positions_shape(range(rank), diagonal_indices) transform = lambda x: positions[x] if x < len(positions) else None return _apply_recursively_over_nested_lists(transform, indices) @classmethod def _push_indices_up(cls, diagonal_indices, indices, rank): positions, shape = cls._get_positions_shape(range(rank), diagonal_indices) def transform(x): for i, e in enumerate(positions): if (isinstance(e, int) and x == e) or (isinstance(e, tuple) and x in e): return i return _apply_recursively_over_nested_lists(transform, indices) def transform_to_product(self): from sympy import ask, Q diagonal_indices = self.diagonal_indices if isinstance(self.expr, CodegenArrayContraction): # invert Diagonal and Contraction: diagonal_down = CodegenArrayContraction._push_indices_down( self.expr.contraction_indices, diagonal_indices ) newexpr = CodegenArrayDiagonal( self.expr.expr, *diagonal_down ).transform_to_product() contraction_up = newexpr._push_indices_up( diagonal_down, self.expr.contraction_indices ) return CodegenArrayContraction( newexpr, *contraction_up ) if not isinstance(self.expr, CodegenArrayTensorProduct): return self args = list(self.expr.args) # TODO: unify API subranks = [get_rank(i) for i in args] # TODO: unify API mapping = _get_mapping_from_subranks(subranks) new_contraction_indices = [] drop_diagonal_indices = [] for indl, links in enumerate(diagonal_indices): if len(links) > 2: continue # Also consider the case of diagonal matrices being contracted: current_dimension = self.expr.shape[links[0]] if current_dimension == 1: drop_diagonal_indices.append(indl) continue tuple_links = [mapping[i] for i in links] arg_indices, arg_positions = zip(*tuple_links) if len(arg_indices) != len(set(arg_indices)): # Maybe trace should be supported? raise NotImplementedError args_updates = {} count_nondiagonal = 0 last = None expression_is_square = False # Check that all args are vectors: for arg_ind, arg_pos in tuple_links: mat = args[arg_ind] if 1 in mat.shape and mat.shape != (1, 1): args_updates[arg_ind] = DiagMatrix(mat) last = arg_ind else: expression_is_square = True if not ask(Q.diagonal(mat)): count_nondiagonal += 1 if count_nondiagonal > 1: break if count_nondiagonal > 1: continue # TODO: if count_nondiagonal == 0 then the sub-expression can be recognized as HadamardProduct. 
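            # Replace the vector arguments by their DiagMatrix wrappers so that
            # this diagonal axis can be rewritten as an ordinary contraction
            # (when the expression is not square the last vector is left
            # untouched).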
for arg_ind, newmat in args_updates.items(): if not expression_is_square and arg_ind == last: continue #pass args[arg_ind] = newmat drop_diagonal_indices.append(indl) new_contraction_indices.append(links) new_diagonal_indices = CodegenArrayContraction._push_indices_up( new_contraction_indices, [e for i, e in enumerate(diagonal_indices) if i not in drop_diagonal_indices] ) return CodegenArrayDiagonal( CodegenArrayContraction( CodegenArrayTensorProduct(*args), *new_contraction_indices ), *new_diagonal_indices ) @classmethod def _get_positions_shape(cls, shape, diagonal_indices): data1 = tuple((i, shp) for i, shp in enumerate(shape) if not any(i in j for j in diagonal_indices)) pos1, shp1 = zip(*data1) if data1 else ((), ()) data2 = tuple((i, shape[i[0]]) for i in diagonal_indices) pos2, shp2 = zip(*data2) if data2 else ((), ()) positions = pos1 + pos2 shape = shp1 + shp2 return positions, shape def get_rank(expr): if isinstance(expr, (MatrixExpr, MatrixElement)): return 2 if isinstance(expr, _CodegenArrayAbstract): return len(expr.shape) if isinstance(expr, NDimArray): return expr.rank() if isinstance(expr, Indexed): return expr.rank if isinstance(expr, IndexedBase): shape = expr.shape if shape is None: return -1 else: return len(shape) if isinstance(expr, _RecognizeMatOp): return expr.rank() if isinstance(expr, _RecognizeMatMulLines): return expr.rank() return 0 def _get_subrank(expr): if isinstance(expr, _CodegenArrayAbstract): return expr.subrank() return get_rank(expr) def _get_subranks(expr): if isinstance(expr, _CodegenArrayAbstract): return expr.subranks else: return [get_rank(expr)] def _get_mapping_from_subranks(subranks): mapping = {} counter = 0 for i, rank in enumerate(subranks): for j in range(rank): mapping[counter] = (i, j) counter += 1 return mapping def _get_contraction_links(args, subranks, *contraction_indices): mapping = _get_mapping_from_subranks(subranks) contraction_tuples = [[mapping[j] for j in i] for i in contraction_indices] dlinks = defaultdict(dict) for links in contraction_tuples: if len(links) == 2: (arg1, pos1), (arg2, pos2) = links dlinks[arg1][pos1] = (arg2, pos2) dlinks[arg2][pos2] = (arg1, pos1) continue return args, dict(dlinks) def _sort_contraction_indices(pairing_indices): pairing_indices = [Tuple(*sorted(i)) for i in pairing_indices] pairing_indices.sort(key=lambda x: min(x)) return pairing_indices def _get_diagonal_indices(flattened_indices): axes_contraction = defaultdict(list) for i, ind in enumerate(flattened_indices): if isinstance(ind, (int, Integer)): # If the indices is a number, there can be no diagonal operation: continue axes_contraction[ind].append(i) axes_contraction = {k: v for k, v in axes_contraction.items() if len(v) > 1} # Put the diagonalized indices at the end: ret_indices = [i for i in flattened_indices if i not in axes_contraction] diag_indices = list(axes_contraction) diag_indices.sort(key=lambda x: flattened_indices.index(x)) diagonal_indices = [tuple(axes_contraction[i]) for i in diag_indices] ret_indices += diag_indices ret_indices = tuple(ret_indices) return diagonal_indices, ret_indices def _get_argindex(subindices, ind): for i, sind in enumerate(subindices): if ind == sind: return i if isinstance(sind, (set, frozenset)) and ind in sind: return i raise IndexError("%s not found in %s" % (ind, subindices)) def _codegen_array_parse(expr): if isinstance(expr, Sum): function = expr.function summation_indices = expr.variables subexpr, subindices = _codegen_array_parse(function) # Check dimensional consistency: shape = 
subexpr.shape if shape: for ind, istart, iend in expr.limits: i = _get_argindex(subindices, ind) if istart != 0 or iend+1 != shape[i]: raise ValueError("summation index and array dimension mismatch: %s" % ind) contraction_indices = [] subindices = list(subindices) if isinstance(subexpr, CodegenArrayDiagonal): diagonal_indices = list(subexpr.diagonal_indices) dindices = subindices[-len(diagonal_indices):] subindices = subindices[:-len(diagonal_indices)] for index in summation_indices: if index in dindices: position = dindices.index(index) contraction_indices.append(diagonal_indices[position]) diagonal_indices[position] = None diagonal_indices = [i for i in diagonal_indices if i is not None] for i, ind in enumerate(subindices): if ind in summation_indices: pass if diagonal_indices: subexpr = CodegenArrayDiagonal(subexpr.expr, *diagonal_indices) else: subexpr = subexpr.expr axes_contraction = defaultdict(list) for i, ind in enumerate(subindices): if ind in summation_indices: axes_contraction[ind].append(i) subindices[i] = None for k, v in axes_contraction.items(): contraction_indices.append(tuple(v)) free_indices = [i for i in subindices if i is not None] indices_ret = list(free_indices) indices_ret.sort(key=lambda x: free_indices.index(x)) return CodegenArrayContraction( subexpr, *contraction_indices, free_indices=free_indices ), tuple(indices_ret) if isinstance(expr, Mul): args, indices = zip(*[_codegen_array_parse(arg) for arg in expr.args]) # Check if there are KroneckerDelta objects: kronecker_delta_repl = {} for arg in args: if not isinstance(arg, KroneckerDelta): continue # Diagonalize two indices: i, j = arg.indices kindices = set(arg.indices) if i in kronecker_delta_repl: kindices.update(kronecker_delta_repl[i]) if j in kronecker_delta_repl: kindices.update(kronecker_delta_repl[j]) kindices = frozenset(kindices) for index in kindices: kronecker_delta_repl[index] = kindices # Remove KroneckerDelta objects, their relations should be handled by # CodegenArrayDiagonal: newargs = [] newindices = [] for arg, loc_indices in zip(args, indices): if isinstance(arg, KroneckerDelta): continue newargs.append(arg) newindices.append(loc_indices) flattened_indices = [kronecker_delta_repl.get(j, j) for i in newindices for j in i] diagonal_indices, ret_indices = _get_diagonal_indices(flattened_indices) tp = CodegenArrayTensorProduct(*newargs) if diagonal_indices: return (CodegenArrayDiagonal(tp, *diagonal_indices), ret_indices) else: return tp, ret_indices if isinstance(expr, MatrixElement): indices = expr.args[1:] diagonal_indices, ret_indices = _get_diagonal_indices(indices) if diagonal_indices: return (CodegenArrayDiagonal(expr.args[0], *diagonal_indices), ret_indices) else: return expr.args[0], ret_indices if isinstance(expr, Indexed): indices = expr.indices diagonal_indices, ret_indices = _get_diagonal_indices(indices) if diagonal_indices: return (CodegenArrayDiagonal(expr.base, *diagonal_indices), ret_indices) else: return expr.args[0], ret_indices if isinstance(expr, IndexedBase): raise NotImplementedError if isinstance(expr, KroneckerDelta): return expr, expr.indices if isinstance(expr, Add): args, indices = zip(*[_codegen_array_parse(arg) for arg in expr.args]) args = list(args) # Check if all indices are compatible. 
Otherwise expand the dimensions: index0set = set(indices[0]) index0 = indices[0] for i in range(1, len(args)): if set(indices[i]) != index0set: raise NotImplementedError("indices must be the same") permutation = Permutation([index0.index(j) for j in indices[i]]) # Perform index permutations: args[i] = CodegenArrayPermuteDims(args[i], permutation) return CodegenArrayElementwiseAdd(*args), index0 return expr, () def parse_matrix_expression(expr: MatrixExpr) -> Basic: if isinstance(expr, MatMul): args_nonmat = [] args = [] for arg in expr.args: if isinstance(arg, MatrixExpr): args.append(arg) else: args_nonmat.append(arg) contractions = [(2*i+1, 2*i+2) for i in range(len(args)-1)] scalar = Mul.fromiter(args_nonmat) if scalar == 1: tprod = CodegenArrayTensorProduct( *[parse_matrix_expression(arg) for arg in args]) else: tprod = CodegenArrayTensorProduct( scalar, *[parse_matrix_expression(arg) for arg in args]) return CodegenArrayContraction( tprod, *contractions ) elif isinstance(expr, MatAdd): return CodegenArrayElementwiseAdd( *[parse_matrix_expression(arg) for arg in expr.args] ) elif isinstance(expr, Transpose): return CodegenArrayPermuteDims( parse_matrix_expression(expr.args[0]), [1, 0] ) elif isinstance(expr, Trace): inner_expr = parse_matrix_expression(expr.arg) return CodegenArrayContraction(inner_expr, (0, len(inner_expr.shape) - 1)) else: return expr def parse_indexed_expression(expr, first_indices=None): r""" Parse indexed expression into a form useful for code generation. Examples ======== >>> from sympy.codegen.array_utils import parse_indexed_expression >>> from sympy import MatrixSymbol, Sum, symbols >>> i, j, k, d = symbols("i j k d") >>> M = MatrixSymbol("M", d, d) >>> N = MatrixSymbol("N", d, d) Recognize the trace in summation form: >>> expr = Sum(M[i, i], (i, 0, d-1)) >>> parse_indexed_expression(expr) CodegenArrayContraction(M, (0, 1)) Recognize the extraction of the diagonal by using the same index `i` on both axes of the matrix: >>> expr = M[i, i] >>> parse_indexed_expression(expr) CodegenArrayDiagonal(M, (0, 1)) This function can help perform the transformation expressed in two different mathematical notations as: `\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}` Recognize the matrix multiplication in summation form: >>> expr = Sum(M[i, j]*N[j, k], (j, 0, d-1)) >>> parse_indexed_expression(expr) CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2)) Specify that ``k`` has to be the starting index: >>> parse_indexed_expression(expr, first_indices=[k]) CodegenArrayContraction(CodegenArrayTensorProduct(N, M), (0, 3)) """ result, indices = _codegen_array_parse(expr) if not first_indices: return result for i in first_indices: if i not in indices: first_indices.remove(i) #raise ValueError("index %s not found or not a free index" % i) first_indices.extend([i for i in indices if i not in first_indices]) permutation = [first_indices.index(i) for i in indices] return CodegenArrayPermuteDims(result, permutation) def _has_multiple_lines(expr): if isinstance(expr, _RecognizeMatMulLines): return True if isinstance(expr, _RecognizeMatOp): return expr.multiple_lines return False class _RecognizeMatOp: """ Class to help parsing matrix multiplication lines. 
""" def __init__(self, operator, args): self.operator = operator self.args = args if any(_has_multiple_lines(arg) for arg in args): multiple_lines = True else: multiple_lines = False self.multiple_lines = multiple_lines def rank(self): if self.operator == Trace: return 0 # TODO: check return 2 def __repr__(self): op = self.operator if op == MatMul: s = "*" elif op == MatAdd: s = "+" else: s = op.__name__ return "_RecognizeMatOp(%s, %s)" % (s, repr(self.args)) return "_RecognizeMatOp(%s)" % (s.join(repr(i) for i in self.args)) def __eq__(self, other): if not isinstance(other, type(self)): return False if self.operator != other.operator: return False if self.args != other.args: return False return True def __iter__(self): return iter(self.args) class _RecognizeMatMulLines(list): """ This class handles multiple parsed multiplication lines. """ def __new__(cls, args): if len(args) == 1: return args[0] return list.__new__(cls, args) def rank(self): return reduce(lambda x, y: x*y, [get_rank(i) for i in self], S.One) def __repr__(self): return "_RecognizeMatMulLines(%s)" % super().__repr__() def _support_function_tp1_recognize(contraction_indices, args): if not isinstance(args, list): args = [args] subranks = [get_rank(i) for i in args] coeff = reduce(lambda x, y: x*y, [arg for arg, srank in zip(args, subranks) if srank == 0], S.One) mapping = _get_mapping_from_subranks(subranks) reverse_mapping = {v:k for k, v in mapping.items()} args, dlinks = _get_contraction_links(args, subranks, *contraction_indices) flatten_contractions = [j for i in contraction_indices for j in i] total_rank = sum(subranks) # TODO: turn `free_indices` into a list? free_indices = {i: i for i in range(total_rank) if i not in flatten_contractions} return_list = [] while dlinks: if free_indices: first_index, starting_argind = min(free_indices.items(), key=lambda x: x[1]) free_indices.pop(first_index) starting_argind, starting_pos = mapping[starting_argind] else: # Maybe a Trace first_index = None starting_argind = min(dlinks) starting_pos = 0 current_argind, current_pos = starting_argind, starting_pos matmul_args = [] last_index = None while True: elem = args[current_argind] if current_pos == 1: elem = _RecognizeMatOp(Transpose, [elem]) matmul_args.append(elem) other_pos = 1 - current_pos if current_argind not in dlinks: other_absolute = reverse_mapping[current_argind, other_pos] free_indices.pop(other_absolute, None) break link_dict = dlinks.pop(current_argind) if other_pos not in link_dict: if free_indices: last_index = [i for i, j in free_indices.items() if mapping[j] == (current_argind, other_pos)][0] else: last_index = None break if len(link_dict) > 2: raise NotImplementedError("not a matrix multiplication line") # Get the last element of `link_dict` as the next link. The last # element is the correct start for trace expressions: current_argind, current_pos = link_dict[other_pos] if current_argind == starting_argind: # This is a trace: if len(matmul_args) > 1: matmul_args = [_RecognizeMatOp(Trace, [_RecognizeMatOp(MatMul, matmul_args)])] elif args[current_argind].shape != (1, 1): matmul_args = [_RecognizeMatOp(Trace, matmul_args)] break dlinks.pop(starting_argind, None) free_indices.pop(last_index, None) return_list.append(_RecognizeMatOp(MatMul, matmul_args)) if coeff != 1: # Let's inject the coefficient: return_list[0].args.insert(0, coeff) return _RecognizeMatMulLines(return_list) def recognize_matrix_expression(expr): r""" Recognize matrix expressions in codegen objects. 
If more than one matrix multiplication line have been detected, return a list with the matrix expressions. Examples ======== >>> from sympy import MatrixSymbol, Sum >>> from sympy.abc import i, j, k, l, N >>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct >>> from sympy.codegen.array_utils import recognize_matrix_expression, parse_indexed_expression, parse_matrix_expression >>> A = MatrixSymbol("A", N, N) >>> B = MatrixSymbol("B", N, N) >>> C = MatrixSymbol("C", N, N) >>> D = MatrixSymbol("D", N, N) >>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1)) >>> cg = parse_indexed_expression(expr) >>> recognize_matrix_expression(cg) A*B >>> cg = parse_indexed_expression(expr, first_indices=[k]) >>> recognize_matrix_expression(cg) B.T*A.T Transposition is detected: >>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1)) >>> cg = parse_indexed_expression(expr) >>> recognize_matrix_expression(cg) A.T*B >>> cg = parse_indexed_expression(expr, first_indices=[k]) >>> recognize_matrix_expression(cg) B.T*A Detect the trace: >>> expr = Sum(A[i, i], (i, 0, N-1)) >>> cg = parse_indexed_expression(expr) >>> recognize_matrix_expression(cg) Trace(A) Recognize some more complex traces: >>> expr = Sum(A[i, j]*B[j, i], (i, 0, N-1), (j, 0, N-1)) >>> cg = parse_indexed_expression(expr) >>> recognize_matrix_expression(cg) Trace(A*B) More complicated expressions: >>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1)) >>> cg = parse_indexed_expression(expr) >>> recognize_matrix_expression(cg) A*B.T*A.T Expressions constructed from matrix expressions do not contain literal indices, the positions of free indices are returned instead: >>> expr = A*B >>> cg = parse_matrix_expression(expr) >>> recognize_matrix_expression(cg) A*B If more than one line of matrix multiplications is detected, return separate matrix multiplication factors: >>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (1, 2), (5, 6)) >>> recognize_matrix_expression(cg) [A*B, C*D] The two lines have free indices at axes 0, 3 and 4, 7, respectively. """ # TODO: expr has to be a CodegenArray... 
type rec = _recognize_matrix_expression(expr) return _unfold_recognized_expr(rec) def _recognize_matrix_expression(expr): if isinstance(expr, CodegenArrayContraction): # Apply some transformations: expr = expr.flatten_contraction_of_diagonal() expr = expr.split_multiple_contractions() args = _recognize_matrix_expression(expr.expr) contraction_indices = expr.contraction_indices if isinstance(args, _RecognizeMatOp) and args.operator == MatAdd: addends = [] for arg in args.args: addends.append(_support_function_tp1_recognize(contraction_indices, arg)) return _RecognizeMatOp(MatAdd, addends) elif isinstance(args, _RecognizeMatMulLines): return _support_function_tp1_recognize(contraction_indices, args) return _support_function_tp1_recognize(contraction_indices, [args]) elif isinstance(expr, CodegenArrayElementwiseAdd): add_args = [] for arg in expr.args: add_args.append(_recognize_matrix_expression(arg)) return _RecognizeMatOp(MatAdd, add_args) elif isinstance(expr, (MatrixSymbol, IndexedBase)): return expr elif isinstance(expr, CodegenArrayPermuteDims): if expr.permutation.array_form == [1, 0]: return _RecognizeMatOp(Transpose, [_recognize_matrix_expression(expr.expr)]) elif isinstance(expr.expr, CodegenArrayTensorProduct): ranks = expr.expr.subranks newrange = [expr.permutation(i) for i in range(sum(ranks))] newpos = [] counter = 0 for rank in ranks: newpos.append(newrange[counter:counter+rank]) counter += rank newargs = [] for pos, arg in zip(newpos, expr.expr.args): if pos == sorted(pos): newargs.append((_recognize_matrix_expression(arg), pos[0])) elif len(pos) == 2: newargs.append((_RecognizeMatOp(Transpose, [_recognize_matrix_expression(arg)]), pos[0])) else: raise NotImplementedError newargs.sort(key=lambda x: x[1]) newargs = [i[0] for i in newargs] return _RecognizeMatMulLines(newargs) elif isinstance(expr.expr, CodegenArrayContraction): mat_mul_lines = _recognize_matrix_expression(expr.expr) if not isinstance(mat_mul_lines, _RecognizeMatMulLines): raise NotImplementedError() permutation = Permutation(2*len(mat_mul_lines)-1)*expr.permutation permuted = [permutation(i) for i in range(2*len(mat_mul_lines))] args_array = [None for i in mat_mul_lines] for i in range(len(mat_mul_lines)): p1 = permuted[2*i] p2 = permuted[2*i+1] if p1 // 2 != p2 // 2: raise NotImplementedError("permutation mixes the axes in a way that cannot be represented by matrices") pos = p1 // 2 if p1 > p2: args_array[i] = _RecognizeMatOp(Transpose, mat_mul_lines[pos]) else: args_array[i] = mat_mul_lines[pos] return _RecognizeMatMulLines(args_array) else: raise NotImplementedError() elif isinstance(expr, CodegenArrayTensorProduct): args = [_recognize_matrix_expression(arg) for arg in expr.args] multiple_lines = [_has_multiple_lines(arg) for arg in args] if any(multiple_lines): if any(a.operator != MatAdd for i, a in enumerate(args) if multiple_lines[i] and isinstance(a, _RecognizeMatOp)): raise NotImplementedError getargs = lambda x: x.args if isinstance(x, _RecognizeMatOp) else list(x) expand_args = [getargs(arg) if multiple_lines[i] else [arg] for i, arg in enumerate(args)] it = itertools.product(*expand_args) ret = _RecognizeMatOp(MatAdd, [_RecognizeMatMulLines([k for j in i for k in (j if isinstance(j, _RecognizeMatMulLines) else [j])]) for i in it]) return ret return _RecognizeMatMulLines(args) elif isinstance(expr, CodegenArrayDiagonal): pexpr = expr.transform_to_product() if expr == pexpr: return expr return _recognize_matrix_expression(pexpr) elif isinstance(expr, Transpose): return expr elif isinstance(expr, 
MatrixExpr): return expr return expr def _suppress_trivial_dims_in_tensor_product(mat_list): # Recognize expressions like [x, y] with shape (k, 1, k, 1) as `x*y.T`. # The matrix expression has to be equivalent to the tensor product of the matrices, with trivial dimensions (i.e. dim=1) dropped. # That is, add contractions over trivial dimensions: mat_11 = [] mat_k1 = [] for mat in mat_list: if mat.shape == (1, 1): mat_11.append(mat) elif 1 in mat.shape: if mat.shape[0] == 1: mat_k1.append(mat.T) else: mat_k1.append(mat) else: return mat_list if len(mat_k1) > 2: return mat_list a = MatMul.fromiter(mat_k1[:1]) b = MatMul.fromiter(mat_k1[1:]) x = MatMul.fromiter(mat_11) return a*x*b.T def _unfold_recognized_expr(expr): if isinstance(expr, _RecognizeMatOp): return expr.operator(*[_unfold_recognized_expr(i) for i in expr.args]) elif isinstance(expr, _RecognizeMatMulLines): unfolded = [_unfold_recognized_expr(i) for i in expr] mat_list = [i for i in unfolded if isinstance(i, MatrixExpr)] scalar_list = [i for i in unfolded if i not in mat_list] scalar = Mul.fromiter(scalar_list) mat_list = [i.doit() for i in mat_list] mat_list = [i for i in mat_list if not (i.shape == (1, 1) and i.is_Identity)] if mat_list: mat_list[0] *= scalar if len(mat_list) == 1: return mat_list[0].doit() else: return _suppress_trivial_dims_in_tensor_product(mat_list) else: return scalar else: return expr def _apply_recursively_over_nested_lists(func, arr): if isinstance(arr, (tuple, list, Tuple)): return tuple(_apply_recursively_over_nested_lists(func, i) for i in arr) elif isinstance(arr, Tuple): return Tuple.fromiter(_apply_recursively_over_nested_lists(func, i) for i in arr) else: return func(arr) def _build_push_indices_up_func_transformation(flattened_contraction_indices): shifts = {0: 0} i = 0 cumulative = 0 while i < len(flattened_contraction_indices): j = 1 while i+j < len(flattened_contraction_indices): if flattened_contraction_indices[i] + j != flattened_contraction_indices[i+j]: break j += 1 cumulative += j shifts[flattened_contraction_indices[i]] = cumulative i += j shift_keys = sorted(shifts.keys()) def func(idx): return shifts[shift_keys[bisect.bisect_right(shift_keys, idx)-1]] def transform(j): if j in flattened_contraction_indices: return None else: return j - func(j) return transform def _build_push_indices_down_func_transformation(flattened_contraction_indices): N = flattened_contraction_indices[-1]+2 shifts = [i for i in range(N) if i not in flattened_contraction_indices] def transform(j): if j < len(shifts): return shifts[j] else: return j + shifts[-1] - len(shifts) + 1 return transform
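

# Hedged usage sketch, not part of the original module: it shows the round
# trip from an indexed summation to a matrix expression using the public
# helpers defined above.  The symbol and matrix names are illustrative
# assumptions only.
if __name__ == "__main__":
    from sympy import MatrixSymbol, Sum, symbols

    i, j, k, N = symbols("i j k N")
    A = MatrixSymbol("A", N, N)
    B = MatrixSymbol("B", N, N)

    # Parse sum_j A[i, j]*B[j, k] into codegen array objects:
    cg = parse_indexed_expression(Sum(A[i, j]*B[j, k], (j, 0, N - 1)))
    print(cg)  # CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2))

    # ... and recover the equivalent matrix expression:
    print(recognize_matrix_expression(cg))  # A*B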
291c7582d4a3853cfbafb8b8f990dc3068a8580d6237bdd71e0417abe05a772e
from sympy import And, Gt, Lt, Abs, Dummy, oo, Tuple, Symbol from sympy.codegen.ast import ( Assignment, AddAugmentedAssignment, CodeBlock, Declaration, FunctionDefinition, Print, Return, Scope, While, Variable, Pointer, real ) """ This module collects functions for constructing ASTs representing algorithms. """ def newtons_method(expr, wrt, atol=1e-12, delta=None, debug=False, itermax=None, counter=None): """ Generates an AST for Newton-Raphson method (a root-finding algorithm). Explanation =========== Returns an abstract syntax tree (AST) based on ``sympy.codegen.ast`` for Netwon's method of root-finding. Parameters ========== expr : expression wrt : Symbol With respect to, i.e. what is the variable. atol : number or expr Absolute tolerance (stopping criterion) delta : Symbol Will be a ``Dummy`` if ``None``. debug : bool Whether to print convergence information during iterations itermax : number or expr Maximum number of iterations. counter : Symbol Will be a ``Dummy`` if ``None``. Examples ======== >>> from sympy import symbols, cos >>> from sympy.codegen.ast import Assignment >>> from sympy.codegen.algorithms import newtons_method >>> x, dx, atol = symbols('x dx atol') >>> expr = cos(x) - x**3 >>> algo = newtons_method(expr, x, atol, dx) >>> algo.has(Assignment(dx, -expr/expr.diff(x))) True References ========== .. [1] https://en.wikipedia.org/wiki/Newton%27s_method """ if delta is None: delta = Dummy() Wrapper = Scope name_d = 'delta' else: Wrapper = lambda x: x name_d = delta.name delta_expr = -expr/expr.diff(wrt) whl_bdy = [Assignment(delta, delta_expr), AddAugmentedAssignment(wrt, delta)] if debug: prnt = Print([wrt, delta], r"{}=%12.5g {}=%12.5g\n".format(wrt.name, name_d)) whl_bdy = [whl_bdy[0], prnt] + whl_bdy[1:] req = Gt(Abs(delta), atol) declars = [Declaration(Variable(delta, type=real, value=oo))] if itermax is not None: counter = counter or Dummy(integer=True) v_counter = Variable.deduced(counter, 0) declars.append(Declaration(v_counter)) whl_bdy.append(AddAugmentedAssignment(counter, 1)) req = And(req, Lt(counter, itermax)) whl = While(req, CodeBlock(*whl_bdy)) blck = declars + [whl] return Wrapper(CodeBlock(*blck)) def _symbol_of(arg): if isinstance(arg, Declaration): arg = arg.variable.symbol elif isinstance(arg, Variable): arg = arg.symbol return arg def newtons_method_function(expr, wrt, params=None, func_name="newton", attrs=Tuple(), *, delta=None, **kwargs): """ Generates an AST for a function implementing the Newton-Raphson method. Parameters ========== expr : expression wrt : Symbol With respect to, i.e. what is the variable params : iterable of symbols Symbols appearing in expr that are taken as constants during the iterations (these will be accepted as parameters to the generated function). func_name : str Name of the generated function. attrs : Tuple Attribute instances passed as ``attrs`` to ``FunctionDefinition``. \\*\\*kwargs : Keyword arguments passed to :func:`sympy.codegen.algorithms.newtons_method`. 
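    delta : Symbol, optional
        Symbol used for the iteration step; when ``None`` a symbol named
        ``'d_' + wrt.name`` is used (or a ``Dummy`` if that name already
        appears in ``expr``).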
Examples ======== >>> from sympy import symbols, cos >>> from sympy.codegen.algorithms import newtons_method_function >>> from sympy.codegen.pyutils import render_as_module >>> x = symbols('x') >>> expr = cos(x) - x**3 >>> func = newtons_method_function(expr, x) >>> py_mod = render_as_module(func) # source code as string >>> namespace = {} >>> exec(py_mod, namespace, namespace) >>> res = eval('newton(0.5)', namespace) >>> abs(res - 0.865474033102) < 1e-12 True See Also ======== sympy.codegen.algorithms.newtons_method """ if params is None: params = (wrt,) pointer_subs = {p.symbol: Symbol('(*%s)' % p.symbol.name) for p in params if isinstance(p, Pointer)} if delta is None: delta = Symbol('d_' + wrt.name) if expr.has(delta): delta = None # will use Dummy algo = newtons_method(expr, wrt, delta=delta, **kwargs).xreplace(pointer_subs) if isinstance(algo, Scope): algo = algo.body not_in_params = expr.free_symbols.difference({_symbol_of(p) for p in params}) if not_in_params: raise ValueError("Missing symbols in params: %s" % ', '.join(map(str, not_in_params))) declars = tuple(Variable(p, real) for p in params) body = CodeBlock(algo, Return(wrt)) return FunctionDefinition(real, func_name, declars, body, attrs=attrs)
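

# Hedged sketch, not part of the original module: the generated AST can also
# be rendered as C source with the existing printers.  The expression and the
# tolerance below are assumptions chosen only for illustration.
if __name__ == "__main__":
    from sympy import symbols, cos
    from sympy.printing import ccode

    x = symbols('x')
    func = newtons_method_function(cos(x) - x**3, x, atol=1e-8)
    print(ccode(func))  # prints a C function named "newton"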
a963461015ada52cd02429a95ca62ed35916f688f1569e3aa32dc4bd29ba43f2
""" Classes and functions useful for rewriting expressions for optimized code generation. Some languages (or standards thereof), e.g. C99, offer specialized math functions for better performance and/or precision. Using the ``optimize`` function in this module, together with a collection of rules (represented as instances of ``Optimization``), one can rewrite the expressions for this purpose:: >>> from sympy import Symbol, exp, log >>> from sympy.codegen.rewriting import optimize, optims_c99 >>> x = Symbol('x') >>> optimize(3*exp(2*x) - 3, optims_c99) 3*expm1(2*x) >>> optimize(exp(2*x) - 3, optims_c99) exp(2*x) - 3 >>> optimize(log(3*x + 3), optims_c99) log1p(x) + log(3) >>> optimize(log(2*x + 3), optims_c99) log(2*x + 3) The ``optims_c99`` imported above is tuple containing the following instances (which may be imported from ``sympy.codegen.rewriting``): - ``expm1_opt`` - ``log1p_opt`` - ``exp2_opt`` - ``log2_opt`` - ``log2const_opt`` """ from itertools import chain from sympy import cos, exp, log, Max, Min, Wild, expand_log, Dummy, sin, sinc from sympy.assumptions import Q, ask from sympy.codegen.cfunctions import log1p, log2, exp2, expm1 from sympy.codegen.matrix_nodes import MatrixSolve from sympy.core.expr import UnevaluatedExpr from sympy.core.power import Pow from sympy.codegen.numpy_nodes import logaddexp, logaddexp2 from sympy.codegen.scipy_nodes import cosm1 from sympy.core.mul import Mul from sympy.matrices.expressions.matexpr import MatrixSymbol from sympy.utilities.iterables import sift class Optimization: """ Abstract base class for rewriting optimization. Subclasses should implement ``__call__`` taking an expression as argument. Parameters ========== cost_function : callable returning number priority : number """ def __init__(self, cost_function=None, priority=1): self.cost_function = cost_function self.priority=priority class ReplaceOptim(Optimization): """ Rewriting optimization calling replace on expressions. Explanation =========== The instance can be used as a function on expressions for which it will apply the ``replace`` method (see :meth:`sympy.core.basic.Basic.replace`). Parameters ========== query : First argument passed to replace. value : Second argument passed to replace. Examples ======== >>> from sympy import Symbol >>> from sympy.codegen.rewriting import ReplaceOptim >>> from sympy.codegen.cfunctions import exp2 >>> x = Symbol('x') >>> exp2_opt = ReplaceOptim(lambda p: p.is_Pow and p.base == 2, ... lambda p: exp2(p.exp)) >>> exp2_opt(2**x) exp2(x) """ def __init__(self, query, value, **kwargs): super().__init__(**kwargs) self.query = query self.value = value def __call__(self, expr): return expr.replace(self.query, self.value) def optimize(expr, optimizations): """ Apply optimizations to an expression. Parameters ========== expr : expression optimizations : iterable of ``Optimization`` instances The optimizations will be sorted with respect to ``priority`` (highest first). 
Examples ======== >>> from sympy import log, Symbol >>> from sympy.codegen.rewriting import optims_c99, optimize >>> x = Symbol('x') >>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99) log1p(x**2) + log2(x + 3) """ for optim in sorted(optimizations, key=lambda opt: opt.priority, reverse=True): new_expr = optim(expr) if optim.cost_function is None: expr = new_expr else: before, after = map(lambda x: optim.cost_function(x), (expr, new_expr)) if before > after: expr = new_expr return expr exp2_opt = ReplaceOptim( lambda p: p.is_Pow and p.base == 2, lambda p: exp2(p.exp) ) _d = Wild('d', properties=[lambda x: x.is_Dummy]) _u = Wild('u', properties=[lambda x: not x.is_number and not x.is_Add]) _v = Wild('v') _w = Wild('w') _n = Wild('n', properties=[lambda x: x.is_number]) sinc_opt1 = ReplaceOptim( sin(_w)/_w, sinc(_w) ) sinc_opt2 = ReplaceOptim( sin(_n*_w)/_w, _n*sinc(_n*_w) ) sinc_opts = (sinc_opt1, sinc_opt2) log2_opt = ReplaceOptim(_v*log(_w)/log(2), _v*log2(_w), cost_function=lambda expr: expr.count( lambda e: ( # division & eval of transcendentals are expensive floating point operations... e.is_Pow and e.exp.is_negative # division or (isinstance(e, (log, log2)) and not e.args[0].is_number)) # transcendental ) ) log2const_opt = ReplaceOptim(log(2)*log2(_w), log(_w)) logsumexp_2terms_opt = ReplaceOptim( lambda l: (isinstance(l, log) and l.args[0].is_Add and len(l.args[0].args) == 2 and all(isinstance(t, exp) for t in l.args[0].args)), lambda l: ( Max(*[e.args[0] for e in l.args[0].args]) + log1p(exp(Min(*[e.args[0] for e in l.args[0].args]))) ) ) class _FuncMinusOne: def __init__(self, func, func_m_1): self.func = func self.func_m_1 = func_m_1 def _try_func_m_1(self, expr): protected, old_new = expr.replace(self.func, lambda arg: Dummy(), map=True) factored = protected.factor() new_old = {v: k for k, v in old_new.items()} return factored.replace(_d - 1, lambda d: self.func_m_1(new_old[d].args[0])).xreplace(new_old) def __call__(self, e): numbers, non_num = sift(e.args, lambda arg: arg.is_number, binary=True) non_num_func, non_num_other = sift(non_num, lambda arg: arg.has(self.func), binary=True) numsum = sum(numbers) new_func_terms, done = [], False for func_term in non_num_func: if done: new_func_terms.append(func_term) else: looking_at = func_term + numsum attempt = self._try_func_m_1(looking_at) if looking_at == attempt: new_func_terms.append(func_term) else: done = True new_func_terms.append(attempt) if not done: new_func_terms.append(numsum) return e.func(*chain(new_func_terms, non_num_other)) expm1_opt = ReplaceOptim(lambda e: e.is_Add, _FuncMinusOne(exp, expm1)) cosm1_opt = ReplaceOptim(lambda e: e.is_Add, _FuncMinusOne(cos, cosm1)) log1p_opt = ReplaceOptim( lambda e: isinstance(e, log), lambda l: expand_log(l.replace( log, lambda arg: log(arg.factor()) )).replace(log(_u+1), log1p(_u)) ) def create_expand_pow_optimization(limit): """ Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``. Explanation =========== The requirements for expansions are that the base needs to be a symbol and the exponent needs to be an Integer (and be less than or equal to ``limit``). Parameters ========== limit : int The highest power which is expanded into multiplication. 
Examples ======== >>> from sympy import Symbol, sin >>> from sympy.codegen.rewriting import create_expand_pow_optimization >>> x = Symbol('x') >>> expand_opt = create_expand_pow_optimization(3) >>> expand_opt(x**5 + x**3) x**5 + x*x*x >>> expand_opt(x**5 + x**3 + sin(x)**3) x**5 + sin(x)**3 + x*x*x """ return ReplaceOptim( lambda e: e.is_Pow and e.base.is_symbol and e.exp.is_Integer and abs(e.exp) <= limit, lambda p: ( UnevaluatedExpr(Mul(*([p.base]*+p.exp), evaluate=False)) if p.exp > 0 else 1/UnevaluatedExpr(Mul(*([p.base]*-p.exp), evaluate=False)) )) # Optimization procedures for turning A**(-1) * x into MatrixSolve(A, x) def _matinv_predicate(expr): # TODO: We should be able to support more than 2 elements if expr.is_MatMul and len(expr.args) == 2: left, right = expr.args if left.is_Inverse and right.shape[1] == 1: inv_arg = left.arg if isinstance(inv_arg, MatrixSymbol): return bool(ask(Q.fullrank(left.arg))) return False def _matinv_transform(expr): left, right = expr.args inv_arg = left.arg return MatrixSolve(inv_arg, right) matinv_opt = ReplaceOptim(_matinv_predicate, _matinv_transform) logaddexp_opt = ReplaceOptim(log(exp(_v)+exp(_w)), logaddexp(_v, _w)) logaddexp2_opt = ReplaceOptim(log(Pow(2, _v)+Pow(2, _w)), logaddexp2(_v, _w)*log(2)) # Collections of optimizations: optims_c99 = (expm1_opt, log1p_opt, exp2_opt, log2_opt, log2const_opt) optims_numpy = optims_c99 + (logaddexp_opt, logaddexp2_opt,) + sinc_opts optims_scipy = (cosm1_opt,)
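

# Hedged sketch, not part of the original module: the NumPy-oriented
# collection defined above also rewrites the two-term log-sum-exp pattern.
# The symbols used here are assumptions made only for the example.
if __name__ == "__main__":
    from sympy import symbols

    x, y = symbols('x y')
    print(optimize(log(exp(x) + exp(y)), optims_numpy))  # logaddexp(x, y)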
ab283f48ec13e73775266d719ab82476953969ecef5dcf4d942eee05d4b00937
""" Types used to represent a full function/module as an Abstract Syntax Tree. Most types are small, and are merely used as tokens in the AST. A tree diagram has been included below to illustrate the relationships between the AST types. AST Type Tree ------------- :: *Basic* |--->AssignmentBase | |--->Assignment | |--->AugmentedAssignment | |--->AddAugmentedAssignment | |--->SubAugmentedAssignment | |--->MulAugmentedAssignment | |--->DivAugmentedAssignment | |--->ModAugmentedAssignment | |--->CodeBlock | | |--->Token | |--->Attribute | |--->For | |--->String | | |--->QuotedString | | |--->Comment | |--->Type | | |--->IntBaseType | | | |--->_SizedIntType | | | |--->SignedIntType | | | |--->UnsignedIntType | | |--->FloatBaseType | | |--->FloatType | | |--->ComplexBaseType | | |--->ComplexType | |--->Node | | |--->Variable | | | |---> Pointer | | |--->FunctionPrototype | | |--->FunctionDefinition | |--->Element | |--->Declaration | |--->While | |--->Scope | |--->Stream | |--->Print | |--->FunctionCall | |--->BreakToken | |--->ContinueToken | |--->NoneToken | |--->Statement |--->Return Predefined types ---------------- A number of ``Type`` instances are provided in the ``sympy.codegen.ast`` module for convenience. Perhaps the two most common ones for code-generation (of numeric codes) are ``float32`` and ``float64`` (known as single and double precision respectively). There are also precision generic versions of Types (for which the codeprinters selects the underlying data type at time of printing): ``real``, ``integer``, ``complex_``, ``bool_``. The other ``Type`` instances defined are: - ``intc``: Integer type used by C's "int". - ``intp``: Integer type used by C's "unsigned". - ``int8``, ``int16``, ``int32``, ``int64``: n-bit integers. - ``uint8``, ``uint16``, ``uint32``, ``uint64``: n-bit unsigned integers. - ``float80``: known as "extended precision" on modern x86/amd64 hardware. - ``complex64``: Complex number represented by two ``float32`` numbers - ``complex128``: Complex number represented by two ``float64`` numbers Using the nodes --------------- It is possible to construct simple algorithms using the AST nodes. Let's construct a loop applying Newton's method:: >>> from sympy import symbols, cos >>> from sympy.codegen.ast import While, Assignment, aug_assign, Print >>> t, dx, x = symbols('tol delta val') >>> expr = cos(x) - x**3 >>> whl = While(abs(dx) > t, [ ... Assignment(dx, -expr/expr.diff(x)), ... aug_assign(x, '+', dx), ... Print([x]) ... ]) >>> from sympy.printing import pycode >>> py_str = pycode(whl) >>> print(py_str) while (abs(delta) > tol): delta = (val**3 - math.cos(val))/(-3*val**2 - math.sin(val)) val += delta print(val) >>> import math >>> tol, val, delta = 1e-5, 0.5, float('inf') >>> exec(py_str) 1.1121416371 0.909672693737 0.867263818209 0.865477135298 0.865474033111 >>> print('%3.1g' % (math.cos(val) - val**3)) -3e-11 If we want to generate Fortran code for the same while loop we simple call ``fcode``:: >>> from sympy.printing import fcode >>> print(fcode(whl, standard=2003, source_format='free')) do while (abs(delta) > tol) delta = (val**3 - cos(val))/(-3*val**2 - sin(val)) val = val + delta print *, val end do There is a function constructing a loop (or a complete function) like this in :mod:`sympy.codegen.algorithms`. 
""" from typing import Any, Dict, List from collections import defaultdict from sympy import Lt, Le, Ge, Gt from sympy.core import Symbol, Tuple, Dummy from sympy.core.basic import Basic from sympy.core.expr import Expr from sympy.core.numbers import Float, Integer, oo from sympy.core.sympify import _sympify, sympify, SympifyError from sympy.utilities.iterables import iterable def _mk_Tuple(args): """ Create a Sympy Tuple object from an iterable, converting Python strings to AST strings. Parameters ========== args: iterable Arguments to :class:`sympy.Tuple`. Returns ======= sympy.Tuple """ args = [String(arg) if isinstance(arg, str) else arg for arg in args] return Tuple(*args) class Token(Basic): """ Base class for the AST types. Explanation =========== Defining fields are set in ``__slots__``. Attributes (defined in __slots__) are only allowed to contain instances of Basic (unless atomic, see ``String``). The arguments to ``__new__()`` correspond to the attributes in the order defined in ``__slots__`. The ``defaults`` class attribute is a dictionary mapping attribute names to their default values. Subclasses should not need to override the ``__new__()`` method. They may define a class or static method named ``_construct_<attr>`` for each attribute to process the value passed to ``__new__()``. Attributes listed in the class attribute ``not_in_args`` are not passed to :class:`~.Basic`. """ __slots__ = () defaults = {} # type: Dict[str, Any] not_in_args = [] # type: List[str] indented_args = ['body'] @property def is_Atom(self): return len(self.__slots__) == 0 @classmethod def _get_constructor(cls, attr): """ Get the constructor function for an attribute by name. """ return getattr(cls, '_construct_%s' % attr, lambda x: x) @classmethod def _construct(cls, attr, arg): """ Construct an attribute value from argument passed to ``__new__()``. 
""" # arg may be ``NoneToken()``, so comparation is done using == instead of ``is`` operator if arg == None: return cls.defaults.get(attr, none) else: if isinstance(arg, Dummy): # sympy's replace uses Dummy instances return arg else: return cls._get_constructor(attr)(arg) def __new__(cls, *args, **kwargs): # Pass through existing instances when given as sole argument if len(args) == 1 and not kwargs and isinstance(args[0], cls): return args[0] if len(args) > len(cls.__slots__): raise ValueError("Too many arguments (%d), expected at most %d" % (len(args), len(cls.__slots__))) attrvals = [] # Process positional arguments for attrname, argval in zip(cls.__slots__, args): if attrname in kwargs: raise TypeError('Got multiple values for attribute %r' % attrname) attrvals.append(cls._construct(attrname, argval)) # Process keyword arguments for attrname in cls.__slots__[len(args):]: if attrname in kwargs: argval = kwargs.pop(attrname) elif attrname in cls.defaults: argval = cls.defaults[attrname] else: raise TypeError('No value for %r given and attribute has no default' % attrname) attrvals.append(cls._construct(attrname, argval)) if kwargs: raise ValueError("Unknown keyword arguments: %s" % ' '.join(kwargs)) # Parent constructor basic_args = [ val for attr, val in zip(cls.__slots__, attrvals) if attr not in cls.not_in_args ] obj = Basic.__new__(cls, *basic_args) # Set attributes for attr, arg in zip(cls.__slots__, attrvals): setattr(obj, attr, arg) return obj def __eq__(self, other): if not isinstance(other, self.__class__): return False for attr in self.__slots__: if getattr(self, attr) != getattr(other, attr): return False return True def _hashable_content(self): return tuple([getattr(self, attr) for attr in self.__slots__]) def __hash__(self): return super().__hash__() def _joiner(self, k, indent_level): return (',\n' + ' '*indent_level) if k in self.indented_args else ', ' def _indented(self, printer, k, v, *args, **kwargs): il = printer._context['indent_level'] def _print(arg): if isinstance(arg, Token): return printer._print(arg, *args, joiner=self._joiner(k, il), **kwargs) else: return printer._print(arg, *args, **kwargs) if isinstance(v, Tuple): joined = self._joiner(k, il).join([_print(arg) for arg in v.args]) if k in self.indented_args: return '(\n' + ' '*il + joined + ',\n' + ' '*(il - 4) + ')' else: return ('({0},)' if len(v.args) == 1 else '({0})').format(joined) else: return _print(v) def _sympyrepr(self, printer, *args, joiner=', ', **kwargs): from sympy.printing.printer import printer_context exclude = kwargs.get('exclude', ()) values = [getattr(self, k) for k in self.__slots__] indent_level = printer._context.get('indent_level', 0) arg_reprs = [] for i, (attr, value) in enumerate(zip(self.__slots__, values)): if attr in exclude: continue # Skip attributes which have the default value if attr in self.defaults and value == self.defaults[attr]: continue ilvl = indent_level + 4 if attr in self.indented_args else 0 with printer_context(printer, indent_level=ilvl): indented = self._indented(printer, attr, value, *args, **kwargs) arg_reprs.append(('{1}' if i == 0 else '{0}={1}').format(attr, indented.lstrip())) return "{}({})".format(self.__class__.__name__, joiner.join(arg_reprs)) _sympystr = _sympyrepr def __repr__(self): # sympy.core.Basic.__repr__ uses sstr from sympy.printing import srepr return srepr(self) def kwargs(self, exclude=(), apply=None): """ Get instance's attributes as dict of keyword arguments. 
Parameters ========== exclude : collection of str Collection of keywords to exclude. apply : callable, optional Function to apply to all values. """ kwargs = {k: getattr(self, k) for k in self.__slots__ if k not in exclude} if apply is not None: return {k: apply(v) for k, v in kwargs.items()} else: return kwargs class BreakToken(Token): """ Represents 'break' in C/Python ('exit' in Fortran). Use the premade instance ``break_`` or instantiate manually. Examples ======== >>> from sympy.printing import ccode, fcode >>> from sympy.codegen.ast import break_ >>> ccode(break_) 'break' >>> fcode(break_, source_format='free') 'exit' """ break_ = BreakToken() class ContinueToken(Token): """ Represents 'continue' in C/Python ('cycle' in Fortran) Use the premade instance ``continue_`` or instantiate manually. Examples ======== >>> from sympy.printing import ccode, fcode >>> from sympy.codegen.ast import continue_ >>> ccode(continue_) 'continue' >>> fcode(continue_, source_format='free') 'cycle' """ continue_ = ContinueToken() class NoneToken(Token): """ The AST equivalence of Python's NoneType The corresponding instance of Python's ``None`` is ``none``. Examples ======== >>> from sympy.codegen.ast import none, Variable >>> from sympy.printing.pycode import pycode >>> print(pycode(Variable('x').as_Declaration(value=none))) x = None """ def __eq__(self, other): return other is None or isinstance(other, NoneToken) def _hashable_content(self): return () def __hash__(self): return super().__hash__() none = NoneToken() class AssignmentBase(Basic): """ Abstract base class for Assignment and AugmentedAssignment. Attributes: =========== op : str Symbol for assignment operator, e.g. "=", "+=", etc. """ def __new__(cls, lhs, rhs): lhs = _sympify(lhs) rhs = _sympify(rhs) cls._check_args(lhs, rhs) return super().__new__(cls, lhs, rhs) @property def lhs(self): return self.args[0] @property def rhs(self): return self.args[1] @classmethod def _check_args(cls, lhs, rhs): """ Check arguments to __new__ and raise exception if any problems found. Derived classes may wish to override this. """ from sympy.matrices.expressions.matexpr import ( MatrixElement, MatrixSymbol) from sympy.tensor.indexed import Indexed # Tuple of things that can be on the lhs of an assignment assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable) if not isinstance(lhs, assignable): raise TypeError("Cannot assign to lhs of type %s." % type(lhs)) # Indexed types implement shape, but don't define it until later. This # causes issues in assignment validation. For now, matrices are defined # as anything with a shape that is not an Indexed lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed) rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed) # If lhs and rhs have same structure, then this assignment is ok if lhs_is_mat: if not rhs_is_mat: raise ValueError("Cannot assign a scalar to a matrix.") elif lhs.shape != rhs.shape: raise ValueError("Dimensions of lhs and rhs don't align.") elif rhs_is_mat and not lhs_is_mat: raise ValueError("Cannot assign a matrix to a scalar.") class Assignment(AssignmentBase): """ Represents variable assignment for code generation. Parameters ========== lhs : Expr Sympy object representing the lhs of the expression. These should be singular objects, such as one would use in writing code. Notable types include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that subclass these types are also supported. rhs : Expr Sympy object representing the rhs of the expression. 
This can be any type, provided its shape corresponds to that of the lhs. For example, a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as the dimensions will not align. Examples ======== >>> from sympy import symbols, MatrixSymbol, Matrix >>> from sympy.codegen.ast import Assignment >>> x, y, z = symbols('x, y, z') >>> Assignment(x, y) Assignment(x, y) >>> Assignment(x, 0) Assignment(x, 0) >>> A = MatrixSymbol('A', 1, 3) >>> mat = Matrix([x, y, z]).T >>> Assignment(A, mat) Assignment(A, Matrix([[x, y, z]])) >>> Assignment(A[0, 1], x) Assignment(A[0, 1], x) """ op = ':=' class AugmentedAssignment(AssignmentBase): """ Base class for augmented assignments. Attributes: =========== binop : str Symbol for binary operation being applied in the assignment, such as "+", "*", etc. """ binop = None # type: str @property def op(self): return self.binop + '=' class AddAugmentedAssignment(AugmentedAssignment): binop = '+' class SubAugmentedAssignment(AugmentedAssignment): binop = '-' class MulAugmentedAssignment(AugmentedAssignment): binop = '*' class DivAugmentedAssignment(AugmentedAssignment): binop = '/' class ModAugmentedAssignment(AugmentedAssignment): binop = '%' # Mapping from binary op strings to AugmentedAssignment subclasses augassign_classes = { cls.binop: cls for cls in [ AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment, DivAugmentedAssignment, ModAugmentedAssignment ] } def aug_assign(lhs, op, rhs): """ Create 'lhs op= rhs'. Explanation =========== Represents augmented variable assignment for code generation. This is a convenience function. You can also use the AugmentedAssignment classes directly, like AddAugmentedAssignment(x, y). Parameters ========== lhs : Expr Sympy object representing the lhs of the expression. These should be singular objects, such as one would use in writing code. Notable types include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that subclass these types are also supported. op : str Operator (+, -, /, \\*, %). rhs : Expr Sympy object representing the rhs of the expression. This can be any type, provided its shape corresponds to that of the lhs. For example, a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as the dimensions will not align. Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import aug_assign >>> x, y = symbols('x, y') >>> aug_assign(x, '+', y) AddAugmentedAssignment(x, y) """ if op not in augassign_classes: raise ValueError("Unrecognized operator %s" % op) return augassign_classes[op](lhs, rhs) class CodeBlock(Basic): """ Represents a block of code. Explanation =========== For now only assignments are supported. This restriction will be lifted in the future. Useful attributes on this object are: ``left_hand_sides``: Tuple of left-hand sides of assignments, in order. ``left_hand_sides``: Tuple of right-hand sides of assignments, in order. ``free_symbols``: Free symbols of the expressions in the right-hand sides which do not appear in the left-hand side of an assignment. Useful methods on this object are: ``topological_sort``: Class method. Return a CodeBlock with assignments sorted so that variables are assigned before they are used. ``cse``: Return a new CodeBlock with common subexpressions eliminated and pulled out as assignments. 
Examples ======== >>> from sympy import symbols, ccode >>> from sympy.codegen.ast import CodeBlock, Assignment >>> x, y = symbols('x y') >>> c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1)) >>> print(ccode(c)) x = 1; y = x + 1; """ def __new__(cls, *args): left_hand_sides = [] right_hand_sides = [] for i in args: if isinstance(i, Assignment): lhs, rhs = i.args left_hand_sides.append(lhs) right_hand_sides.append(rhs) obj = Basic.__new__(cls, *args) obj.left_hand_sides = Tuple(*left_hand_sides) obj.right_hand_sides = Tuple(*right_hand_sides) return obj def __iter__(self): return iter(self.args) def _sympyrepr(self, printer, *args, **kwargs): il = printer._context.get('indent_level', 0) joiner = ',\n' + ' '*il joined = joiner.join(map(printer._print, self.args)) return ('{}(\n'.format(' '*(il-4) + self.__class__.__name__,) + ' '*il + joined + '\n' + ' '*(il - 4) + ')') _sympystr = _sympyrepr @property def free_symbols(self): return super().free_symbols - set(self.left_hand_sides) @classmethod def topological_sort(cls, assignments): """ Return a CodeBlock with topologically sorted assignments so that variables are assigned before they are used. Examples ======== The existing order of assignments is preserved as much as possible. This function assumes that variables are assigned to only once. This is a class constructor so that the default constructor for CodeBlock can error when variables are used before they are assigned. Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import CodeBlock, Assignment >>> x, y, z = symbols('x y z') >>> assignments = [ ... Assignment(x, y + z), ... Assignment(y, z + 1), ... Assignment(z, 2), ... ] >>> CodeBlock.topological_sort(assignments) CodeBlock( Assignment(z, 2), Assignment(y, z + 1), Assignment(x, y + z) ) """ from sympy.utilities.iterables import topological_sort if not all(isinstance(i, Assignment) for i in assignments): # Will support more things later raise NotImplementedError("CodeBlock.topological_sort only supports Assignments") if any(isinstance(i, AugmentedAssignment) for i in assignments): raise NotImplementedError("CodeBlock.topological_sort doesn't yet work with AugmentedAssignments") # Create a graph where the nodes are assignments and there is a directed edge # between nodes that use a variable and nodes that assign that # variable, like # [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)] # If we then topologically sort these nodes, they will be in # assignment order, like # x := 1 # y := x + 1 # z := y + z # A = The nodes # # enumerate keeps nodes in the same order they are already in if # possible. It will also allow us to handle duplicate assignments to # the same variable when those are implemented. A = list(enumerate(assignments)) # var_map = {variable: [nodes for which this variable is assigned to]} # like {x: [(1, x := y + z), (4, x := 2 * w)], ...} var_map = defaultdict(list) for node in A: i, a = node var_map[a.lhs].append(node) # E = Edges in the graph E = [] for dst_node in A: i, a = dst_node for s in a.rhs.free_symbols: for src_node in var_map[s]: E.append((src_node, dst_node)) ordered_assignments = topological_sort([A, E]) # De-enumerate the result return cls(*[a for i, a in ordered_assignments]) def cse(self, symbols=None, optimizations=None, postprocess=None, order='canonical'): """ Return a new code block with common subexpressions eliminated. Explanation =========== See the docstring of :func:`sympy.simplify.cse_main.cse` for more information. 
Examples ======== >>> from sympy import symbols, sin >>> from sympy.codegen.ast import CodeBlock, Assignment >>> x, y, z = symbols('x y z') >>> c = CodeBlock( ... Assignment(x, 1), ... Assignment(y, sin(x) + 1), ... Assignment(z, sin(x) - 1), ... ) ... >>> c.cse() CodeBlock( Assignment(x, 1), Assignment(x0, sin(x)), Assignment(y, x0 + 1), Assignment(z, x0 - 1) ) """ from sympy.simplify.cse_main import cse from sympy.utilities.iterables import numbered_symbols, filter_symbols # Check that the CodeBlock only contains assignments to unique variables if not all(isinstance(i, Assignment) for i in self.args): # Will support more things later raise NotImplementedError("CodeBlock.cse only supports Assignments") if any(isinstance(i, AugmentedAssignment) for i in self.args): raise NotImplementedError("CodeBlock.cse doesn't yet work with AugmentedAssignments") for i, lhs in enumerate(self.left_hand_sides): if lhs in self.left_hand_sides[:i]: raise NotImplementedError("Duplicate assignments to the same " "variable are not yet supported (%s)" % lhs) # Ensure new symbols for subexpressions do not conflict with existing existing_symbols = self.atoms(Symbol) if symbols is None: symbols = numbered_symbols() symbols = filter_symbols(symbols, existing_symbols) replacements, reduced_exprs = cse(list(self.right_hand_sides), symbols=symbols, optimizations=optimizations, postprocess=postprocess, order=order) new_block = [Assignment(var, expr) for var, expr in zip(self.left_hand_sides, reduced_exprs)] new_assignments = [Assignment(var, expr) for var, expr in replacements] return self.topological_sort(new_assignments + new_block) class For(Token): """Represents a 'for-loop' in the code. Expressions are of the form: "for target in iter: body..." Parameters ========== target : symbol iter : iterable body : CodeBlock or iterable ! When passed an iterable it is used to instantiate a CodeBlock. Examples ======== >>> from sympy import symbols, Range >>> from sympy.codegen.ast import aug_assign, For >>> x, i, j, k = symbols('x i j k') >>> for_i = For(i, Range(10), [aug_assign(x, '+', i*j*k)]) >>> for_i # doctest: -NORMALIZE_WHITESPACE For(i, iterable=Range(0, 10, 1), body=CodeBlock( AddAugmentedAssignment(x, i*j*k) )) >>> for_ji = For(j, Range(7), [for_i]) >>> for_ji # doctest: -NORMALIZE_WHITESPACE For(j, iterable=Range(0, 7, 1), body=CodeBlock( For(i, iterable=Range(0, 10, 1), body=CodeBlock( AddAugmentedAssignment(x, i*j*k) )) )) >>> for_kji =For(k, Range(5), [for_ji]) >>> for_kji # doctest: -NORMALIZE_WHITESPACE For(k, iterable=Range(0, 5, 1), body=CodeBlock( For(j, iterable=Range(0, 7, 1), body=CodeBlock( For(i, iterable=Range(0, 10, 1), body=CodeBlock( AddAugmentedAssignment(x, i*j*k) )) )) )) """ __slots__ = ('target', 'iterable', 'body') _construct_target = staticmethod(_sympify) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) @classmethod def _construct_iterable(cls, itr): if not iterable(itr): raise TypeError("iterable must be an iterable") if isinstance(itr, list): # _sympify errors on lists because they are mutable itr = tuple(itr) return _sympify(itr) class String(Token): """ SymPy object representing a string. Atomic object which is not an expression (as opposed to Symbol). 
Parameters ========== text : str Examples ======== >>> from sympy.codegen.ast import String >>> f = String('foo') >>> f foo >>> str(f) 'foo' >>> f.text 'foo' >>> print(repr(f)) String('foo') """ __slots__ = ('text',) not_in_args = ['text'] is_Atom = True @classmethod def _construct_text(cls, text): if not isinstance(text, str): raise TypeError("Argument text is not a string type.") return text def _sympystr(self, printer, *args, **kwargs): return self.text class QuotedString(String): """ Represents a string which should be printed with quotes. """ class Comment(String): """ Represents a comment. """ class Node(Token): """ Subclass of Token, carrying the attribute 'attrs' (Tuple) Examples ======== >>> from sympy.codegen.ast import Node, value_const, pointer_const >>> n1 = Node([value_const]) >>> n1.attr_params('value_const') # get the parameters of attribute (by name) () >>> from sympy.codegen.fnodes import dimension >>> n2 = Node([value_const, dimension(5, 3)]) >>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance) () >>> n2.attr_params('dimension') # get the parameters of attribute (by name) (5, 3) >>> n2.attr_params(pointer_const) is None True """ __slots__ = ('attrs',) defaults = {'attrs': Tuple()} # type: Dict[str, Any] _construct_attrs = staticmethod(_mk_Tuple) def attr_params(self, looking_for): """ Returns the parameters of the Attribute with name ``looking_for`` in self.attrs """ for attr in self.attrs: if str(attr.name) == str(looking_for): return attr.parameters class Type(Token): """ Represents a type. Explanation =========== The naming is a super-set of NumPy naming. Type has a classmethod ``from_expr`` which offer type deduction. It also has a method ``cast_check`` which casts the argument to its type, possibly raising an exception if rounding error is not within tolerances, or if the value is not representable by the underlying data type (e.g. unsigned integers). Parameters ========== name : str Name of the type, e.g. ``object``, ``int16``, ``float16`` (where the latter two would use the ``Type`` sub-classes ``IntType`` and ``FloatType`` respectively). If a ``Type`` instance is given, the said instance is returned. Examples ======== >>> from sympy.codegen.ast import Type >>> t = Type.from_expr(42) >>> t integer >>> print(repr(t)) IntBaseType(String('integer')) >>> from sympy.codegen.ast import uint8 >>> uint8.cast_check(-1) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Minimum value for data type bigger than new value. >>> from sympy.codegen.ast import float32 >>> v6 = 0.123456 >>> float32.cast_check(v6) 0.123456 >>> v10 = 12345.67894 >>> float32.cast_check(v10) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Casting gives a significantly different value. >>> boost_mp50 = Type('boost::multiprecision::cpp_dec_float_50') >>> from sympy.printing import cxxcode >>> from sympy.codegen.ast import Declaration, Variable >>> cxxcode(Declaration(Variable('x', type=boost_mp50))) 'boost::multiprecision::cpp_dec_float_50 x' References ========== .. [1] https://docs.scipy.org/doc/numpy/user/basics.types.html """ __slots__ = ('name',) _construct_name = String def _sympystr(self, printer, *args, **kwargs): return str(self.name) @classmethod def from_expr(cls, expr): """ Deduces type from an expression or a ``Symbol``. Parameters ========== expr : number or SymPy object The type will be deduced from type or properties. 
Examples ======== >>> from sympy.codegen.ast import Type, integer, complex_ >>> Type.from_expr(2) == integer True >>> from sympy import Symbol >>> Type.from_expr(Symbol('z', complex=True)) == complex_ True >>> Type.from_expr(sum) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Could not deduce type from expr. Raises ====== ValueError when type deduction fails. """ if isinstance(expr, (float, Float)): return real if isinstance(expr, (int, Integer)) or getattr(expr, 'is_integer', False): return integer if getattr(expr, 'is_real', False): return real if isinstance(expr, complex) or getattr(expr, 'is_complex', False): return complex_ if isinstance(expr, bool) or getattr(expr, 'is_Relational', False): return bool_ else: raise ValueError("Could not deduce type from expr.") def _check(self, value): pass def cast_check(self, value, rtol=None, atol=0, limits=None, precision_targets=None): """ Casts a value to the data type of the instance. Parameters ========== value : number rtol : floating point number Relative tolerance. (will be deduced if not given). atol : floating point number Absolute tolerance (in addition to ``rtol``). limits : dict Values given by ``limits.h``, x86/IEEE754 defaults if not given. type_aliases : dict Maps substitutions for Type, e.g. {integer: int64, real: float32} Examples ======== >>> from sympy.codegen.ast import integer, float32, int8 >>> integer.cast_check(3.0) == 3 True >>> float32.cast_check(1e-40) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Minimum value for data type bigger than new value. >>> int8.cast_check(256) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Maximum value for data type smaller than new value. >>> v10 = 12345.67894 >>> float32.cast_check(v10) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Casting gives a significantly different value. >>> from sympy.codegen.ast import float64 >>> float64.cast_check(v10) 12345.67894 >>> from sympy import Float >>> v18 = Float('0.123456789012345646') >>> float64.cast_check(v18) Traceback (most recent call last): ... ValueError: Casting gives a significantly different value. >>> from sympy.codegen.ast import float80 >>> float80.cast_check(v18) 0.123456789012345649 """ val = sympify(value) ten = Integer(10) exp10 = getattr(self, 'decimal_dig', None) if rtol is None: rtol = 1e-15 if exp10 is None else 2.0*ten**(-exp10) def tol(num): return atol + rtol*abs(num) new_val = self.cast_nocheck(value) self._check(new_val) delta = new_val - val if abs(delta) > tol(val): # rounding, e.g. int(3.5) != 3.5 raise ValueError("Casting gives a significantly different value.") return new_val class IntBaseType(Type): """ Integer base type, contains no size information. """ __slots__ = ('name',) cast_nocheck = lambda self, i: Integer(int(i)) class _SizedIntType(IntBaseType): __slots__ = ('name', 'nbits',) _construct_nbits = Integer def _check(self, value): if value < self.min: raise ValueError("Value is too small: %d < %d" % (value, self.min)) if value > self.max: raise ValueError("Value is too big: %d > %d" % (value, self.max)) class SignedIntType(_SizedIntType): """ Represents a signed integer type. """ @property def min(self): return -2**(self.nbits-1) @property def max(self): return 2**(self.nbits-1) - 1 class UnsignedIntType(_SizedIntType): """ Represents an unsigned integer type. 
""" @property def min(self): return 0 @property def max(self): return 2**self.nbits - 1 two = Integer(2) class FloatBaseType(Type): """ Represents a floating point number type. """ cast_nocheck = Float class FloatType(FloatBaseType): """ Represents a floating point type with fixed bit width. Base 2 & one sign bit is assumed. Parameters ========== name : str Name of the type. nbits : integer Number of bits used (storage). nmant : integer Number of bits used to represent the mantissa. nexp : integer Number of bits used to represent the mantissa. Examples ======== >>> from sympy import S >>> from sympy.codegen.ast import FloatType >>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5) >>> half_precision.max 65504 >>> half_precision.tiny == S(2)**-14 True >>> half_precision.eps == S(2)**-10 True >>> half_precision.dig == 3 True >>> half_precision.decimal_dig == 5 True >>> half_precision.cast_check(1.0) 1.0 >>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Maximum value for data type smaller than new value. """ __slots__ = ('name', 'nbits', 'nmant', 'nexp',) _construct_nbits = _construct_nmant = _construct_nexp = Integer @property def max_exponent(self): """ The largest positive number n, such that 2**(n - 1) is a representable finite value. """ # cf. C++'s ``std::numeric_limits::max_exponent`` return two**(self.nexp - 1) @property def min_exponent(self): """ The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """ # cf. C++'s ``std::numeric_limits::min_exponent`` return 3 - self.max_exponent @property def max(self): """ Maximum value representable. """ return (1 - two**-(self.nmant+1))*two**self.max_exponent @property def tiny(self): """ The minimum positive normalized value. """ # See C macros: FLT_MIN, DBL_MIN, LDBL_MIN # or C++'s ``std::numeric_limits::min`` # or numpy.finfo(dtype).tiny return two**(self.min_exponent - 1) @property def eps(self): """ Difference between 1.0 and the next representable value. """ return two**(-self.nmant) @property def dig(self): """ Number of decimal digits that are guaranteed to be preserved in text. When converting text -> float -> text, you are guaranteed that at least ``dig`` number of digits are preserved with respect to rounding or overflow. """ from sympy.functions import floor, log return floor(self.nmant * log(2)/log(10)) @property def decimal_dig(self): """ Number of digits needed to store & load without loss. Explanation =========== Number of decimal digits needed to guarantee that two consecutive conversions (float -> text -> float) to be idempotent. This is useful when one do not want to loose precision due to rounding errors when storing a floating point value as text. """ from sympy.functions import ceiling, log return ceiling((self.nmant + 1) * log(2)/log(10) + 1) def cast_nocheck(self, value): """ Casts without checking if out of bounds or subnormal. 
""" if value == oo: # float(oo) or oo return float(oo) elif value == -oo: # float(-oo) or -oo return float(-oo) return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig) def _check(self, value): if value < -self.max: raise ValueError("Value is too small: %d < %d" % (value, -self.max)) if value > self.max: raise ValueError("Value is too big: %d > %d" % (value, self.max)) if abs(value) < self.tiny: raise ValueError("Smallest (absolute) value for data type bigger than new value.") class ComplexBaseType(FloatBaseType): def cast_nocheck(self, value): """ Casts without checking if out of bounds or subnormal. """ from sympy.functions import re, im return ( super().cast_nocheck(re(value)) + super().cast_nocheck(im(value))*1j ) def _check(self, value): from sympy.functions import re, im super()._check(re(value)) super()._check(im(value)) class ComplexType(ComplexBaseType, FloatType): """ Represents a complex floating point number. """ # NumPy types: intc = IntBaseType('intc') intp = IntBaseType('intp') int8 = SignedIntType('int8', 8) int16 = SignedIntType('int16', 16) int32 = SignedIntType('int32', 32) int64 = SignedIntType('int64', 64) uint8 = UnsignedIntType('uint8', 8) uint16 = UnsignedIntType('uint16', 16) uint32 = UnsignedIntType('uint32', 32) uint64 = UnsignedIntType('uint64', 64) float16 = FloatType('float16', 16, nexp=5, nmant=10) # IEEE 754 binary16, Half precision float32 = FloatType('float32', 32, nexp=8, nmant=23) # IEEE 754 binary32, Single precision float64 = FloatType('float64', 64, nexp=11, nmant=52) # IEEE 754 binary64, Double precision float80 = FloatType('float80', 80, nexp=15, nmant=63) # x86 extended precision (1 integer part bit), "long double" float128 = FloatType('float128', 128, nexp=15, nmant=112) # IEEE 754 binary128, Quadruple precision float256 = FloatType('float256', 256, nexp=19, nmant=236) # IEEE 754 binary256, Octuple precision complex64 = ComplexType('complex64', nbits=64, **float32.kwargs(exclude=('name', 'nbits'))) complex128 = ComplexType('complex128', nbits=128, **float64.kwargs(exclude=('name', 'nbits'))) # Generic types (precision may be chosen by code printers): untyped = Type('untyped') real = FloatBaseType('real') integer = IntBaseType('integer') complex_ = ComplexBaseType('complex') bool_ = Type('bool') class Attribute(Token): """ Attribute (possibly parametrized) For use with :class:`sympy.codegen.ast.Node` (which takes instances of ``Attribute`` as ``attrs``). Parameters ========== name : str parameters : Tuple Examples ======== >>> from sympy.codegen.ast import Attribute >>> volatile = Attribute('volatile') >>> volatile volatile >>> print(repr(volatile)) Attribute(String('volatile')) >>> a = Attribute('foo', [1, 2, 3]) >>> a foo(1, 2, 3) >>> a.parameters == (1, 2, 3) True """ __slots__ = ('name', 'parameters') defaults = {'parameters': Tuple()} _construct_name = String _construct_parameters = staticmethod(_mk_Tuple) def _sympystr(self, printer, *args, **kwargs): result = str(self.name) if self.parameters: result += '(%s)' % ', '.join(map(lambda arg: printer._print( arg, *args, **kwargs), self.parameters)) return result value_const = Attribute('value_const') pointer_const = Attribute('pointer_const') class Variable(Node): """ Represents a variable. Parameters ========== symbol : Symbol type : Type (optional) Type of the variable. attrs : iterable of Attribute instances Will be stored as a Tuple. 
Examples ======== >>> from sympy import Symbol >>> from sympy.codegen.ast import Variable, float32, integer >>> x = Symbol('x') >>> v = Variable(x, type=float32) >>> v.attrs () >>> v == Variable('x') False >>> v == Variable('x', type=float32) True >>> v Variable(x, type=float32) One may also construct a ``Variable`` instance with the type deduced from assumptions about the symbol using the ``deduced`` classmethod: >>> i = Symbol('i', integer=True) >>> v = Variable.deduced(i) >>> v.type == integer True >>> v == Variable('i') False >>> from sympy.codegen.ast import value_const >>> value_const in v.attrs False >>> w = Variable('w', attrs=[value_const]) >>> w Variable(w, attrs=(value_const,)) >>> value_const in w.attrs True >>> w.as_Declaration(value=42) Declaration(Variable(w, value=42, attrs=(value_const,))) """ __slots__ = ('symbol', 'type', 'value') + Node.__slots__ defaults = Node.defaults.copy() defaults.update({'type': untyped, 'value': none}) _construct_symbol = staticmethod(sympify) _construct_value = staticmethod(sympify) @classmethod def deduced(cls, symbol, value=None, attrs=Tuple(), cast_check=True): """ Alt. constructor with type deduction from ``Type.from_expr``. Deduces type primarily from ``symbol``, secondarily from ``value``. Parameters ========== symbol : Symbol value : expr (optional) value of the variable. attrs : iterable of Attribute instances cast_check : bool Whether to apply ``Type.cast_check`` on ``value``. Examples ======== >>> from sympy import Symbol >>> from sympy.codegen.ast import Variable, complex_ >>> n = Symbol('n', integer=True) >>> str(Variable.deduced(n).type) 'integer' >>> x = Symbol('x', real=True) >>> v = Variable.deduced(x) >>> v.type real >>> z = Symbol('z', complex=True) >>> Variable.deduced(z).type == complex_ True """ if isinstance(symbol, Variable): return symbol try: type_ = Type.from_expr(symbol) except ValueError: type_ = Type.from_expr(value) if value is not None and cast_check: value = type_.cast_check(value) return cls(symbol, type=type_, value=value, attrs=attrs) def as_Declaration(self, **kwargs): """ Convenience method for creating a Declaration instance. Explanation =========== If the variable of the Declaration need to wrap a modified variable keyword arguments may be passed (overriding e.g. the ``value`` of the Variable instance). Examples ======== >>> from sympy.codegen.ast import Variable, NoneToken >>> x = Variable('x') >>> decl1 = x.as_Declaration() >>> # value is special NoneToken() which must be tested with == operator >>> decl1.variable.value is None # won't work False >>> decl1.variable.value == None # not PEP-8 compliant True >>> decl1.variable.value == NoneToken() # OK True >>> decl2 = x.as_Declaration(value=42.0) >>> decl2.variable.value == 42 True """ kw = self.kwargs() kw.update(kwargs) return Declaration(self.func(**kw)) def _relation(self, rhs, op): try: rhs = _sympify(rhs) except SympifyError: raise TypeError("Invalid comparison %s < %s" % (self, rhs)) return op(self, rhs, evaluate=False) __lt__ = lambda self, other: self._relation(other, Lt) __le__ = lambda self, other: self._relation(other, Le) __ge__ = lambda self, other: self._relation(other, Ge) __gt__ = lambda self, other: self._relation(other, Gt) class Pointer(Variable): """ Represents a pointer. See ``Variable``. 
Examples ======== Can create instances of ``Element``: >>> from sympy import Symbol >>> from sympy.codegen.ast import Pointer >>> i = Symbol('i', integer=True) >>> p = Pointer('x') >>> p[i+1] Element(x, indices=(i + 1,)) """ def __getitem__(self, key): try: return Element(self.symbol, key) except TypeError: return Element(self.symbol, (key,)) class Element(Token): """ Element in (a possibly N-dimensional) array. Examples ======== >>> from sympy.codegen.ast import Element >>> elem = Element('x', 'ijk') >>> elem.symbol.name == 'x' True >>> elem.indices (i, j, k) >>> from sympy import ccode >>> ccode(elem) 'x[i][j][k]' >>> ccode(Element('x', 'ijk', strides='lmn', offset='o')) 'x[i*l + j*m + k*n + o]' """ __slots__ = ('symbol', 'indices', 'strides', 'offset') defaults = {'strides': none, 'offset': none} _construct_symbol = staticmethod(sympify) _construct_indices = staticmethod(lambda arg: Tuple(*arg)) _construct_strides = staticmethod(lambda arg: Tuple(*arg)) _construct_offset = staticmethod(sympify) class Declaration(Token): """ Represents a variable declaration Parameters ========== variable : Variable Examples ======== >>> from sympy.codegen.ast import Declaration, NoneToken, untyped >>> z = Declaration('z') >>> z.variable.type == untyped True >>> # value is special NoneToken() which must be tested with == operator >>> z.variable.value is None # won't work False >>> z.variable.value == None # not PEP-8 compliant True >>> z.variable.value == NoneToken() # OK True """ __slots__ = ('variable',) _construct_variable = Variable class While(Token): """ Represents a 'for-loop' in the code. Expressions are of the form: "while condition: body..." Parameters ========== condition : expression convertible to Boolean body : CodeBlock or iterable When passed an iterable it is used to instantiate a CodeBlock. Examples ======== >>> from sympy import symbols, Gt, Abs >>> from sympy.codegen import aug_assign, Assignment, While >>> x, dx = symbols('x dx') >>> expr = 1 - x**2 >>> whl = While(Gt(Abs(dx), 1e-9), [ ... Assignment(dx, -expr/expr.diff(x)), ... aug_assign(x, '+', dx) ... ]) """ __slots__ = ('condition', 'body') _construct_condition = staticmethod(lambda cond: _sympify(cond)) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) class Scope(Token): """ Represents a scope in the code. Parameters ========== body : CodeBlock or iterable When passed an iterable it is used to instantiate a CodeBlock. """ __slots__ = ('body',) @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) class Stream(Token): """ Represents a stream. There are two predefined Stream instances ``stdout`` & ``stderr``. Parameters ========== name : str Examples ======== >>> from sympy import Symbol >>> from sympy.printing.pycode import pycode >>> from sympy.codegen.ast import Print, stderr, QuotedString >>> print(pycode(Print(['x'], file=stderr))) print(x, file=sys.stderr) >>> x = Symbol('x') >>> print(pycode(Print([QuotedString('x')], file=stderr))) # print literally "x" print("x", file=sys.stderr) """ __slots__ = ('name',) _construct_name = String stdout = Stream('stdout') stderr = Stream('stderr') class Print(Token): """ Represents print command in the code. 
Parameters ========== formatstring : str *args : Basic instances (or convertible to such through sympify) Examples ======== >>> from sympy.codegen.ast import Print >>> from sympy.printing.pycode import pycode >>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g"))) print("coordinate: %12.5g %12.5g" % (x, y)) """ __slots__ = ('print_args', 'format_string', 'file') defaults = {'format_string': none, 'file': none} _construct_print_args = staticmethod(_mk_Tuple) _construct_format_string = QuotedString _construct_file = Stream class FunctionPrototype(Node): """ Represents a function prototype Allows the user to generate forward declaration in e.g. C/C++. Parameters ========== return_type : Type name : str parameters: iterable of Variable instances attrs : iterable of Attribute instances Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import real, FunctionPrototype >>> from sympy.printing import ccode >>> x, y = symbols('x y', real=True) >>> fp = FunctionPrototype(real, 'foo', [x, y]) >>> ccode(fp) 'double foo(double x, double y)' """ __slots__ = ('return_type', 'name', 'parameters', 'attrs') _construct_return_type = Type _construct_name = String @staticmethod def _construct_parameters(args): def _var(arg): if isinstance(arg, Declaration): return arg.variable elif isinstance(arg, Variable): return arg else: return Variable.deduced(arg) return Tuple(*map(_var, args)) @classmethod def from_FunctionDefinition(cls, func_def): if not isinstance(func_def, FunctionDefinition): raise TypeError("func_def is not an instance of FunctionDefiniton") return cls(**func_def.kwargs(exclude=('body',))) class FunctionDefinition(FunctionPrototype): """ Represents a function definition in the code. Parameters ========== return_type : Type name : str parameters: iterable of Variable instances body : CodeBlock or iterable attrs : iterable of Attribute instances Examples ======== >>> from sympy import symbols >>> from sympy.codegen.ast import real, FunctionPrototype >>> from sympy.printing import ccode >>> x, y = symbols('x y', real=True) >>> fp = FunctionPrototype(real, 'foo', [x, y]) >>> ccode(fp) 'double foo(double x, double y)' >>> from sympy.codegen.ast import FunctionDefinition, Return >>> body = [Return(x*y)] >>> fd = FunctionDefinition.from_FunctionPrototype(fp, body) >>> print(ccode(fd)) double foo(double x, double y){ return x*y; } """ __slots__ = FunctionPrototype.__slots__[:-1] + ('body', 'attrs') @classmethod def _construct_body(cls, itr): if isinstance(itr, CodeBlock): return itr else: return CodeBlock(*itr) @classmethod def from_FunctionPrototype(cls, func_proto, body): if not isinstance(func_proto, FunctionPrototype): raise TypeError("func_proto is not an instance of FunctionPrototype") return cls(body=body, **func_proto.kwargs()) class Return(Basic): """ Represents a return command in the code. """ class FunctionCall(Token, Expr): """ Represents a call to a function in the code. Parameters ========== name : str function_args : Tuple Examples ======== >>> from sympy.codegen.ast import FunctionCall >>> from sympy.printing.pycode import pycode >>> fcall = FunctionCall('foo', 'bar baz'.split()) >>> print(pycode(fcall)) foo(bar, baz) """ __slots__ = ('name', 'function_args') _construct_name = String _construct_function_args = staticmethod(lambda args: Tuple(*args))
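

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): a small, self-contained
# demo of the AST nodes defined here.  It only relies on behaviour documented
# in the docstrings above (CodeBlock.topological_sort, CodeBlock.cse,
# aug_assign, For) and on the public sympy names ``symbols``, ``sin``,
# ``Range`` and ``ccode``; the variable names are arbitrary.
if __name__ == "__main__":
    from sympy import symbols, sin, Range, ccode

    x, y, z, i = symbols('x y z i')

    # Assignments given out of order are reordered so that every variable is
    # assigned before it is used.
    block = CodeBlock.topological_sort([
        Assignment(x, y + z),
        Assignment(y, z + 1),
        Assignment(z, 2),
    ])
    print(ccode(block))     # z = 2;  y = z + 1;  x = y + z;  (one per line)

    # Common subexpression elimination pulls the repeated sin(x) out into a
    # fresh, automatically numbered symbol.
    reduced = CodeBlock(
        Assignment(x, 1),
        Assignment(y, sin(x) + 1),
        Assignment(z, sin(x) - 1),
    ).cse()
    print(ccode(reduced))

    # aug_assign builds augmented assignments, here as the body of a For loop.
    loop = For(i, Range(10), [aug_assign(x, '+', i)])
    print(loop)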
cc016acaad95cb31737721caa8a7884e0111fb48434940827a293bdc13d11049
""" This module contains SymPy functions mathcin corresponding to special math functions in the C standard library (since C99, also available in C++11). The functions defined in this module allows the user to express functions such as ``expm1`` as a SymPy function for symbolic manipulation. """ from sympy.core.function import ArgumentIndexError, Function from sympy.core.numbers import Rational from sympy.core.power import Pow from sympy.core.singleton import S from sympy.functions.elementary.exponential import exp, log from sympy.functions.elementary.miscellaneous import sqrt def _expm1(x): return exp(x) - S.One class expm1(Function): """ Represents the exponential function minus one. Explanation =========== The benefit of using ``expm1(x)`` over ``exp(x) - 1`` is that the latter is prone to cancellation under finite precision arithmetic when x is close to zero. Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import expm1 >>> '%.0e' % expm1(1e-99).evalf() '1e-99' >>> from math import exp >>> exp(1e-99) - 1 0.0 >>> expm1(x).diff(x) exp(x) See Also ======== log1p """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return exp(*self.args) else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _expm1(*self.args) def _eval_rewrite_as_exp(self, arg, **kwargs): return exp(arg) - S.One _eval_rewrite_as_tractable = _eval_rewrite_as_exp @classmethod def eval(cls, arg): exp_arg = exp.eval(arg) if exp_arg is not None: return exp_arg - S.One def _eval_is_real(self): return self.args[0].is_real def _eval_is_finite(self): return self.args[0].is_finite def _log1p(x): return log(x + S.One) class log1p(Function): """ Represents the natural logarithm of a number plus one. Explanation =========== The benefit of using ``log1p(x)`` over ``log(x + 1)`` is that the latter is prone to cancellation under finite precision arithmetic when x is close to zero. Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import log1p >>> from sympy.core.function import expand_log >>> '%.0e' % expand_log(log1p(1e-99)).evalf() '1e-99' >>> from math import log >>> log(1 + 1e-99) 0.0 >>> log1p(x).diff(x) 1/(x + 1) See Also ======== expm1 """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return S.One/(self.args[0] + S.One) else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _log1p(*self.args) def _eval_rewrite_as_log(self, arg, **kwargs): return _log1p(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_log @classmethod def eval(cls, arg): if arg.is_Rational: return log(arg + S.One) elif not arg.is_Float: # not safe to add 1 to Float return log.eval(arg + S.One) elif arg.is_number: return log(Rational(arg) + S.One) def _eval_is_real(self): return (self.args[0] + S.One).is_nonnegative def _eval_is_finite(self): if (self.args[0] + S.One).is_zero: return False return self.args[0].is_finite def _eval_is_positive(self): return self.args[0].is_positive def _eval_is_zero(self): return self.args[0].is_zero def _eval_is_nonnegative(self): return self.args[0].is_nonnegative _Two = S(2) def _exp2(x): return Pow(_Two, x) class exp2(Function): """ Represents the exponential function with base two. Explanation =========== The benefit of using ``exp2(x)`` over ``2**x`` is that the latter is not as efficient under finite precision arithmetic. 
Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import exp2 >>> exp2(2).evalf() == 4 True >>> exp2(x).diff(x) log(2)*exp2(x) See Also ======== log2 """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return self*log(_Two) else: raise ArgumentIndexError(self, argindex) def _eval_rewrite_as_Pow(self, arg, **kwargs): return _exp2(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_Pow def _eval_expand_func(self, **hints): return _exp2(*self.args) @classmethod def eval(cls, arg): if arg.is_number: return _exp2(arg) def _log2(x): return log(x)/log(_Two) class log2(Function): """ Represents the logarithm function with base two. Explanation =========== The benefit of using ``log2(x)`` over ``log(x)/log(2)`` is that the latter is not as efficient under finite precision arithmetic. Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import log2 >>> log2(4).evalf() == 2 True >>> log2(x).diff(x) 1/(x*log(2)) See Also ======== exp2 log10 """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return S.One/(log(_Two)*self.args[0]) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): if arg.is_number: result = log.eval(arg, base=_Two) if result.is_Atom: return result elif arg.is_Pow and arg.base == _Two: return arg.exp def _eval_evalf(self, *args, **kwargs): return self.rewrite(log).evalf(*args, **kwargs) def _eval_expand_func(self, **hints): return _log2(*self.args) def _eval_rewrite_as_log(self, arg, **kwargs): return _log2(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_log def _fma(x, y, z): return x*y + z class fma(Function): """ Represents "fused multiply add". Explanation =========== The benefit of using ``fma(x, y, z)`` over ``x*y + z`` is that, under finite precision arithmetic, the former is supported by special instructions on some CPUs. Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.codegen.cfunctions import fma >>> fma(x, y, z).diff(x) y """ nargs = 3 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex in (1, 2): return self.args[2 - argindex] elif argindex == 3: return S.One else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _fma(*self.args) def _eval_rewrite_as_tractable(self, arg, limitvar=None, **kwargs): return _fma(arg) _Ten = S(10) def _log10(x): return log(x)/log(_Ten) class log10(Function): """ Represents the logarithm function with base ten. Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import log10 >>> log10(100).evalf() == 2 True >>> log10(x).diff(x) 1/(x*log(10)) See Also ======== log2 """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return S.One/(log(_Ten)*self.args[0]) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): if arg.is_number: result = log.eval(arg, base=_Ten) if result.is_Atom: return result elif arg.is_Pow and arg.base == _Ten: return arg.exp def _eval_expand_func(self, **hints): return _log10(*self.args) def _eval_rewrite_as_log(self, arg, **kwargs): return _log10(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_log def _Sqrt(x): return Pow(x, S.Half) class Sqrt(Function): # 'sqrt' already defined in sympy.functions.elementary.miscellaneous """ Represents the square root function. 
Explanation =========== The reason why one would use ``Sqrt(x)`` over ``sqrt(x)`` is that the latter is internally represented as ``Pow(x, S.Half)`` which may not be what one wants when doing code-generation. Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import Sqrt >>> Sqrt(x) Sqrt(x) >>> Sqrt(x).diff(x) 1/(2*sqrt(x)) See Also ======== Cbrt """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return Pow(self.args[0], Rational(-1, 2))/_Two else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _Sqrt(*self.args) def _eval_rewrite_as_Pow(self, arg, **kwargs): return _Sqrt(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_Pow def _Cbrt(x): return Pow(x, Rational(1, 3)) class Cbrt(Function): # 'cbrt' already defined in sympy.functions.elementary.miscellaneous """ Represents the cube root function. Explanation =========== The reason why one would use ``Cbrt(x)`` over ``cbrt(x)`` is that the latter is internally represented as ``Pow(x, Rational(1, 3))`` which may not be what one wants when doing code-generation. Examples ======== >>> from sympy.abc import x >>> from sympy.codegen.cfunctions import Cbrt >>> Cbrt(x) Cbrt(x) >>> Cbrt(x).diff(x) 1/(3*x**(2/3)) See Also ======== Sqrt """ nargs = 1 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return Pow(self.args[0], Rational(-_Two/3))/3 else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _Cbrt(*self.args) def _eval_rewrite_as_Pow(self, arg, **kwargs): return _Cbrt(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_Pow def _hypot(x, y): return sqrt(Pow(x, 2) + Pow(y, 2)) class hypot(Function): """ Represents the hypotenuse function. Explanation =========== The hypotenuse function is provided by e.g. the math library in the C99 standard, hence one may want to represent the function symbolically when doing code-generation. Examples ======== >>> from sympy.abc import x, y >>> from sympy.codegen.cfunctions import hypot >>> hypot(3, 4).evalf() == 5 True >>> hypot(x, y) hypot(x, y) >>> hypot(x, y).diff(x) x/hypot(x, y) """ nargs = 2 def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex in (1, 2): return 2*self.args[argindex-1]/(_Two*self.func(*self.args)) else: raise ArgumentIndexError(self, argindex) def _eval_expand_func(self, **hints): return _hypot(*self.args) def _eval_rewrite_as_Pow(self, arg, **kwargs): return _hypot(arg) _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
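

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the functions defined
# here stay symbolic until explicitly rewritten, which is what makes them
# useful for code generation.  The expected results in the comments restate
# the rewrite rules and derivatives documented in the docstrings above; the
# symbol names are arbitrary.
if __name__ == "__main__":
    from sympy import Symbol, ccode

    x = Symbol('x', real=True)
    y = Symbol('y', real=True)

    # The C99 code printer knows these functions by name ...
    print(ccode(expm1(x)))           # expm1(x)
    # ... and they can be expanded back to elementary functions when needed.
    print(expm1(x).rewrite(exp))     # exp(x) - 1
    print(log1p(x).rewrite(log))     # log(x + 1)

    # exp2 and log2 are thin wrappers around Pow and log.
    print(exp2(x).rewrite(Pow))      # 2**x
    print(log2(x).diff(x))           # 1/(x*log(2))

    # fma and hypot mirror the corresponding C99 library functions.
    print(fma(x, y, 1).diff(x))      # y
    print(hypot(x, y).diff(x))       # x/hypot(x, y)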
0a688065f84fdab595e7ac45d3dcfd0a255603bc2014d9d6a1693b03edc7f2ce
from typing import Dict, Callable from sympy.core import S, Add, Expr, Basic, Mul from sympy.logic.boolalg import Boolean from sympy.assumptions import Q, ask # type: ignore def refine(expr, assumptions=True): """ Simplify an expression using assumptions. Explanation =========== Gives the form of expr that would be obtained if symbols in it were replaced by explicit numerical expressions satisfying the assumptions. Examples ======== >>> from sympy import refine, sqrt, Q >>> from sympy.abc import x >>> refine(sqrt(x**2), Q.real(x)) Abs(x) >>> refine(sqrt(x**2), Q.positive(x)) x """ if not isinstance(expr, Basic): return expr if not expr.is_Atom: args = [refine(arg, assumptions) for arg in expr.args] # TODO: this will probably not work with Integral or Polynomial expr = expr.func(*args) if hasattr(expr, '_eval_refine'): ref_expr = expr._eval_refine(assumptions) if ref_expr is not None: return ref_expr name = expr.__class__.__name__ handler = handlers_dict.get(name, None) if handler is None: return expr new_expr = handler(expr, assumptions) if (new_expr is None) or (expr == new_expr): return expr if not isinstance(new_expr, Expr): return new_expr return refine(new_expr, assumptions) def refine_abs(expr, assumptions): """ Handler for the absolute value. Examples ======== >>> from sympy import Q, Abs >>> from sympy.assumptions.refine import refine_abs >>> from sympy.abc import x >>> refine_abs(Abs(x), Q.real(x)) >>> refine_abs(Abs(x), Q.positive(x)) x >>> refine_abs(Abs(x), Q.negative(x)) -x """ from sympy.core.logic import fuzzy_not from sympy import Abs arg = expr.args[0] if ask(Q.real(arg), assumptions) and \ fuzzy_not(ask(Q.negative(arg), assumptions)): # if it's nonnegative return arg if ask(Q.negative(arg), assumptions): return -arg # arg is Mul if isinstance(arg, Mul): r = [refine(abs(a), assumptions) for a in arg.args] non_abs = [] in_abs = [] for i in r: if isinstance(i, Abs): in_abs.append(i.args[0]) else: non_abs.append(i) return Mul(*non_abs) * Abs(Mul(*in_abs)) def refine_Pow(expr, assumptions): """ Handler for instances of Pow. 
Examples ======== >>> from sympy import Q >>> from sympy.assumptions.refine import refine_Pow >>> from sympy.abc import x,y,z >>> refine_Pow((-1)**x, Q.real(x)) >>> refine_Pow((-1)**x, Q.even(x)) 1 >>> refine_Pow((-1)**x, Q.odd(x)) -1 For powers of -1, even parts of the exponent can be simplified: >>> refine_Pow((-1)**(x+y), Q.even(x)) (-1)**y >>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z)) (-1)**y >>> refine_Pow((-1)**(x+y+2), Q.odd(x)) (-1)**(y + 1) >>> refine_Pow((-1)**(x+3), True) (-1)**(x + 1) """ from sympy.core import Pow, Rational from sympy.functions.elementary.complexes import Abs from sympy.functions import sign if isinstance(expr.base, Abs): if ask(Q.real(expr.base.args[0]), assumptions) and \ ask(Q.even(expr.exp), assumptions): return expr.base.args[0] ** expr.exp if ask(Q.real(expr.base), assumptions): if expr.base.is_number: if ask(Q.even(expr.exp), assumptions): return abs(expr.base) ** expr.exp if ask(Q.odd(expr.exp), assumptions): return sign(expr.base) * abs(expr.base) ** expr.exp if isinstance(expr.exp, Rational): if type(expr.base) is Pow: return abs(expr.base.base) ** (expr.base.exp * expr.exp) if expr.base is S.NegativeOne: if expr.exp.is_Add: old = expr # For powers of (-1) we can remove # - even terms # - pairs of odd terms # - a single odd term + 1 # - A numerical constant N can be replaced with mod(N,2) coeff, terms = expr.exp.as_coeff_add() terms = set(terms) even_terms = set() odd_terms = set() initial_number_of_terms = len(terms) for t in terms: if ask(Q.even(t), assumptions): even_terms.add(t) elif ask(Q.odd(t), assumptions): odd_terms.add(t) terms -= even_terms if len(odd_terms) % 2: terms -= odd_terms new_coeff = (coeff + S.One) % 2 else: terms -= odd_terms new_coeff = coeff % 2 if new_coeff != coeff or len(terms) < initial_number_of_terms: terms.add(new_coeff) expr = expr.base**(Add(*terms)) # Handle (-1)**((-1)**n/2 + m/2) e2 = 2*expr.exp if ask(Q.even(e2), assumptions): if e2.could_extract_minus_sign(): e2 *= expr.base if e2.is_Add: i, p = e2.as_two_terms() if p.is_Pow and p.base is S.NegativeOne: if ask(Q.integer(p.exp), assumptions): i = (i + 1)/2 if ask(Q.even(i), assumptions): return expr.base**p.exp elif ask(Q.odd(i), assumptions): return expr.base**(p.exp + 1) else: return expr.base**(p.exp + i) if old != expr: return expr def refine_atan2(expr, assumptions): """ Handler for the atan2 function. 
Examples ======== >>> from sympy import Q, atan2 >>> from sympy.assumptions.refine import refine_atan2 >>> from sympy.abc import x, y >>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x)) atan(y/x) >>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x)) atan(y/x) - pi >>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x)) atan(y/x) + pi >>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x)) pi >>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x)) pi/2 >>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x)) -pi/2 >>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x)) nan """ from sympy.functions.elementary.trigonometric import atan from sympy.core import S y, x = expr.args if ask(Q.real(y) & Q.positive(x), assumptions): return atan(y / x) elif ask(Q.negative(y) & Q.negative(x), assumptions): return atan(y / x) - S.Pi elif ask(Q.positive(y) & Q.negative(x), assumptions): return atan(y / x) + S.Pi elif ask(Q.zero(y) & Q.negative(x), assumptions): return S.Pi elif ask(Q.positive(y) & Q.zero(x), assumptions): return S.Pi/2 elif ask(Q.negative(y) & Q.zero(x), assumptions): return -S.Pi/2 elif ask(Q.zero(y) & Q.zero(x), assumptions): return S.NaN else: return expr def refine_Relational(expr, assumptions): """ Handler for Relational. Examples ======== >>> from sympy.assumptions.refine import refine_Relational >>> from sympy.assumptions.ask import Q >>> from sympy.abc import x >>> refine_Relational(x<0, ~Q.is_true(x<0)) False """ return ask(Q.is_true(expr), assumptions) def refine_re(expr, assumptions): """ Handler for real part. Examples ======== >>> from sympy.assumptions.refine import refine_re >>> from sympy import Q, re >>> from sympy.abc import x >>> refine_re(re(x), Q.real(x)) x >>> refine_re(re(x), Q.imaginary(x)) 0 """ arg = expr.args[0] if ask(Q.real(arg), assumptions): return arg if ask(Q.imaginary(arg), assumptions): return S.Zero return _refine_reim(expr, assumptions) def refine_im(expr, assumptions): """ Handler for imaginary part. Explanation =========== >>> from sympy.assumptions.refine import refine_im >>> from sympy import Q, im >>> from sympy.abc import x >>> refine_im(im(x), Q.real(x)) 0 >>> refine_im(im(x), Q.imaginary(x)) -I*x """ arg = expr.args[0] if ask(Q.real(arg), assumptions): return S.Zero if ask(Q.imaginary(arg), assumptions): return - S.ImaginaryUnit * arg return _refine_reim(expr, assumptions) def _refine_reim(expr, assumptions): # Helper function for refine_re & refine_im expanded = expr.expand(complex = True) if expanded != expr: refined = refine(expanded, assumptions) if refined != expanded: return refined # Best to leave the expression as is return None def refine_sign(expr, assumptions): """ Handler for sign. 
Examples ======== >>> from sympy.assumptions.refine import refine_sign >>> from sympy import Symbol, Q, sign, im >>> x = Symbol('x', real = True) >>> expr = sign(x) >>> refine_sign(expr, Q.positive(x) & Q.nonzero(x)) 1 >>> refine_sign(expr, Q.negative(x) & Q.nonzero(x)) -1 >>> refine_sign(expr, Q.zero(x)) 0 >>> y = Symbol('y', imaginary = True) >>> expr = sign(y) >>> refine_sign(expr, Q.positive(im(y))) I >>> refine_sign(expr, Q.negative(im(y))) -I """ arg = expr.args[0] if ask(Q.zero(arg), assumptions): return S.Zero if ask(Q.real(arg)): if ask(Q.positive(arg), assumptions): return S.One if ask(Q.negative(arg), assumptions): return S.NegativeOne if ask(Q.imaginary(arg)): arg_re, arg_im = arg.as_real_imag() if ask(Q.positive(arg_im), assumptions): return S.ImaginaryUnit if ask(Q.negative(arg_im), assumptions): return -S.ImaginaryUnit return expr def refine_matrixelement(expr, assumptions): """ Handler for symmetric part. Examples ======== >>> from sympy.assumptions.refine import refine_matrixelement >>> from sympy import Q >>> from sympy.matrices.expressions.matexpr import MatrixSymbol >>> X = MatrixSymbol('X', 3, 3) >>> refine_matrixelement(X[0, 1], Q.symmetric(X)) X[0, 1] >>> refine_matrixelement(X[1, 0], Q.symmetric(X)) X[0, 1] """ from sympy.matrices.expressions.matexpr import MatrixElement matrix, i, j = expr.args if ask(Q.symmetric(matrix), assumptions): if (i - j).could_extract_minus_sign(): return expr return MatrixElement(matrix, j, i) handlers_dict = { 'Abs': refine_abs, 'Pow': refine_Pow, 'atan2': refine_atan2, 'Equality': refine_Relational, 'Unequality': refine_Relational, 'GreaterThan': refine_Relational, 'LessThan': refine_Relational, 'StrictGreaterThan': refine_Relational, 'StrictLessThan': refine_Relational, 're': refine_re, 'im': refine_im, 'sign': refine_sign, 'MatrixElement': refine_matrixelement } # type: Dict[str, Callable[[Expr, Boolean], Expr]]
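

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): a few calls to the
# public ``refine`` entry point, exercising the handlers registered in
# ``handlers_dict``.  The expected results in the comments restate the
# handler docstrings above.  Note that ``refine_sign`` consults the symbol's
# own ``real=True`` assumption, hence the way ``x`` is declared; the symbol
# names themselves are arbitrary.
if __name__ == "__main__":
    from sympy import Symbol, Abs, sign, atan2, MatrixSymbol

    x = Symbol('x', real=True)
    y = Symbol('y')

    # Abs and sign collapse once the sign of the argument is known.
    print(refine(Abs(x), Q.positive(x)))                     # x
    print(refine(sign(x), Q.negative(x)))                    # -1

    # Powers of -1 drop the even part of their exponent.
    print(refine((-1)**(x + y + 2), Q.even(x)))              # (-1)**y

    # atan2 reduces to atan plus the appropriate quadrant correction.
    print(refine(atan2(y, x), Q.real(y) & Q.positive(x)))    # atan(y/x)

    # Elements of a symmetric matrix are normalised to one triangle.
    X = MatrixSymbol('X', 3, 3)
    print(refine(X[1, 0], Q.symmetric(X)))                   # X[0, 1]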
f47123de8daecc862b0e7706ba831391489aaf8027ca6bc1c4840120f382cee1
"""Module for querying SymPy objects about assumptions.""" from sympy.assumptions.assume import (global_assumptions, Predicate, AppliedPredicate) from sympy.core import sympify from sympy.core.cache import cacheit from sympy.core.relational import Relational from sympy.logic.boolalg import (to_cnf, And, Not, Or, Implies, Equivalent, BooleanFunction, BooleanAtom) from sympy.logic.inference import satisfiable from sympy.utilities.decorator import memoize_property from sympy.assumptions.cnf import CNF, EncodedCNF, Literal # Memoization is necessary for the properties of AssumptionKeys to # ensure that only one object of Predicate objects are created. # This is because assumption handlers are registered on those objects. class AssumptionKeys: """ This class contains all the supported keys by ``ask``. It should be accessed via the instance ``sympy.Q``. """ @memoize_property def hermitian(self): """ Hermitian predicate. Explanation =========== ``ask(Q.hermitian(x))`` is true iff ``x`` belongs to the set of Hermitian operators. References ========== .. [1] http://mathworld.wolfram.com/HermitianOperator.html """ # TODO: Add examples return Predicate('hermitian') @memoize_property def antihermitian(self): """ Antihermitian predicate. Explanation =========== ``Q.antihermitian(x)`` is true iff ``x`` belongs to the field of antihermitian operators, i.e., operators in the form ``x*I``, where ``x`` is Hermitian. References ========== .. [1] http://mathworld.wolfram.com/HermitianOperator.html """ # TODO: Add examples return Predicate('antihermitian') @memoize_property def real(self): r""" Real number predicate. Explanation =========== ``Q.real(x)`` is true iff ``x`` is a real number, i.e., it is in the interval `(-\infty, \infty)`. Note that, in particular the infinities are not real. Use ``Q.extended_real`` if you want to consider those as well. A few important facts about reals: - Every real number is positive, negative, or zero. Furthermore, because these sets are pairwise disjoint, each real number is exactly one of those three. - Every real number is also complex. - Every real number is finite. - Every real number is either rational or irrational. - Every real number is either algebraic or transcendental. - The facts ``Q.negative``, ``Q.zero``, ``Q.positive``, ``Q.nonnegative``, ``Q.nonpositive``, ``Q.nonzero``, ``Q.integer``, ``Q.rational``, and ``Q.irrational`` all imply ``Q.real``, as do all facts that imply those facts. - The facts ``Q.algebraic``, and ``Q.transcendental`` do not imply ``Q.real``; they imply ``Q.complex``. An algebraic or transcendental number may or may not be real. - The "non" facts (i.e., ``Q.nonnegative``, ``Q.nonzero``, ``Q.nonpositive`` and ``Q.noninteger``) are not equivalent to not the fact, but rather, not the fact *and* ``Q.real``. For example, ``Q.nonnegative`` means ``~Q.negative & Q.real``. So for example, ``I`` is not nonnegative, nonzero, or nonpositive. Examples ======== >>> from sympy import Q, ask, symbols >>> x = symbols('x') >>> ask(Q.real(x), Q.positive(x)) True >>> ask(Q.real(0)) True References ========== .. [1] https://en.wikipedia.org/wiki/Real_number """ return Predicate('real') @memoize_property def extended_real(self): r""" Extended real predicate. Explanation =========== ``Q.extended_real(x)`` is true iff ``x`` is a real number or `\{-\infty, \infty\}`. See documentation of ``Q.real`` for more information about related facts. 
Examples ======== >>> from sympy import ask, Q, oo, I >>> ask(Q.extended_real(1)) True >>> ask(Q.extended_real(I)) False >>> ask(Q.extended_real(oo)) True """ return Predicate('extended_real') @memoize_property def imaginary(self): """ Imaginary number predicate. Explanation =========== ``Q.imaginary(x)`` is true iff ``x`` can be written as a real number multiplied by the imaginary unit ``I``. Please note that ``0`` is not considered to be an imaginary number. Examples ======== >>> from sympy import Q, ask, I >>> ask(Q.imaginary(3*I)) True >>> ask(Q.imaginary(2 + 3*I)) False >>> ask(Q.imaginary(0)) False References ========== .. [1] https://en.wikipedia.org/wiki/Imaginary_number """ return Predicate('imaginary') @memoize_property def complex(self): """ Complex number predicate. Explanation =========== ``Q.complex(x)`` is true iff ``x`` belongs to the set of complex numbers. Note that every complex number is finite. Examples ======== >>> from sympy import Q, Symbol, ask, I, oo >>> x = Symbol('x') >>> ask(Q.complex(0)) True >>> ask(Q.complex(2 + 3*I)) True >>> ask(Q.complex(oo)) False References ========== .. [1] https://en.wikipedia.org/wiki/Complex_number """ return Predicate('complex') @memoize_property def algebraic(self): r""" Algebraic number predicate. Explanation =========== ``Q.algebraic(x)`` is true iff ``x`` belongs to the set of algebraic numbers. ``x`` is algebraic if there is some polynomial in ``p(x)\in \mathbb\{Q\}[x]`` such that ``p(x) = 0``. Examples ======== >>> from sympy import ask, Q, sqrt, I, pi >>> ask(Q.algebraic(sqrt(2))) True >>> ask(Q.algebraic(I)) True >>> ask(Q.algebraic(pi)) False References ========== .. [1] https://en.wikipedia.org/wiki/Algebraic_number """ return Predicate('algebraic') @memoize_property def transcendental(self): """ Transcedental number predicate. Explanation =========== ``Q.transcendental(x)`` is true iff ``x`` belongs to the set of transcendental numbers. A transcendental number is a real or complex number that is not algebraic. """ # TODO: Add examples return Predicate('transcendental') @memoize_property def integer(self): """ Integer predicate. Explanation =========== ``Q.integer(x)`` is true iff ``x`` belongs to the set of integer numbers. Examples ======== >>> from sympy import Q, ask, S >>> ask(Q.integer(5)) True >>> ask(Q.integer(S(1)/2)) False References ========== .. [1] https://en.wikipedia.org/wiki/Integer """ return Predicate('integer') @memoize_property def rational(self): """ Rational number predicate. Explanation =========== ``Q.rational(x)`` is true iff ``x`` belongs to the set of rational numbers. Examples ======== >>> from sympy import ask, Q, pi, S >>> ask(Q.rational(0)) True >>> ask(Q.rational(S(1)/2)) True >>> ask(Q.rational(pi)) False References ========== https://en.wikipedia.org/wiki/Rational_number """ return Predicate('rational') @memoize_property def irrational(self): """ Irrational number predicate. Explanation =========== ``Q.irrational(x)`` is true iff ``x`` is any real number that cannot be expressed as a ratio of integers. Examples ======== >>> from sympy import ask, Q, pi, S, I >>> ask(Q.irrational(0)) False >>> ask(Q.irrational(S(1)/2)) False >>> ask(Q.irrational(pi)) True >>> ask(Q.irrational(I)) False References ========== .. [1] https://en.wikipedia.org/wiki/Irrational_number """ return Predicate('irrational') @memoize_property def finite(self): """ Finite predicate. Explanation =========== ``Q.finite(x)`` is true if ``x`` is neither an infinity nor a ``NaN``. 
In other words, ``ask(Q.finite(x))`` is true for all ``x`` having a bounded absolute value. Examples ======== >>> from sympy import Q, ask, Symbol, S, oo, I >>> x = Symbol('x') >>> ask(Q.finite(S.NaN)) False >>> ask(Q.finite(oo)) False >>> ask(Q.finite(1)) True >>> ask(Q.finite(2 + 3*I)) True References ========== .. [1] https://en.wikipedia.org/wiki/Finite """ return Predicate('finite') @memoize_property def infinite(self): """ Infinite number predicate. ``Q.infinite(x)`` is true iff the absolute value of ``x`` is infinity. """ # TODO: Add examples return Predicate('infinite') @memoize_property def positive(self): r""" Positive real number predicate. Explanation =========== ``Q.positive(x)`` is true iff ``x`` is real and `x > 0`, that is if ``x`` is in the interval `(0, \infty)`. In particular, infinity is not positive. A few important facts about positive numbers: - Note that ``Q.nonpositive`` and ``~Q.positive`` are *not* the same thing. ``~Q.positive(x)`` simply means that ``x`` is not positive, whereas ``Q.nonpositive(x)`` means that ``x`` is real and not positive, i.e., ``Q.nonpositive(x)`` is logically equivalent to `Q.negative(x) | Q.zero(x)``. So for example, ``~Q.positive(I)`` is true, whereas ``Q.nonpositive(I)`` is false. - See the documentation of ``Q.real`` for more information about related facts. Examples ======== >>> from sympy import Q, ask, symbols, I >>> x = symbols('x') >>> ask(Q.positive(x), Q.real(x) & ~Q.negative(x) & ~Q.zero(x)) True >>> ask(Q.positive(1)) True >>> ask(Q.nonpositive(I)) False >>> ask(~Q.positive(I)) True """ return Predicate('positive') @memoize_property def negative(self): r""" Negative number predicate. Explanation =========== ``Q.negative(x)`` is true iff ``x`` is a real number and :math:`x < 0`, that is, it is in the interval :math:`(-\infty, 0)`. Note in particular that negative infinity is not negative. A few important facts about negative numbers: - Note that ``Q.nonnegative`` and ``~Q.negative`` are *not* the same thing. ``~Q.negative(x)`` simply means that ``x`` is not negative, whereas ``Q.nonnegative(x)`` means that ``x`` is real and not negative, i.e., ``Q.nonnegative(x)`` is logically equivalent to ``Q.zero(x) | Q.positive(x)``. So for example, ``~Q.negative(I)`` is true, whereas ``Q.nonnegative(I)`` is false. - See the documentation of ``Q.real`` for more information about related facts. Examples ======== >>> from sympy import Q, ask, symbols, I >>> x = symbols('x') >>> ask(Q.negative(x), Q.real(x) & ~Q.positive(x) & ~Q.zero(x)) True >>> ask(Q.negative(-1)) True >>> ask(Q.nonnegative(I)) False >>> ask(~Q.negative(I)) True """ return Predicate('negative') @memoize_property def zero(self): """ Zero number predicate. Explanation =========== ``ask(Q.zero(x))`` is true iff the value of ``x`` is zero. Examples ======== >>> from sympy import ask, Q, oo, symbols >>> x, y = symbols('x, y') >>> ask(Q.zero(0)) True >>> ask(Q.zero(1/oo)) True >>> ask(Q.zero(0*oo)) False >>> ask(Q.zero(1)) False >>> ask(Q.zero(x*y), Q.zero(x) | Q.zero(y)) True """ return Predicate('zero') @memoize_property def nonzero(self): """ Nonzero real number predicate. Explanation =========== ``ask(Q.nonzero(x))`` is true iff ``x`` is real and ``x`` is not zero. Note in particular that ``Q.nonzero(x)`` is false if ``x`` is not real. Use ``~Q.zero(x)`` if you want the negation of being zero without any real assumptions. A few important facts about nonzero numbers: - ``Q.nonzero`` is logically equivalent to ``Q.positive | Q.negative``. 
- See the documentation of ``Q.real`` for more information about related facts. Examples ======== >>> from sympy import Q, ask, symbols, I, oo >>> x = symbols('x') >>> print(ask(Q.nonzero(x), ~Q.zero(x))) None >>> ask(Q.nonzero(x), Q.positive(x)) True >>> ask(Q.nonzero(x), Q.zero(x)) False >>> ask(Q.nonzero(0)) False >>> ask(Q.nonzero(I)) False >>> ask(~Q.zero(I)) True >>> ask(Q.nonzero(oo)) False """ return Predicate('nonzero') @memoize_property def nonpositive(self): """ Nonpositive real number predicate. Explanation =========== ``ask(Q.nonpositive(x))`` is true iff ``x`` belongs to the set of negative numbers including zero. - Note that ``Q.nonpositive`` and ``~Q.positive`` are *not* the same thing. ``~Q.positive(x)`` simply means that ``x`` is not positive, whereas ``Q.nonpositive(x)`` means that ``x`` is real and not positive, i.e., ``Q.nonpositive(x)`` is logically equivalent to `Q.negative(x) | Q.zero(x)``. So for example, ``~Q.positive(I)`` is true, whereas ``Q.nonpositive(I)`` is false. Examples ======== >>> from sympy import Q, ask, I >>> ask(Q.nonpositive(-1)) True >>> ask(Q.nonpositive(0)) True >>> ask(Q.nonpositive(1)) False >>> ask(Q.nonpositive(I)) False >>> ask(Q.nonpositive(-I)) False """ return Predicate('nonpositive') @memoize_property def nonnegative(self): """ Nonnegative real number predicate. Explanation =========== ``ask(Q.nonnegative(x))`` is true iff ``x`` belongs to the set of positive numbers including zero. - Note that ``Q.nonnegative`` and ``~Q.negative`` are *not* the same thing. ``~Q.negative(x)`` simply means that ``x`` is not negative, whereas ``Q.nonnegative(x)`` means that ``x`` is real and not negative, i.e., ``Q.nonnegative(x)`` is logically equivalent to ``Q.zero(x) | Q.positive(x)``. So for example, ``~Q.negative(I)`` is true, whereas ``Q.nonnegative(I)`` is false. Examples ======== >>> from sympy import Q, ask, I >>> ask(Q.nonnegative(1)) True >>> ask(Q.nonnegative(0)) True >>> ask(Q.nonnegative(-1)) False >>> ask(Q.nonnegative(I)) False >>> ask(Q.nonnegative(-I)) False """ return Predicate('nonnegative') @memoize_property def even(self): """ Even number predicate. Explanation =========== ``ask(Q.even(x))`` is true iff ``x`` belongs to the set of even integers. Examples ======== >>> from sympy import Q, ask, pi >>> ask(Q.even(0)) True >>> ask(Q.even(2)) True >>> ask(Q.even(3)) False >>> ask(Q.even(pi)) False """ return Predicate('even') @memoize_property def odd(self): """ Odd number predicate. Explanation =========== ``ask(Q.odd(x))`` is true iff ``x`` belongs to the set of odd numbers. Examples ======== >>> from sympy import Q, ask, pi >>> ask(Q.odd(0)) False >>> ask(Q.odd(2)) False >>> ask(Q.odd(3)) True >>> ask(Q.odd(pi)) False """ return Predicate('odd') @memoize_property def prime(self): """ Prime number predicate. Explanation =========== ``ask(Q.prime(x))`` is true iff ``x`` is a natural number greater than 1 that has no positive divisors other than ``1`` and the number itself. Examples ======== >>> from sympy import Q, ask >>> ask(Q.prime(0)) False >>> ask(Q.prime(1)) False >>> ask(Q.prime(2)) True >>> ask(Q.prime(20)) False >>> ask(Q.prime(-3)) False """ return Predicate('prime') @memoize_property def composite(self): """ Composite number predicate. Explanation =========== ``ask(Q.composite(x))`` is true iff ``x`` is a positive integer and has at least one positive divisor other than ``1`` and the number itself. 

        Examples
        ========

        >>> from sympy import Q, ask
        >>> ask(Q.composite(0))
        False
        >>> ask(Q.composite(1))
        False
        >>> ask(Q.composite(2))
        False
        >>> ask(Q.composite(20))
        True

        """
        return Predicate('composite')

    @memoize_property
    def commutative(self):
        """
        Commutative predicate.

        Explanation
        ===========

        ``ask(Q.commutative(x))`` is true iff ``x`` commutes with any other
        object with respect to the multiplication operation.

        """
        # TODO: Add examples
        return Predicate('commutative')

    @memoize_property
    def is_true(self):
        """
        Generic predicate.

        Explanation
        ===========

        ``ask(Q.is_true(x))`` is true iff ``x`` is true. This only makes
        sense if ``x`` is a predicate.

        Examples
        ========

        >>> from sympy import ask, Q, symbols
        >>> x = symbols('x')
        >>> ask(Q.is_true(True))
        True

        """
        return Predicate('is_true')

    @memoize_property
    def symmetric(self):
        """
        Symmetric matrix predicate.

        Explanation
        ===========

        ``Q.symmetric(x)`` is true iff ``x`` is a square matrix and is equal
        to its transpose. Every square diagonal matrix is a symmetric matrix.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol
        >>> X = MatrixSymbol('X', 2, 2)
        >>> Y = MatrixSymbol('Y', 2, 3)
        >>> Z = MatrixSymbol('Z', 2, 2)
        >>> ask(Q.symmetric(X*Z), Q.symmetric(X) & Q.symmetric(Z))
        True
        >>> ask(Q.symmetric(X + Z), Q.symmetric(X) & Q.symmetric(Z))
        True
        >>> ask(Q.symmetric(Y))
        False

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Symmetric_matrix

        """
        # TODO: Add handlers to make these keys work with
        # actual matrices and add more examples in the docstring.
        return Predicate('symmetric')

    @memoize_property
    def invertible(self):
        """
        Invertible matrix predicate.

        Explanation
        ===========

        ``Q.invertible(x)`` is true iff ``x`` is an invertible matrix.
        A square matrix is invertible if and only if its determinant is
        nonzero.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol
        >>> X = MatrixSymbol('X', 2, 2)
        >>> Y = MatrixSymbol('Y', 2, 3)
        >>> Z = MatrixSymbol('Z', 2, 2)
        >>> ask(Q.invertible(X*Y), Q.invertible(X))
        False
        >>> ask(Q.invertible(X*Z), Q.invertible(X) & Q.invertible(Z))
        True
        >>> ask(Q.invertible(X), Q.fullrank(X) & Q.square(X))
        True

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Invertible_matrix

        """
        return Predicate('invertible')

    @memoize_property
    def orthogonal(self):
        """
        Orthogonal matrix predicate.

        Explanation
        ===========

        ``Q.orthogonal(x)`` is true iff ``x`` is an orthogonal matrix.
        A square matrix ``M`` is an orthogonal matrix if it satisfies
        ``M^TM = MM^T = I`` where ``M^T`` is the transpose matrix of ``M``
        and ``I`` is an identity matrix. Note that an orthogonal matrix is
        necessarily invertible.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol, Identity
        >>> X = MatrixSymbol('X', 2, 2)
        >>> Y = MatrixSymbol('Y', 2, 3)
        >>> Z = MatrixSymbol('Z', 2, 2)
        >>> ask(Q.orthogonal(Y))
        False
        >>> ask(Q.orthogonal(X*Z*X), Q.orthogonal(X) & Q.orthogonal(Z))
        True
        >>> ask(Q.orthogonal(Identity(3)))
        True
        >>> ask(Q.invertible(X), Q.orthogonal(X))
        True

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Orthogonal_matrix

        """
        return Predicate('orthogonal')

    @memoize_property
    def unitary(self):
        """
        Unitary matrix predicate.

        Explanation
        ===========

        ``Q.unitary(x)`` is true iff ``x`` is a unitary matrix.
        A unitary matrix is the complex analogue of an orthogonal matrix.
        A square matrix ``M`` with complex elements is unitary if it
        satisfies :math:`M^* M = MM^* = I` where :math:`M^*` is the
        conjugate transpose matrix of ``M``.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol, Identity
        >>> X = MatrixSymbol('X', 2, 2)
        >>> Y = MatrixSymbol('Y', 2, 3)
        >>> Z = MatrixSymbol('Z', 2, 2)
        >>> ask(Q.unitary(Y))
        False
        >>> ask(Q.unitary(X*Z*X), Q.unitary(X) & Q.unitary(Z))
        True
        >>> ask(Q.unitary(Identity(3)))
        True

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Unitary_matrix

        """
        return Predicate('unitary')

    @memoize_property
    def positive_definite(self):
        r"""
        Positive definite matrix predicate.

        Explanation
        ===========

        If ``M`` is an :math:`n \times n` symmetric real matrix, it is said
        to be positive definite if :math:`Z^TMZ` is positive for every
        non-zero column vector ``Z`` of ``n`` real numbers.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol, Identity
        >>> X = MatrixSymbol('X', 2, 2)
        >>> Y = MatrixSymbol('Y', 2, 3)
        >>> Z = MatrixSymbol('Z', 2, 2)
        >>> ask(Q.positive_definite(Y))
        False
        >>> ask(Q.positive_definite(Identity(3)))
        True
        >>> ask(Q.positive_definite(X + Z), Q.positive_definite(X) &
        ...     Q.positive_definite(Z))
        True

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Positive-definite_matrix

        """
        return Predicate('positive_definite')

    @memoize_property
    def upper_triangular(self):
        """
        Upper triangular matrix predicate.

        Explanation
        ===========

        A matrix ``M`` is called an upper triangular matrix if
        :math:`M_{ij}=0` for :math:`i>j`, that is, if all entries below
        the main diagonal are zero.

        Examples
        ========

        >>> from sympy import Q, ask, ZeroMatrix, Identity
        >>> ask(Q.upper_triangular(Identity(3)))
        True
        >>> ask(Q.upper_triangular(ZeroMatrix(3, 3)))
        True

        References
        ==========

        .. [1] http://mathworld.wolfram.com/UpperTriangularMatrix.html

        """
        return Predicate('upper_triangular')

    @memoize_property
    def lower_triangular(self):
        """
        Lower triangular matrix predicate.

        Explanation
        ===========

        A matrix ``M`` is called a lower triangular matrix if
        :math:`M_{ij}=0` for :math:`i<j`, that is, if all entries above
        the main diagonal are zero.

        Examples
        ========

        >>> from sympy import Q, ask, ZeroMatrix, Identity
        >>> ask(Q.lower_triangular(Identity(3)))
        True
        >>> ask(Q.lower_triangular(ZeroMatrix(3, 3)))
        True

        References
        ==========

        .. [1] http://mathworld.wolfram.com/LowerTriangularMatrix.html

        """
        return Predicate('lower_triangular')

    @memoize_property
    def diagonal(self):
        """
        Diagonal matrix predicate.

        Explanation
        ===========

        ``Q.diagonal(x)`` is true iff ``x`` is a diagonal matrix. A diagonal
        matrix is a matrix in which the entries outside the main diagonal
        are all zero.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix
        >>> X = MatrixSymbol('X', 2, 2)
        >>> ask(Q.diagonal(ZeroMatrix(3, 3)))
        True
        >>> ask(Q.diagonal(X), Q.lower_triangular(X) &
        ...     Q.upper_triangular(X))
        True

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Diagonal_matrix

        """
        return Predicate('diagonal')

    @memoize_property
    def fullrank(self):
        """
        Fullrank matrix predicate.

        Explanation
        ===========

        ``Q.fullrank(x)`` is true iff ``x`` is a full rank matrix.
        A matrix is full rank if all rows and columns of the matrix
        are linearly independent. A square matrix is full rank iff
        its determinant is nonzero.

        Examples
        ========

        >>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix, Identity
        >>> X = MatrixSymbol('X', 2, 2)
        >>> ask(Q.fullrank(X.T), Q.fullrank(X))
        True
        >>> ask(Q.fullrank(ZeroMatrix(3, 3)))
        False
        >>> ask(Q.fullrank(Identity(3)))
        True

        """
        return Predicate('fullrank')

    @memoize_property
    def square(self):
        """
        Square matrix predicate.

        Explanation
        ===========

        ``Q.square(x)`` is true iff ``x`` is a square matrix. A square matrix
        is a matrix with the same number of rows and columns.
Examples ======== >>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix, Identity >>> X = MatrixSymbol('X', 2, 2) >>> Y = MatrixSymbol('X', 2, 3) >>> ask(Q.square(X)) True >>> ask(Q.square(Y)) False >>> ask(Q.square(ZeroMatrix(3, 3))) True >>> ask(Q.square(Identity(3))) True References ========== .. [1] https://en.wikipedia.org/wiki/Square_matrix """ return Predicate('square') @memoize_property def integer_elements(self): """ Integer elements matrix predicate. Explanation =========== ``Q.integer_elements(x)`` is true iff all the elements of ``x`` are integers. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.integer(X[1, 2]), Q.integer_elements(X)) True """ return Predicate('integer_elements') @memoize_property def real_elements(self): """ Real elements matrix predicate. Explanation =========== ``Q.real_elements(x)`` is true iff all the elements of ``x`` are real numbers. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.real(X[1, 2]), Q.real_elements(X)) True """ return Predicate('real_elements') @memoize_property def complex_elements(self): """ Complex elements matrix predicate. Explanation =========== ``Q.complex_elements(x)`` is true iff all the elements of ``x`` are complex numbers. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.complex(X[1, 2]), Q.complex_elements(X)) True >>> ask(Q.complex_elements(X), Q.integer_elements(X)) True """ return Predicate('complex_elements') @memoize_property def singular(self): """ Singular matrix predicate. A matrix is singular iff the value of its determinant is 0. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.singular(X), Q.invertible(X)) False >>> ask(Q.singular(X), ~Q.invertible(X)) True References ========== .. [1] http://mathworld.wolfram.com/SingularMatrix.html """ return Predicate('singular') @memoize_property def normal(self): """ Normal matrix predicate. A matrix is normal if it commutes with its conjugate transpose. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.normal(X), Q.unitary(X)) True References ========== .. [1] https://en.wikipedia.org/wiki/Normal_matrix """ return Predicate('normal') @memoize_property def triangular(self): """ Triangular matrix predicate. Explanation =========== ``Q.triangular(X)`` is true if ``X`` is one that is either lower triangular or upper triangular. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.triangular(X), Q.upper_triangular(X)) True >>> ask(Q.triangular(X), Q.lower_triangular(X)) True References ========== .. [1] https://en.wikipedia.org/wiki/Triangular_matrix """ return Predicate('triangular') @memoize_property def unit_triangular(self): """ Unit triangular matrix predicate. Explanation =========== A unit triangular matrix is a triangular matrix with 1s on the diagonal. Examples ======== >>> from sympy import Q, ask, MatrixSymbol >>> X = MatrixSymbol('X', 4, 4) >>> ask(Q.triangular(X), Q.unit_triangular(X)) True """ return Predicate('unit_triangular') Q = AssumptionKeys() def _extract_facts(expr, symbol, check_reversed_rel=True): """ Helper for ask(). Explanation =========== Extracts the facts relevant to the symbol from an assumption. Returns None if there is nothing to extract. 
""" if isinstance(symbol, Relational): if check_reversed_rel: rev = _extract_facts(expr, symbol.reversed, False) if rev is not None: return rev if isinstance(expr, bool): return if not expr.has(symbol): return if isinstance(expr, AppliedPredicate): if expr.arg == symbol: return expr.func else: return if isinstance(expr, Not) and expr.args[0].func in (And, Or): cls = Or if expr.args[0] == And else And expr = cls(*[~arg for arg in expr.args[0].args]) args = [_extract_facts(arg, symbol) for arg in expr.args] if isinstance(expr, And): args = [x for x in args if x is not None] if args: return expr.func(*args) if args and all(x is not None for x in args): return expr.func(*args) def _extract_all_facts(expr, symbol): facts = set() if isinstance(symbol, Relational): symbols = (symbol, symbol.reversed) else: symbols = (symbol,) for clause in expr.clauses: args = [] for literal in clause: if isinstance(literal.lit, AppliedPredicate): if literal.lit.arg in symbols: # Add literal if it has 'symbol' in it args.append(Literal(literal.lit.func, literal.is_Not)) else: # If any of the literals doesn't have 'symbol' don't add the whole clause. break else: if args: facts.add(frozenset(args)) return CNF(facts) def ask(proposition, assumptions=True, context=global_assumptions): """ Method for inferring properties about objects. Explanation =========== **Syntax** * ask(proposition) * ask(proposition, assumptions) where ``proposition`` is any boolean expression Examples ======== >>> from sympy import ask, Q, pi >>> from sympy.abc import x, y >>> ask(Q.rational(pi)) False >>> ask(Q.even(x*y), Q.even(x) & Q.integer(y)) True >>> ask(Q.prime(4*x), Q.integer(x)) False **Remarks** Relations in assumptions are not implemented (yet), so the following will not give a meaningful result. >>> ask(Q.positive(x), Q.is_true(x > 0)) It is however a work in progress. """ from sympy.assumptions.satask import satask if not isinstance(proposition, (BooleanFunction, AppliedPredicate, bool, BooleanAtom)): raise TypeError("proposition must be a valid logical expression") if not isinstance(assumptions, (BooleanFunction, AppliedPredicate, bool, BooleanAtom)): raise TypeError("assumptions must be a valid logical expression") if isinstance(proposition, AppliedPredicate): key, expr = proposition.func, sympify(proposition.arg) else: key, expr = Q.is_true, sympify(proposition) assump = CNF.from_prop(assumptions) assump.extend(context) local_facts = _extract_all_facts(assump, expr) known_facts_cnf = get_all_known_facts() known_facts_dict = get_known_facts_dict() enc_cnf = EncodedCNF() enc_cnf.from_cnf(CNF(known_facts_cnf)) enc_cnf.add_from_cnf(local_facts) if local_facts.clauses and satisfiable(enc_cnf) is False: raise ValueError("inconsistent assumptions %s" % assumptions) if local_facts.clauses: if len(local_facts.clauses) == 1: cl, = local_facts.clauses f, = cl if len(cl)==1 else [None] if f and f.is_Not and f.arg in known_facts_dict.get(key, []): return False for clause in local_facts.clauses: if len(clause) == 1: f, = clause fdict = known_facts_dict.get(f.arg, None) if not f.is_Not else None if fdict and key in fdict: return True if fdict and Not(key) in known_facts_dict[f.arg]: return False # direct resolution method, no logic res = key(expr)._eval_ask(assumptions) if res is not None: return bool(res) # using satask (still costly) res = satask(proposition, assumptions=assumptions, context=context) return res def ask_full_inference(proposition, assumptions, known_facts_cnf): """ Method for inferring properties about objects. 
""" if not satisfiable(And(known_facts_cnf, assumptions, proposition)): return False if not satisfiable(And(known_facts_cnf, assumptions, Not(proposition))): return True return None def register_handler(key, handler): """ Register a handler in the ask system. key must be a string and handler a class inheriting from AskHandler:: >>> from sympy.assumptions import register_handler, ask, Q >>> from sympy.assumptions.handlers import AskHandler >>> class MersenneHandler(AskHandler): ... # Mersenne numbers are in the form 2**n - 1, n integer ... @staticmethod ... def Integer(expr, assumptions): ... from sympy import log ... return ask(Q.integer(log(expr + 1, 2))) >>> register_handler('mersenne', MersenneHandler) >>> ask(Q.mersenne(7)) True """ if type(key) is Predicate: key = key.name Qkey = getattr(Q, key, None) if Qkey is not None: Qkey.add_handler(handler) else: setattr(Q, key, Predicate(key, handlers=[handler])) def remove_handler(key, handler): """Removes a handler from the ask system. Same syntax as register_handler""" if type(key) is Predicate: key = key.name getattr(Q, key).remove_handler(handler) def single_fact_lookup(known_facts_keys, known_facts_cnf): # Compute the quick lookup for single facts mapping = {} for key in known_facts_keys: mapping[key] = {key} for other_key in known_facts_keys: if other_key != key: if ask_full_inference(other_key, key, known_facts_cnf): mapping[key].add(other_key) return mapping def compute_known_facts(known_facts, known_facts_keys): """Compute the various forms of knowledge compilation used by the assumptions system. Explanation =========== This function is typically applied to the results of the ``get_known_facts`` and ``get_known_facts_keys`` functions defined at the bottom of this file. """ from textwrap import dedent, wrap fact_string = dedent('''\ """ The contents of this file are the return value of ``sympy.assumptions.ask.compute_known_facts``. Do NOT manually edit this file. Instead, run ./bin/ask_update.py. 
""" from sympy.core.cache import cacheit from sympy.logic.boolalg import And from sympy.assumptions.cnf import Literal from sympy.assumptions.ask import Q # -{ Known facts as a set }- @cacheit def get_all_known_facts(): return { %s } # -{ Known facts in Conjunctive Normal Form }- @cacheit def get_known_facts_cnf(): return And( %s ) # -{ Known facts in compressed sets }- @cacheit def get_known_facts_dict(): return { %s } ''') # Compute the known facts in CNF form for logical inference LINE = ",\n " HANG = ' '*8 cnf = to_cnf(known_facts) cnf_ = CNF.to_CNF(known_facts) c = LINE.join([str(a) for a in cnf.args]) p = LINE.join(sorted(['frozenset((' + ', '.join(str(lit) for lit in sorted(clause, key=str)) +'))' for clause in cnf_.clauses])) mapping = single_fact_lookup(known_facts_keys, cnf) items = sorted(mapping.items(), key=str) keys = [str(i[0]) for i in items] values = ['set(%s)' % sorted(i[1], key=str) for i in items] m = LINE.join(['\n'.join( wrap("{}: {}".format(k, v), subsequent_indent=HANG, break_long_words=False)) for k, v in zip(keys, values)]) + ',' return fact_string % (p, c, m) # handlers tells us what ask handler we should use # for a particular key _val_template = 'sympy.assumptions.handlers.%s' _handlers = [ ("antihermitian", "sets.AskAntiHermitianHandler"), ("finite", "calculus.AskFiniteHandler"), ("commutative", "AskCommutativeHandler"), ("complex", "sets.AskComplexHandler"), ("composite", "ntheory.AskCompositeHandler"), ("even", "ntheory.AskEvenHandler"), ("extended_real", "sets.AskExtendedRealHandler"), ("hermitian", "sets.AskHermitianHandler"), ("imaginary", "sets.AskImaginaryHandler"), ("integer", "sets.AskIntegerHandler"), ("irrational", "sets.AskIrrationalHandler"), ("rational", "sets.AskRationalHandler"), ("negative", "order.AskNegativeHandler"), ("nonzero", "order.AskNonZeroHandler"), ("nonpositive", "order.AskNonPositiveHandler"), ("nonnegative", "order.AskNonNegativeHandler"), ("zero", "order.AskZeroHandler"), ("positive", "order.AskPositiveHandler"), ("prime", "ntheory.AskPrimeHandler"), ("real", "sets.AskRealHandler"), ("odd", "ntheory.AskOddHandler"), ("algebraic", "sets.AskAlgebraicHandler"), ("is_true", "common.TautologicalHandler"), ("symmetric", "matrices.AskSymmetricHandler"), ("invertible", "matrices.AskInvertibleHandler"), ("orthogonal", "matrices.AskOrthogonalHandler"), ("unitary", "matrices.AskUnitaryHandler"), ("positive_definite", "matrices.AskPositiveDefiniteHandler"), ("upper_triangular", "matrices.AskUpperTriangularHandler"), ("lower_triangular", "matrices.AskLowerTriangularHandler"), ("diagonal", "matrices.AskDiagonalHandler"), ("fullrank", "matrices.AskFullRankHandler"), ("square", "matrices.AskSquareHandler"), ("integer_elements", "matrices.AskIntegerElementsHandler"), ("real_elements", "matrices.AskRealElementsHandler"), ("complex_elements", "matrices.AskComplexElementsHandler"), ] for name, value in _handlers: register_handler(name, _val_template % value) @cacheit def get_known_facts_keys(): return [ getattr(Q, attr) for attr in Q.__class__.__dict__ if not attr.startswith('__')] @cacheit def get_known_facts(): return And( Implies(Q.infinite, ~Q.finite), Implies(Q.real, Q.complex), Implies(Q.real, Q.hermitian), Equivalent(Q.extended_real, Q.real | Q.infinite), Equivalent(Q.even | Q.odd, Q.integer), Implies(Q.even, ~Q.odd), Implies(Q.prime, Q.integer & Q.positive & ~Q.composite), Implies(Q.integer, Q.rational), Implies(Q.rational, Q.algebraic), Implies(Q.algebraic, Q.complex), Implies(Q.algebraic, Q.finite), Equivalent(Q.transcendental | 
Q.algebraic, Q.complex & Q.finite), Implies(Q.transcendental, ~Q.algebraic), Implies(Q.transcendental, Q.finite), Implies(Q.imaginary, Q.complex & ~Q.real), Implies(Q.imaginary, Q.antihermitian), Implies(Q.antihermitian, ~Q.hermitian), Equivalent(Q.irrational | Q.rational, Q.real & Q.finite), Implies(Q.irrational, ~Q.rational), Implies(Q.zero, Q.even), Equivalent(Q.real, Q.negative | Q.zero | Q.positive), Implies(Q.zero, ~Q.negative & ~Q.positive), Implies(Q.negative, ~Q.positive), Equivalent(Q.nonnegative, Q.zero | Q.positive), Equivalent(Q.nonpositive, Q.zero | Q.negative), Equivalent(Q.nonzero, Q.negative | Q.positive), Implies(Q.orthogonal, Q.positive_definite), Implies(Q.orthogonal, Q.unitary), Implies(Q.unitary & Q.real, Q.orthogonal), Implies(Q.unitary, Q.normal), Implies(Q.unitary, Q.invertible), Implies(Q.normal, Q.square), Implies(Q.diagonal, Q.normal), Implies(Q.positive_definite, Q.invertible), Implies(Q.diagonal, Q.upper_triangular), Implies(Q.diagonal, Q.lower_triangular), Implies(Q.lower_triangular, Q.triangular), Implies(Q.upper_triangular, Q.triangular), Implies(Q.triangular, Q.upper_triangular | Q.lower_triangular), Implies(Q.upper_triangular & Q.lower_triangular, Q.diagonal), Implies(Q.diagonal, Q.symmetric), Implies(Q.unit_triangular, Q.triangular), Implies(Q.invertible, Q.fullrank), Implies(Q.invertible, Q.square), Implies(Q.symmetric, Q.square), Implies(Q.fullrank & Q.square, Q.invertible), Equivalent(Q.invertible, ~Q.singular), Implies(Q.integer_elements, Q.real_elements), Implies(Q.real_elements, Q.complex_elements), ) from sympy.assumptions.ask_generated import ( get_known_facts_dict, get_all_known_facts)
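

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): a minimal
# demonstration of how the ask() pipeline defined above is typically
# exercised.  The queries below resolve through the known-facts table,
# e.g. ``Q.prime`` implies ``Q.integer & Q.positive``.
def _ask_usage_sketch():
    from sympy import Q, ask, symbols
    x = symbols('x')
    # Known-fact implication: every prime is a positive integer.
    assert ask(Q.integer(x), Q.prime(x)) is True
    assert ask(Q.positive(x), Q.prime(x)) is True
    # Without any assumption the query is undecidable, so ask() returns None.
    assert ask(Q.prime(x)) is None
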
import inspect from sympy.core.cache import cacheit from sympy.core.singleton import S from sympy.core.sympify import _sympify from sympy.logic.boolalg import Boolean from sympy.utilities.source import get_class from contextlib import contextmanager class AssumptionsContext(set): """ Set representing assumptions. Explanation =========== This is used to represent global assumptions, but you can also use this class to create your own local assumptions contexts. It is basically a thin wrapper to Python's set, so see its documentation for advanced usage. Examples ======== >>> from sympy import Q >>> from sympy.assumptions.assume import global_assumptions >>> global_assumptions AssumptionsContext() >>> from sympy.abc import x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>> global_assumptions.clear() """ def add(self, *assumptions): """Add an assumption.""" for a in assumptions: super().add(a) def _sympystr(self, printer): if not self: return "%s()" % self.__class__.__name__ return "{}({})".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean): """The class of expressions resulting from applying a Predicate. Examples ======== >>> from sympy import Q, Symbol >>> x = Symbol('x') >>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> """ __slots__ = () def __new__(cls, predicate, arg): arg = _sympify(arg) return Boolean.__new__(cls, predicate, arg) is_Atom = True # do not attempt to decompose this @property def arg(self): """ Return the expression used by this assumption. Examples ======== >>> from sympy import Q, Symbol >>> x = Symbol('x') >>> a = Q.integer(x + 1) >>> a.arg x + 1 """ return self._args[1] @property def args(self): return self._args[1:] @property def func(self): return self._args[0] @cacheit def sort_key(self, order=None): return (self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self, other): if type(other) is AppliedPredicate: return self._args == other._args return False def __hash__(self): return super().__hash__() def _eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions) @property def binary_symbols(self): from sympy.core.relational import Eq, Ne if self.func.name in ['is_true', 'is_false']: i = self.arg if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)): return i.binary_symbols return set() class Predicate(Boolean): """ A predicate is a function that returns a boolean value. 
Predicates merely wrap their argument and remain unevaluated: >>> from sympy import Q, ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7) >>> _.func.name 'prime' To obtain the truth value of an expression containing predicates, use the function ``ask``: >>> ask(Q.prime(7)) True The tautological predicate ``Q.is_true`` can be used to wrap other objects: >>> from sympy.abc import x >>> Q.is_true(x > 1) Q.is_true(x > 1) """ is_Atom = True def __new__(cls, name, handlers=None): obj = Boolean.__new__(cls) obj.name = name obj.handlers = handlers or [] return obj def _hashable_content(self): return (self.name,) def __getnewargs__(self): return (self.name,) def __call__(self, expr): return AppliedPredicate(self, expr) def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) @cacheit def sort_key(self, order=None): return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One def eval(self, expr, assumptions=True): """ Evaluate self(expr) under the given assumptions. This uses only direct resolution methods, not logical inference. """ res, _res = None, None mro = inspect.getmro(type(expr)) for handler in self.handlers: cls = get_class(handler) for subclass in mro: eval_ = getattr(cls, subclass.__name__, None) if eval_ is None: continue res = eval_(expr, assumptions) # Do not stop if value returned is None # Try to check for higher classes if res is None: continue if _res is None: _res = res elif res is None: # since first resolutor was conclusive, we keep that value res = _res else: # only check consistency if both resolutors have concluded if _res != res: raise ValueError('incompatible resolutors') break return res @contextmanager def assuming(*assumptions): """ Context manager for assumptions. Examples ======== >>> from sympy.assumptions import assuming, Q, ask >>> from sympy.abc import x, y >>> print(ask(Q.integer(x + y))) None >>> with assuming(Q.integer(x), Q.integer(y)): ... print(ask(Q.integer(x + y))) True """ old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try: yield finally: global_assumptions.clear() global_assumptions.update(old_global_assumptions)
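

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): the ``assuming``
# context manager defined above temporarily extends ``global_assumptions``,
# so the facts are visible to ask() only inside the ``with`` block.
def _assuming_usage_sketch():
    from sympy import Q, ask
    from sympy.abc import x
    with assuming(Q.integer(x), Q.positive(x)):
        assert ask(Q.integer(x)) is True
        assert ask(Q.positive(x)) is True
    # Outside the block the previous global context is restored, so
    # (assuming no pre-existing facts about x) the query is again undecidable.
    assert ask(Q.integer(x)) is None
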
""" The classes used here are for the internal use of assumptions system only and should not be used anywhere else as these don't possess the signatures common to SymPy objects. For general use of logic constructs please refer to sympy.logic classes And, Or, Not, etc. """ from itertools import combinations, product from sympy import S, Nor, Nand, Xor, Implies, Equivalent, ITE from sympy.logic.boolalg import Or, And, Not, Xnor from itertools import zip_longest class Literal: """ The smallest element of a CNF object. """ def __new__(cls, lit, is_Not=False): if isinstance(lit, Not): lit = lit.args[0] is_Not = True elif isinstance(lit, (AND, OR, Literal)): return ~lit if is_Not else lit obj = super().__new__(cls) obj.lit = lit obj.is_Not = is_Not return obj @property def arg(self): return self.lit def rcall(self, expr): if callable(self.lit): lit = self.lit(expr) else: try: lit = self.lit.apply(expr) except AttributeError: lit = self.lit.rcall(expr) return type(self)(lit, self.is_Not) def __invert__(self): is_Not = not self.is_Not return Literal(self.lit, is_Not) def __str__(self): return '{}({}, {})'.format(type(self).__name__, self.lit, self.is_Not) __repr__ = __str__ def __eq__(self, other): return self.arg == other.arg and self.is_Not == other.is_Not def __hash__(self): h = hash((type(self).__name__, self.arg, self.is_Not)) return h class OR: """ A low-level implementation for Or """ def __init__(self, *args): self._args = args @property def args(self): return sorted(self._args, key=str) def rcall(self, expr): return type(self)(*[arg.rcall(expr) for arg in self._args ]) def __invert__(self): return AND(*[~arg for arg in self._args]) def __hash__(self): return hash((type(self).__name__,) + tuple(self.args)) def __eq__(self, other): return self.args == other.args def __str__(self): s = '(' + ' | '.join([str(arg) for arg in self.args]) + ')' return s __repr__ = __str__ class AND: """ A low-level implementation for And """ def __init__(self, *args): self._args = args def __invert__(self): return OR(*[~arg for arg in self._args]) @property def args(self): return sorted(self._args, key=str) def rcall(self, expr): return type(self)(*[arg.rcall(expr) for arg in self._args ]) def __hash__(self): return hash((type(self).__name__,) + tuple(self.args)) def __eq__(self, other): return self.args == other.args def __str__(self): s = '('+' & '.join([str(arg) for arg in self.args])+')' return s __repr__ = __str__ def to_NNF(expr): """ Generates the Negation Normal Form of any boolean expression in terms of AND, OR, and Literal objects. 
""" if isinstance(expr, Not): arg = expr.args[0] tmp = to_NNF(arg) # Strategy: negate the NNF of expr return ~tmp if isinstance(expr, Or): return OR(*[to_NNF(x) for x in Or.make_args(expr)]) if isinstance(expr, And): return AND(*[to_NNF(x) for x in And.make_args(expr)]) if isinstance(expr, Nand): tmp = AND(*[to_NNF(x) for x in expr.args]) return ~tmp if isinstance(expr, Nor): tmp = OR(*[to_NNF(x) for x in expr.args]) return ~tmp if isinstance(expr, Xor): cnfs = [] for i in range(0, len(expr.args) + 1, 2): for neg in combinations(expr.args, i): clause = [~to_NNF(s) if s in neg else to_NNF(s) for s in expr.args] cnfs.append(OR(*clause)) return AND(*cnfs) if isinstance(expr, Xnor): cnfs = [] for i in range(0, len(expr.args) + 1, 2): for neg in combinations(expr.args, i): clause = [~to_NNF(s) if s in neg else to_NNF(s) for s in expr.args] cnfs.append(OR(*clause)) return ~AND(*cnfs) if isinstance(expr, Implies): L, R = to_NNF(expr.args[0]), to_NNF(expr.args[1]) return OR(~L, R) if isinstance(expr, Equivalent): cnfs = [] for a, b in zip_longest(expr.args, expr.args[1:], fillvalue=expr.args[0]): a = to_NNF(a) b = to_NNF(b) cnfs.append(OR(~a, b)) return AND(*cnfs) if isinstance(expr, ITE): L = to_NNF(expr.args[0]) M = to_NNF(expr.args[1]) R = to_NNF(expr.args[2]) return AND(OR(~L, M), OR(L, R)) else: return Literal(expr) def distribute_AND_over_OR(expr): """ Distributes AND over OR in the NNF expression. Returns the result( Conjunctive Normal Form of expression) as a CNF object. """ if not isinstance(expr, (AND, OR)): tmp = set() tmp.add(frozenset((expr,))) return CNF(tmp) if isinstance(expr, OR): return CNF.all_or(*[distribute_AND_over_OR(arg) for arg in expr._args]) if isinstance(expr, AND): return CNF.all_and(*[distribute_AND_over_OR(arg) for arg in expr._args]) class CNF: """ Class to represent CNF of a Boolean expression. Consists of set of clauses, which themselves are stored as frozenset of Literal objects. 
""" def __init__(self, clauses=None): if not clauses: clauses = set() self.clauses = clauses def add(self, prop): clauses = CNF.to_CNF(prop).clauses self.add_clauses(clauses) def __str__(self): s = ' & '.join( ['(' + ' | '.join([str(lit) for lit in clause]) +')' for clause in self.clauses] ) return s def extend(self, props): for p in props: self.add(p) return self def copy(self): return CNF(set(self.clauses)) def add_clauses(self, clauses): self.clauses |= clauses @classmethod def from_prop(cls, prop): res = cls() res.add(prop) return res def __iand__(self, other): self.add_clauses(other.clauses) return self def all_predicates(self): predicates = set() for c in self.clauses: predicates |= {arg.lit for arg in c} return predicates def _or(self, cnf): clauses = set() for a, b in product(self.clauses, cnf.clauses): tmp = set(a) for t in b: tmp.add(t) clauses.add(frozenset(tmp)) return CNF(clauses) def _and(self, cnf): clauses = self.clauses.union(cnf.clauses) return CNF(clauses) def _not(self): clss = list(self.clauses) ll = set() for x in clss[-1]: ll.add(frozenset((~x,))) ll = CNF(ll) for rest in clss[:-1]: p = set() for x in rest: p.add(frozenset((~x,))) ll = ll._or(CNF(p)) return ll def rcall(self, expr): clause_list = list() for clause in self.clauses: lits = [arg.rcall(expr) for arg in clause] clause_list.append(OR(*lits)) expr = AND(*clause_list) return distribute_AND_over_OR(expr) @classmethod def all_or(cls, *cnfs): b = cnfs[0].copy() for rest in cnfs[1:]: b = b._or(rest) return b @classmethod def all_and(cls, *cnfs): b = cnfs[0].copy() for rest in cnfs[1:]: b = b._and(rest) return b @classmethod def to_CNF(cls, expr): expr = to_NNF(expr) expr = distribute_AND_over_OR(expr) return expr @classmethod def CNF_to_cnf(cls, cnf): """ Converts CNF object to SymPy's boolean expression retaining the form of expression. """ def remove_literal(arg): return Not(arg.lit) if arg.is_Not else arg.lit return And(*(Or(*(remove_literal(arg) for arg in clause)) for clause in cnf.clauses)) class EncodedCNF: """ Class for encoding the CNF expression. """ def __init__(self, data=None, encoding=None): if not data and not encoding: data = list() encoding = dict() self.data = data self.encoding = encoding self._symbols = list(encoding.keys()) def from_cnf(self, cnf): self._symbols = list(cnf.all_predicates()) n = len(self._symbols) self.encoding = dict(list(zip(self._symbols, list(range(1, n + 1))))) self.data = [self.encode(clause) for clause in cnf.clauses] @property def symbols(self): return self._symbols @property def variables(self): return range(1, len(self._symbols) + 1) def copy(self): new_data = [set(clause) for clause in self.data] return EncodedCNF(new_data, dict(self.encoding)) def add_prop(self, prop): cnf = CNF.from_prop(prop) self.add_from_cnf(cnf) def add_from_cnf(self, cnf): clauses = [self.encode(clause) for clause in cnf.clauses] self.data += clauses def encode_arg(self, arg): literal = arg.lit value = self.encoding.get(literal, None) if value is None: n = len(self._symbols) self._symbols.append(literal) value = self.encoding[literal] = n + 1 if arg.is_Not: return -value else: return value def encode(self, clause): return {self.encode_arg(arg) if not arg.lit == S.false else 0 for arg in clause}
from collections import defaultdict from collections.abc import MutableMapping from sympy.assumptions.ask import Q from sympy.assumptions.assume import Predicate, AppliedPredicate from sympy.assumptions.cnf import AND, OR, to_NNF from sympy.core import (Add, Mul, Pow, Integer, Number, NumberSymbol,) from sympy.core.numbers import ImaginaryUnit from sympy.core.rules import Transform from sympy.core.sympify import _sympify from sympy.functions.elementary.complexes import Abs from sympy.logic.boolalg import (Equivalent, Implies, BooleanFunction) from sympy.matrices.expressions import MatMul # APIs here may be subject to change class UnevaluatedOnFree(BooleanFunction): """ Represents a Boolean function that remains unevaluated on free predicates. Explanation =========== This is intended to be a superclass of other classes, which define the behavior on singly applied predicates. A free predicate is a predicate that is not applied, or a combination thereof. For example, Q.zero or Or(Q.positive, Q.negative). A singly applied predicate is a free predicate applied everywhere to a single expression. For instance, Q.zero(x) and Or(Q.positive(x*y), Q.negative(x*y)) are singly applied, but Or(Q.positive(x), Q.negative(y)) and Or(Q.positive, Q.negative(y)) are not. The boolean literals True and False are considered to be both free and singly applied. This class raises ValueError unless the input is a free predicate or a singly applied predicate. On a free predicate, this class remains unevaluated. On a singly applied predicate, the method apply() is called and returned, or the original expression returned if apply() returns None. When apply() is called, self.expr is set to the unique expression that the predicates are applied at. self.pred is set to the free form of the predicate. The typical usage is to create this class with free predicates and evaluate it using .rcall(). """ def __new__(cls, arg): # Mostly type checking here arg = _sympify(arg) predicates = arg.atoms(Predicate) applied_predicates = arg.atoms(AppliedPredicate) if predicates and applied_predicates: raise ValueError("arg must be either completely free or singly applied") if not applied_predicates: obj = BooleanFunction.__new__(cls, arg) obj.pred = arg obj.expr = None return obj predicate_args = {pred.args[0] for pred in applied_predicates} if len(predicate_args) > 1: raise ValueError("The AppliedPredicates in arg must be applied to a single expression.") obj = BooleanFunction.__new__(cls, arg) obj.expr = predicate_args.pop() obj.pred = arg.xreplace(Transform(lambda e: e.func, lambda e: isinstance(e, AppliedPredicate))) applied = obj.apply(obj.expr) if applied is None: return obj return applied def apply(self, expr=None): if expr is None: return pred = to_NNF(self.pred) return self._eval_apply(expr, pred) def _eval_apply(self, expr, pred): return None class AllArgs(UnevaluatedOnFree): """ Class representing vectorizing a predicate over all the .args of an expression See the docstring of UnevaluatedOnFree for more information on this class. The typical usage is to evaluate predicates with expressions using .rcall(). 
Example ======= >>> from sympy.assumptions.sathandlers import AllArgs >>> from sympy import symbols, Q >>> x, y = symbols('x y') >>> a = AllArgs(Q.positive | Q.negative) >>> a AllArgs(Q.negative | Q.positive) >>> a.rcall(x*y) ((Literal(Q.negative(x), False) | Literal(Q.positive(x), False)) & (Literal(Q.negative(y), False) | \ Literal(Q.positive(y), False))) See Also ======== UnevaluatedOnFree """ def _eval_apply(self, expr, pred): return AND(*[pred.rcall(arg) for arg in expr.args]) class AnyArgs(UnevaluatedOnFree): """ Class representing vectorizing a predicate over any of the .args of an expression. See the docstring of UnevaluatedOnFree for more information on this class. The typical usage is to evaluate predicates with expressions using .rcall(). Example ======= >>> from sympy.assumptions.sathandlers import AnyArgs >>> from sympy import symbols, Q >>> x, y = symbols('x y') >>> a = AnyArgs(Q.positive & Q.negative) >>> a AnyArgs(Q.negative & Q.positive) >>> a.rcall(x*y) ((Literal(Q.negative(x), False) & Literal(Q.positive(x), False)) | (Literal(Q.negative(y), False) & \ Literal(Q.positive(y), False))) """ def _eval_apply(self, expr, pred): return OR(*[pred.rcall(arg) for arg in expr.args]) class ExactlyOneArg(UnevaluatedOnFree): """ Class representing a predicate holding on exactly one of the .args of an expression. See the docstring of UnevaluatedOnFree for more information on this class. The typical usage is to evaluate predicate with expressions using .rcall(). Example ======= >>> from sympy.assumptions.sathandlers import ExactlyOneArg >>> from sympy import symbols, Q >>> x, y = symbols('x y') >>> a = ExactlyOneArg(Q.positive) >>> a ExactlyOneArg(Q.positive) >>> a.rcall(x*y) ((Literal(Q.positive(x), False) & Literal(Q.positive(y), True)) | (Literal(Q.positive(x), True) & \ Literal(Q.positive(y), False))) """ def _eval_apply(self, expr, pred): pred_args = [pred.rcall(arg) for arg in expr.args] # Technically this is xor, but if one term in the disjunction is true, # it is not possible for the remainder to be true, so regular or is # fine in this case. res = OR(*[AND(pred_args[i], *[~lit for lit in pred_args[:i] + pred_args[i+1:]]) for i in range(len(pred_args))]) return res # Note: this is the equivalent cnf form. The above is more efficient # as the first argument of an implication, since p >> q is the same as # q | ~p, so the the ~ will convert the Or to and, and one just needs # to distribute the q across it to get to cnf. # return And(*[Or(*map(Not, c)) for c in combinations(pred_args, 2)]) & Or(*pred_args) def _old_assump_replacer(obj): if not isinstance(obj, AppliedPredicate): return obj e = obj.args[0] ret = None if obj.func == Q.positive: ret = e.is_positive elif obj.func == Q.zero: ret = e.is_zero elif obj.func == Q.negative: ret = e.is_negative elif obj.func == Q.nonpositive: ret = e.is_nonpositive elif obj.func == Q.nonzero: ret = e.is_nonzero elif obj.func == Q.nonnegative: ret = e.is_nonnegative elif obj.func == Q.rational: ret = e.is_rational elif obj.func == Q.irrational: ret = e.is_irrational elif obj.func == Q.even: ret = e.is_even elif obj.func == Q.odd: ret = e.is_odd elif obj.func == Q.integer: ret = e.is_integer elif obj.func == Q.composite: ret = e.is_composite elif obj.func == Q.imaginary: ret = e.is_imaginary elif obj.func == Q.commutative: ret = e.is_commutative if ret is None: return obj return ret def evaluate_old_assump(pred): """ Replace assumptions of expressions replaced with their values in the old assumptions (like Q.negative(-1) => True). 
Useful because some direct computations for numeric objects is defined most conveniently in the old assumptions. """ return pred.xreplace(Transform(_old_assump_replacer)) class CheckOldAssump(UnevaluatedOnFree): def apply(self, expr=None, is_Not=False): arg = self.args[0](expr) if callable(self.args[0]) else self.args[0] res = Equivalent(arg, evaluate_old_assump(arg)) return to_NNF(res) class CheckIsPrime(UnevaluatedOnFree): def apply(self, expr=None, is_Not=False): from sympy import isprime arg = self.args[0](expr) if callable(self.args[0]) else self.args[0] res = Equivalent(arg, isprime(expr)) return to_NNF(res) class CustomLambda: """ Interface to lambda with rcall Workaround until we get a better way to represent certain facts. """ def __init__(self, lamda): self.lamda = lamda def apply(self, *args): return to_NNF(self.lamda(*args)) class ClassFactRegistry(MutableMapping): """ Register handlers against classes. Explanation =========== ``registry[C] = handler`` registers ``handler`` for class ``C``. ``registry[C]`` returns a set of handlers for class ``C``, or any of its superclasses. """ def __init__(self, d=None): d = d or {} self.d = defaultdict(frozenset, d) super().__init__() def __setitem__(self, key, item): self.d[key] = frozenset(item) def __getitem__(self, key): ret = self.d[key] for k in self.d: if issubclass(key, k): ret |= self.d[k] return ret def __delitem__(self, key): del self.d[key] def __iter__(self): return self.d.__iter__() def __len__(self): return len(self.d) def __repr__(self): return repr(self.d) fact_registry = ClassFactRegistry() def register_fact(klass, fact, registry=fact_registry): registry[klass] |= {fact} for klass, fact in [ (Mul, Equivalent(Q.zero, AnyArgs(Q.zero))), (MatMul, Implies(AllArgs(Q.square), Equivalent(Q.invertible, AllArgs(Q.invertible)))), (Add, Implies(AllArgs(Q.positive), Q.positive)), (Add, Implies(AllArgs(Q.negative), Q.negative)), (Mul, Implies(AllArgs(Q.positive), Q.positive)), (Mul, Implies(AllArgs(Q.commutative), Q.commutative)), (Mul, Implies(AllArgs(Q.real), Q.commutative)), (Pow, CustomLambda(lambda power: Implies(Q.real(power.base) & Q.even(power.exp) & Q.nonnegative(power.exp), Q.nonnegative(power)))), (Pow, CustomLambda(lambda power: Implies(Q.nonnegative(power.base) & Q.odd(power.exp) & Q.nonnegative(power.exp), Q.nonnegative(power)))), (Pow, CustomLambda(lambda power: Implies(Q.nonpositive(power.base) & Q.odd(power.exp) & Q.nonnegative(power.exp), Q.nonpositive(power)))), # This one can still be made easier to read. I think we need basic pattern # matching, so that we can just write Equivalent(Q.zero(x**y), Q.zero(x) & Q.positive(y)) (Pow, CustomLambda(lambda power: Equivalent(Q.zero(power), Q.zero(power.base) & Q.positive(power.exp)))), (Integer, CheckIsPrime(Q.prime)), (Integer, CheckOldAssump(Q.composite)), # Implicitly assumes Mul has more than one arg # Would be AllArgs(Q.prime | Q.composite) except 1 is composite (Mul, Implies(AllArgs(Q.prime), ~Q.prime)), # More advanced prime assumptions will require inequalities, as 1 provides # a corner case. 
(Mul, Implies(AllArgs(Q.imaginary | Q.real), Implies(ExactlyOneArg(Q.imaginary), Q.imaginary))), (Mul, Implies(AllArgs(Q.real), Q.real)), (Add, Implies(AllArgs(Q.real), Q.real)), # General Case: Odd number of imaginary args implies mul is imaginary(To be implemented) (Mul, Implies(AllArgs(Q.real), Implies(ExactlyOneArg(Q.irrational), Q.irrational))), (Add, Implies(AllArgs(Q.real), Implies(ExactlyOneArg(Q.irrational), Q.irrational))), (Mul, Implies(AllArgs(Q.rational), Q.rational)), (Add, Implies(AllArgs(Q.rational), Q.rational)), (Abs, Q.nonnegative), (Abs, Equivalent(AllArgs(~Q.zero), ~Q.zero)), # Including the integer qualification means we don't need to add any facts # for odd, since the assumptions already know that every integer is # exactly one of even or odd. (Mul, Implies(AllArgs(Q.integer), Equivalent(AnyArgs(Q.even), Q.even))), (Abs, Implies(AllArgs(Q.even), Q.even)), (Abs, Implies(AllArgs(Q.odd), Q.odd)), (Add, Implies(AllArgs(Q.integer), Q.integer)), (Add, Implies(ExactlyOneArg(~Q.integer), ~Q.integer)), (Mul, Implies(AllArgs(Q.integer), Q.integer)), (Mul, Implies(ExactlyOneArg(~Q.rational), ~Q.integer)), (Abs, Implies(AllArgs(Q.integer), Q.integer)), (Number, CheckOldAssump(Q.negative)), (Number, CheckOldAssump(Q.zero)), (Number, CheckOldAssump(Q.positive)), (Number, CheckOldAssump(Q.nonnegative)), (Number, CheckOldAssump(Q.nonzero)), (Number, CheckOldAssump(Q.nonpositive)), (Number, CheckOldAssump(Q.rational)), (Number, CheckOldAssump(Q.irrational)), (Number, CheckOldAssump(Q.even)), (Number, CheckOldAssump(Q.odd)), (Number, CheckOldAssump(Q.integer)), (Number, CheckOldAssump(Q.imaginary)), # For some reason NumberSymbol does not subclass Number (NumberSymbol, CheckOldAssump(Q.negative)), (NumberSymbol, CheckOldAssump(Q.zero)), (NumberSymbol, CheckOldAssump(Q.positive)), (NumberSymbol, CheckOldAssump(Q.nonnegative)), (NumberSymbol, CheckOldAssump(Q.nonzero)), (NumberSymbol, CheckOldAssump(Q.nonpositive)), (NumberSymbol, CheckOldAssump(Q.rational)), (NumberSymbol, CheckOldAssump(Q.irrational)), (NumberSymbol, CheckOldAssump(Q.imaginary)), (ImaginaryUnit, CheckOldAssump(Q.negative)), (ImaginaryUnit, CheckOldAssump(Q.zero)), (ImaginaryUnit, CheckOldAssump(Q.positive)), (ImaginaryUnit, CheckOldAssump(Q.nonnegative)), (ImaginaryUnit, CheckOldAssump(Q.nonzero)), (ImaginaryUnit, CheckOldAssump(Q.nonpositive)), (ImaginaryUnit, CheckOldAssump(Q.rational)), (ImaginaryUnit, CheckOldAssump(Q.irrational)), (ImaginaryUnit, CheckOldAssump(Q.imaginary)) ]: register_fact(klass, fact)
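

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): registering a
# class fact in a private registry and reading it back.  The facts used here
# duplicate shipped ones and are for demonstration only.
def _fact_registry_sketch():
    from sympy import Q, Abs
    from sympy.logic.boolalg import Implies
    local_registry = ClassFactRegistry()
    register_fact(Abs, Q.nonnegative, registry=local_registry)
    register_fact(Abs, Implies(AllArgs(Q.integer), Q.integer),
                  registry=local_registry)
    # Lookup returns the facts registered for the class and all of its
    # superclasses.
    assert Q.nonnegative in local_registry[Abs]
    assert len(local_registry[Abs]) == 2
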
r""" This module is intended for solving recurrences or, in other words, difference equations. Currently supported are linear, inhomogeneous equations with polynomial or rational coefficients. The solutions are obtained among polynomials, rational functions, hypergeometric terms, or combinations of hypergeometric term which are pairwise dissimilar. ``rsolve_X`` functions were meant as a low level interface for ``rsolve`` which would use Mathematica's syntax. Given a recurrence relation: .. math:: a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) + ... + a_{0}(n) y(n) = f(n) where `k > 0` and `a_{i}(n)` are polynomials in `n`. To use ``rsolve_X`` we need to put all coefficients in to a list ``L`` of `k+1` elements the following way: ``L = [a_{0}(n), ..., a_{k-1}(n), a_{k}(n)]`` where ``L[i]``, for `i=0, \ldots, k`, maps to `a_{i}(n) y(n+i)` (`y(n+i)` is implicit). For example if we would like to compute `m`-th Bernoulli polynomial up to a constant (example was taken from rsolve_poly docstring), then we would use `b(n+1) - b(n) = m n^{m-1}` recurrence, which has solution `b(n) = B_m + C`. Then ``L = [-1, 1]`` and `f(n) = m n^(m-1)` and finally for `m=4`: >>> from sympy import Symbol, bernoulli, rsolve_poly >>> n = Symbol('n', integer=True) >>> rsolve_poly([-1, 1], 4*n**3, n) C0 + n**4 - 2*n**3 + n**2 >>> bernoulli(4, n) n**4 - 2*n**3 + n**2 - 1/30 For the sake of completeness, `f(n)` can be: [1] a polynomial -> rsolve_poly [2] a rational function -> rsolve_ratio [3] a hypergeometric function -> rsolve_hyper """ from collections import defaultdict from sympy.core.singleton import S from sympy.core.numbers import Rational, I from sympy.core.symbol import Symbol, Wild, Dummy from sympy.core.relational import Equality from sympy.core.add import Add from sympy.core.mul import Mul from sympy.core import sympify from sympy.simplify import simplify, hypersimp, hypersimilar # type: ignore from sympy.solvers import solve, solve_undetermined_coeffs from sympy.polys import Poly, quo, gcd, lcm, roots, resultant from sympy.functions import binomial, factorial, FallingFactorial, RisingFactorial from sympy.matrices import Matrix, casoratian from sympy.concrete import product from sympy.core.compatibility import default_sort_key from sympy.utilities.iterables import numbered_symbols def rsolve_poly(coeffs, f, n, shift=0, **hints): r""" Given linear recurrence operator `\operatorname{L}` of order `k` with polynomial coefficients and inhomogeneous equation `\operatorname{L} y = f`, where `f` is a polynomial, we seek for all polynomial solutions over field `K` of characteristic zero. The algorithm performs two basic steps: (1) Compute degree `N` of the general polynomial solution. (2) Find all polynomials of degree `N` or less of `\operatorname{L} y = f`. There are two methods for computing the polynomial solutions. If the degree bound is relatively small, i.e. it's smaller than or equal to the order of the recurrence, then naive method of undetermined coefficients is being used. This gives system of algebraic equations with `N+1` unknowns. In the other case, the algorithm performs transformation of the initial equation to an equivalent one, for which the system of algebraic equations has only `r` indeterminates. This method is quite sophisticated (in comparison with the naive one) and was invented together by Abramov, Bronstein and Petkovsek. It is possible to generalize the algorithm implemented here to the case of linear q-difference and differential equations. 
Lets say that we would like to compute `m`-th Bernoulli polynomial up to a constant. For this we can use `b(n+1) - b(n) = m n^{m-1}` recurrence, which has solution `b(n) = B_m + C`. For example: >>> from sympy import Symbol, rsolve_poly >>> n = Symbol('n', integer=True) >>> rsolve_poly([-1, 1], 4*n**3, n) C0 + n**4 - 2*n**3 + n**2 References ========== .. [1] S. A. Abramov, M. Bronstein and M. Petkovsek, On polynomial solutions of linear operator equations, in: T. Levelt, ed., Proc. ISSAC '95, ACM Press, New York, 1995, 290-296. .. [2] M. Petkovsek, Hypergeometric solutions of linear recurrences with polynomial coefficients, J. Symbolic Computation, 14 (1992), 243-264. .. [3] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996. """ f = sympify(f) if not f.is_polynomial(n): return None homogeneous = f.is_zero r = len(coeffs) - 1 coeffs = [Poly(coeff, n) for coeff in coeffs] polys = [Poly(0, n)]*(r + 1) terms = [(S.Zero, S.NegativeInfinity)]*(r + 1) for i in range(r + 1): for j in range(i, r + 1): polys[i] += coeffs[j]*(binomial(j, i).as_poly(n)) if not polys[i].is_zero: (exp,), coeff = polys[i].LT() terms[i] = (coeff, exp) d = b = terms[0][1] for i in range(1, r + 1): if terms[i][1] > d: d = terms[i][1] if terms[i][1] - i > b: b = terms[i][1] - i d, b = int(d), int(b) x = Dummy('x') degree_poly = S.Zero for i in range(r + 1): if terms[i][1] - i == b: degree_poly += terms[i][0]*FallingFactorial(x, i) nni_roots = list(roots(degree_poly, x, filter='Z', predicate=lambda r: r >= 0).keys()) if nni_roots: N = [max(nni_roots)] else: N = [] if homogeneous: N += [-b - 1] else: N += [f.as_poly(n).degree() - b, -b - 1] N = int(max(N)) if N < 0: if homogeneous: if hints.get('symbols', False): return (S.Zero, []) else: return S.Zero else: return None if N <= r: C = [] y = E = S.Zero for i in range(N + 1): C.append(Symbol('C' + str(i + shift))) y += C[i] * n**i for i in range(r + 1): E += coeffs[i].as_expr()*y.subs(n, n + i) solutions = solve_undetermined_coeffs(E - f, C, n) if solutions is not None: C = [c for c in C if (c not in solutions)] result = y.subs(solutions) else: return None # TBD else: A = r U = N + A + b + 1 nni_roots = list(roots(polys[r], filter='Z', predicate=lambda r: r >= 0).keys()) if nni_roots != []: a = max(nni_roots) + 1 else: a = S.Zero def _zero_vector(k): return [S.Zero] * k def _one_vector(k): return [S.One] * k def _delta(p, k): B = S.One D = p.subs(n, a + k) for i in range(1, k + 1): B *= Rational(i - k - 1, i) D += B * p.subs(n, a + k - i) return D alpha = {} for i in range(-A, d + 1): I = _one_vector(d + 1) for k in range(1, d + 1): I[k] = I[k - 1] * (x + i - k + 1)/k alpha[i] = S.Zero for j in range(A + 1): for k in range(d + 1): B = binomial(k, i + j) D = _delta(polys[j].as_expr(), k) alpha[i] += I[k]*B*D V = Matrix(U, A, lambda i, j: int(i == j)) if homogeneous: for i in range(A, U): v = _zero_vector(A) for k in range(1, A + b + 1): if i - k < 0: break B = alpha[k - A].subs(x, i - k) for j in range(A): v[j] += B * V[i - k, j] denom = alpha[-A].subs(x, i) for j in range(A): V[i, j] = -v[j] / denom else: G = _zero_vector(U) for i in range(A, U): v = _zero_vector(A) g = S.Zero for k in range(1, A + b + 1): if i - k < 0: break B = alpha[k - A].subs(x, i - k) for j in range(A): v[j] += B * V[i - k, j] g += B * G[i - k] denom = alpha[-A].subs(x, i) for j in range(A): V[i, j] = -v[j] / denom G[i] = (_delta(f, i - A) - g) / denom P, Q = _one_vector(U), _zero_vector(A) for i in range(1, U): P[i] = (P[i - 1] * (n - a - i + 1)/i).expand() for i in range(A): Q[i] = 
Add(*[(v*p).expand() for v, p in zip(V[:, i], P)]) if not homogeneous: h = Add(*[(g*p).expand() for g, p in zip(G, P)]) C = [Symbol('C' + str(i + shift)) for i in range(A)] g = lambda i: Add(*[c*_delta(q, i) for c, q in zip(C, Q)]) if homogeneous: E = [g(i) for i in range(N + 1, U)] else: E = [g(i) + _delta(h, i) for i in range(N + 1, U)] if E != []: solutions = solve(E, *C) if not solutions: if homogeneous: if hints.get('symbols', False): return (S.Zero, []) else: return S.Zero else: return None else: solutions = {} if homogeneous: result = S.Zero else: result = h for c, q in list(zip(C, Q)): if c in solutions: s = solutions[c]*q C.remove(c) else: s = c*q result += s.expand() if hints.get('symbols', False): return (result, C) else: return result def rsolve_ratio(coeffs, f, n, **hints): r""" Given linear recurrence operator `\operatorname{L}` of order `k` with polynomial coefficients and inhomogeneous equation `\operatorname{L} y = f`, where `f` is a polynomial, we seek for all rational solutions over field `K` of characteristic zero. This procedure accepts only polynomials, however if you are interested in solving recurrence with rational coefficients then use ``rsolve`` which will pre-process the given equation and run this procedure with polynomial arguments. The algorithm performs two basic steps: (1) Compute polynomial `v(n)` which can be used as universal denominator of any rational solution of equation `\operatorname{L} y = f`. (2) Construct new linear difference equation by substitution `y(n) = u(n)/v(n)` and solve it for `u(n)` finding all its polynomial solutions. Return ``None`` if none were found. Algorithm implemented here is a revised version of the original Abramov's algorithm, developed in 1989. The new approach is much simpler to implement and has better overall efficiency. This method can be easily adapted to q-difference equations case. Besides finding rational solutions alone, this functions is an important part of Hyper algorithm were it is used to find particular solution of inhomogeneous part of a recurrence. Examples ======== >>> from sympy.abc import x >>> from sympy.solvers.recurr import rsolve_ratio >>> rsolve_ratio([-2*x**3 + x**2 + 2*x - 1, 2*x**3 + x**2 - 6*x, ... - 2*x**3 - 11*x**2 - 18*x - 9, 2*x**3 + 13*x**2 + 22*x + 8], 0, x) C2*(2*x - 3)/(2*(x**2 - 1)) References ========== .. [1] S. A. Abramov, Rational solutions of linear difference and q-difference equations with polynomial coefficients, in: T. Levelt, ed., Proc. 
ISSAC '95, ACM Press, New York, 1995, 285-289 See Also ======== rsolve_hyper """ f = sympify(f) if not f.is_polynomial(n): return None coeffs = list(map(sympify, coeffs)) r = len(coeffs) - 1 A, B = coeffs[r], coeffs[0] A = A.subs(n, n - r).expand() h = Dummy('h') res = resultant(A, B.subs(n, n + h), n) if not res.is_polynomial(h): p, q = res.as_numer_denom() res = quo(p, q, h) nni_roots = list(roots(res, h, filter='Z', predicate=lambda r: r >= 0).keys()) if not nni_roots: return rsolve_poly(coeffs, f, n, **hints) else: C, numers = S.One, [S.Zero]*(r + 1) for i in range(int(max(nni_roots)), -1, -1): d = gcd(A, B.subs(n, n + i), n) A = quo(A, d, n) B = quo(B, d.subs(n, n - i), n) C *= Mul(*[d.subs(n, n - j) for j in range(i + 1)]) denoms = [C.subs(n, n + i) for i in range(r + 1)] for i in range(r + 1): g = gcd(coeffs[i], denoms[i], n) numers[i] = quo(coeffs[i], g, n) denoms[i] = quo(denoms[i], g, n) for i in range(r + 1): numers[i] *= Mul(*(denoms[:i] + denoms[i + 1:])) result = rsolve_poly(numers, f * Mul(*denoms), n, **hints) if result is not None: if hints.get('symbols', False): return (simplify(result[0] / C), result[1]) else: return simplify(result / C) else: return None def rsolve_hyper(coeffs, f, n, **hints): r""" Given linear recurrence operator `\operatorname{L}` of order `k` with polynomial coefficients and inhomogeneous equation `\operatorname{L} y = f` we seek for all hypergeometric solutions over field `K` of characteristic zero. The inhomogeneous part can be either hypergeometric or a sum of a fixed number of pairwise dissimilar hypergeometric terms. The algorithm performs three basic steps: (1) Group together similar hypergeometric terms in the inhomogeneous part of `\operatorname{L} y = f`, and find particular solution using Abramov's algorithm. (2) Compute generating set of `\operatorname{L}` and find basis in it, so that all solutions are linearly independent. (3) Form final solution with the number of arbitrary constants equal to dimension of basis of `\operatorname{L}`. Term `a(n)` is hypergeometric if it is annihilated by first order linear difference equations with polynomial coefficients or, in simpler words, if consecutive term ratio is a rational function. The output of this procedure is a linear combination of fixed number of hypergeometric terms. However the underlying method can generate larger class of solutions - D'Alembertian terms. Note also that this method not only computes the kernel of the inhomogeneous equation, but also reduces in to a basis so that solutions generated by this procedure are linearly independent Examples ======== >>> from sympy.solvers import rsolve_hyper >>> from sympy.abc import x >>> rsolve_hyper([-1, -1, 1], 0, x) C0*(1/2 - sqrt(5)/2)**x + C1*(1/2 + sqrt(5)/2)**x >>> rsolve_hyper([-1, 1], 1 + x, x) C0 + x*(x + 1)/2 References ========== .. [1] M. Petkovsek, Hypergeometric solutions of linear recurrences with polynomial coefficients, J. Symbolic Computation, 14 (1992), 243-264. .. [2] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996. 
""" coeffs = list(map(sympify, coeffs)) f = sympify(f) r, kernel, symbols = len(coeffs) - 1, [], set() if not f.is_zero: if f.is_Add: similar = {} for g in f.expand().args: if not g.is_hypergeometric(n): return None for h in similar.keys(): if hypersimilar(g, h, n): similar[h] += g break else: similar[g] = S.Zero inhomogeneous = [] for g, h in similar.items(): inhomogeneous.append(g + h) elif f.is_hypergeometric(n): inhomogeneous = [f] else: return None for i, g in enumerate(inhomogeneous): coeff, polys = S.One, coeffs[:] denoms = [S.One]*(r + 1) s = hypersimp(g, n) for j in range(1, r + 1): coeff *= s.subs(n, n + j - 1) p, q = coeff.as_numer_denom() polys[j] *= p denoms[j] = q for j in range(r + 1): polys[j] *= Mul(*(denoms[:j] + denoms[j + 1:])) R = rsolve_poly(polys, Mul(*denoms), n) if not (R is None or R is S.Zero): inhomogeneous[i] *= R else: return None result = Add(*inhomogeneous) else: result = S.Zero Z = Dummy('Z') p, q = coeffs[0], coeffs[r].subs(n, n - r + 1) p_factors = [z for z in roots(p, n).keys()] q_factors = [z for z in roots(q, n).keys()] factors = [(S.One, S.One)] for p in p_factors: for q in q_factors: if p.is_integer and q.is_integer and p <= q: continue else: factors += [(n - p, n - q)] p = [(n - p, S.One) for p in p_factors] q = [(S.One, n - q) for q in q_factors] factors = p + factors + q for A, B in factors: polys, degrees = [], [] D = A*B.subs(n, n + r - 1) for i in range(r + 1): a = Mul(*[A.subs(n, n + j) for j in range(i)]) b = Mul(*[B.subs(n, n + j) for j in range(i, r)]) poly = quo(coeffs[i]*a*b, D, n) polys.append(poly.as_poly(n)) if not poly.is_zero: degrees.append(polys[i].degree()) if degrees: d, poly = max(degrees), S.Zero else: return None for i in range(r + 1): coeff = polys[i].nth(d) if coeff is not S.Zero: poly += coeff * Z**i for z in roots(poly, Z).keys(): if z.is_zero: continue recurr_coeffs = [polys[i].as_expr()*z**i for i in range(r + 1)] if d == 0 and 0 != Add(*[recurr_coeffs[j]*j for j in range(1, r + 1)]): # faster inline check (than calling rsolve_poly) for a # constant solution to a constant coefficient recurrence. C = Symbol("C" + str(len(symbols))) s = [C] else: C, s = rsolve_poly(recurr_coeffs, 0, n, len(symbols), symbols=True) if C is not None and C is not S.Zero: symbols |= set(s) ratio = z * A * C.subs(n, n + 1) / B / C ratio = simplify(ratio) # If there is a nonnegative root in the denominator of the ratio, # this indicates that the term y(n_root) is zero, and one should # start the product with the term y(n_root + 1). n0 = 0 for n_root in roots(ratio.as_numer_denom()[1], n).keys(): if n_root.has(I): return None elif (n0 < (n_root + 1)) == True: n0 = n_root + 1 K = product(ratio, (n, n0, n - 1)) if K.has(factorial, FallingFactorial, RisingFactorial): K = simplify(K) if casoratian(kernel + [K], n, zero=False) != 0: kernel.append(K) kernel.sort(key=default_sort_key) sk = list(zip(numbered_symbols('C'), kernel)) if sk: for C, ker in sk: result += C * ker else: return None if hints.get('symbols', False): # XXX: This returns the symbols in a non-deterministic order symbols |= {s for s, k in sk} return (result, list(symbols)) else: return result def rsolve(f, y, init=None): r""" Solve univariate recurrence with rational coefficients. Given `k`-th order linear recurrence `\operatorname{L} y = f`, or equivalently: .. 
math:: a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) + \cdots + a_{0}(n) y(n) = f(n) where `a_{i}(n)`, for `i=0, \ldots, k`, are polynomials or rational functions in `n`, and `f` is a hypergeometric function or a sum of a fixed number of pairwise dissimilar hypergeometric terms in `n`, finds all solutions or returns ``None``, if none were found. Initial conditions can be given as a dictionary in two forms: (1) ``{ n_0 : v_0, n_1 : v_1, ..., n_m : v_m}`` (2) ``{y(n_0) : v_0, y(n_1) : v_1, ..., y(n_m) : v_m}`` or as a list ``L`` of values: ``L = [v_0, v_1, ..., v_m]`` where ``L[i] = v_i``, for `i=0, \ldots, m`, maps to `y(n_i)`. Examples ======== Lets consider the following recurrence: .. math:: (n - 1) y(n + 2) - (n^2 + 3 n - 2) y(n + 1) + 2 n (n + 1) y(n) = 0 >>> from sympy import Function, rsolve >>> from sympy.abc import n >>> y = Function('y') >>> f = (n - 1)*y(n + 2) - (n**2 + 3*n - 2)*y(n + 1) + 2*n*(n + 1)*y(n) >>> rsolve(f, y(n)) 2**n*C0 + C1*factorial(n) >>> rsolve(f, y(n), {y(0):0, y(1):3}) 3*2**n - 3*factorial(n) See Also ======== rsolve_poly, rsolve_ratio, rsolve_hyper """ if isinstance(f, Equality): f = f.lhs - f.rhs n = y.args[0] k = Wild('k', exclude=(n,)) # Preprocess user input to allow things like # y(n) + a*(y(n + 1) + y(n - 1))/2 f = f.expand().collect(y.func(Wild('m', integer=True))) h_part = defaultdict(list) i_part = [] for g in Add.make_args(f): coeff, dep = g.as_coeff_mul(y.func) if not dep: i_part.append(coeff) continue for h in dep: if h.is_Function and h.func == y.func: result = h.args[0].match(n + k) if result is not None: h_part[int(result[k])].append(coeff) continue raise ValueError( "'%s(%s + k)' expected, got '%s'" % (y.func, n, h)) for k in h_part: h_part[k] = Add(*h_part[k]) h_part.default_factory = lambda: 0 i_part = Add(*i_part) for k, coeff in h_part.items(): h_part[k] = simplify(coeff) common = S.One if not i_part.is_zero and not i_part.is_hypergeometric(n) and \ not (i_part.is_Add and all(map(lambda x: x.is_hypergeometric(n), i_part.expand().args))): raise ValueError("The independent term should be a sum of hypergeometric functions, got '%s'" % i_part) for coeff in h_part.values(): if coeff.is_rational_function(n): if not coeff.is_polynomial(n): common = lcm(common, coeff.as_numer_denom()[1], n) else: raise ValueError( "Polynomial or rational function expected, got '%s'" % coeff) i_numer, i_denom = i_part.as_numer_denom() if i_denom.is_polynomial(n): common = lcm(common, i_denom, n) if common is not S.One: for k, coeff in h_part.items(): numer, denom = coeff.as_numer_denom() h_part[k] = numer*quo(common, denom, n) i_part = i_numer*quo(common, i_denom, n) K_min = min(h_part.keys()) if K_min < 0: K = abs(K_min) H_part = defaultdict(lambda: S.Zero) i_part = i_part.subs(n, n + K).expand() common = common.subs(n, n + K).expand() for k, coeff in h_part.items(): H_part[k + K] = coeff.subs(n, n + K).expand() else: H_part = h_part K_max = max(H_part.keys()) coeffs = [H_part[i] for i in range(K_max + 1)] result = rsolve_hyper(coeffs, -i_part, n, symbols=True) if result is None: return None solution, symbols = result if init == {} or init == []: init = None if symbols and init is not None: if isinstance(init, list): init = {i: init[i] for i in range(len(init))} equations = [] for k, v in init.items(): try: i = int(k) except TypeError: if k.is_Function and k.func == y.func: i = int(k.args[0]) else: raise ValueError("Integer or term expected, got '%s'" % k) eq = solution.subs(n, i) - v if eq.has(S.NaN): eq = solution.limit(n, i) - v equations.append(eq) result = 
solve(equations, *symbols) if not result: return None else: solution = solution.subs(result) return solution
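# ---------------------------------------------------------------------------
# Minimal usage sketch for the rsolve() entry point defined above.  This is
# an illustration only; the comments paraphrase the expected results rather
# than asserting exact output strings.
if __name__ == "__main__":
    from sympy import Function
    from sympy.abc import n

    y = Function('y')

    # Fibonacci-type recurrence: y(n + 2) = y(n + 1) + y(n).
    fib = y(n + 2) - y(n + 1) - y(n)

    # General solution: a linear combination C0*((1 - sqrt(5))/2)**n +
    # C1*((1 + sqrt(5))/2)**n of the two geometric kernels found by
    # rsolve_hyper().
    print(rsolve(fib, y(n)))

    # Initial conditions y(0) = 0, y(1) = 1 fix the constants and give
    # Binet's formula.
    print(rsolve(fib, y(n), {y(0): 0, y(1): 1}))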
""" This module contains pdsolve() and different helper functions that it uses. It is heavily inspired by the ode module and hence the basic infrastructure remains the same. **Functions in this module** These are the user functions in this module: - pdsolve() - Solves PDE's - classify_pde() - Classifies PDEs into possible hints for dsolve(). - pde_separate() - Separate variables in partial differential equation either by additive or multiplicative separation approach. These are the helper functions in this module: - pde_separate_add() - Helper function for searching additive separable solutions. - pde_separate_mul() - Helper function for searching multiplicative separable solutions. **Currently implemented solver methods** The following methods are implemented for solving partial differential equations. See the docstrings of the various pde_hint() functions for more information on each (run help(pde)): - 1st order linear homogeneous partial differential equations with constant coefficients. - 1st order linear general partial differential equations with constant coefficients. - 1st order linear partial differential equations with variable coefficients. """ from functools import reduce from itertools import combinations_with_replacement from sympy.simplify import simplify # type: ignore from sympy.core import Add, S from sympy.core.compatibility import is_sequence from sympy.core.function import Function, expand, AppliedUndef, Subs from sympy.core.relational import Equality, Eq from sympy.core.symbol import Symbol, Wild, symbols from sympy.functions import exp from sympy.integrals.integrals import Integral from sympy.utilities.iterables import has_dups from sympy.utilities.misc import filldedent from sympy.solvers.deutils import _preprocess, ode_order, _desolve from sympy.solvers.solvers import solve from sympy.simplify.radsimp import collect import operator allhints = ( "1st_linear_constant_coeff_homogeneous", "1st_linear_constant_coeff", "1st_linear_constant_coeff_Integral", "1st_linear_variable_coeff" ) def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs): """ Solves any (supported) kind of partial differential equation. **Usage** pdsolve(eq, f(x,y), hint) -> Solve partial differential equation eq for function f(x,y), using method hint. **Details** ``eq`` can be any supported partial differential equation (see the pde docstring for supported methods). This can either be an Equality, or an expression, which is assumed to be equal to 0. ``f(x,y)`` is a function of two variables whose derivatives in that variable make up the partial differential equation. In many cases it is not necessary to provide this; it will be autodetected (and an error raised if it couldn't be detected). ``hint`` is the solving method that you want pdsolve to use. Use classify_pde(eq, f(x,y)) to get all of the possible hints for a PDE. The default hint, 'default', will use whatever hint is returned first by classify_pde(). See Hints below for more options that you can use for hint. ``solvefun`` is the convention used for arbitrary functions returned by the PDE solver. If not set by the user, it is set by default to be F. **Hints** Aside from the various solving methods, there are also some meta-hints that you can pass to pdsolve(): "default": This uses whatever hint is returned first by classify_pde(). This is the default argument to pdsolve(). "all": To make pdsolve apply all relevant classification hints, use pdsolve(PDE, func, hint="all"). 
This will return a dictionary of hint:solution terms. If a hint causes pdsolve to raise the NotImplementedError, value of that hint's key will be the exception object raised. The dictionary will also include some special keys: - order: The order of the PDE. See also ode_order() in deutils.py - default: The solution that would be returned by default. This is the one produced by the hint that appears first in the tuple returned by classify_pde(). "all_Integral": This is the same as "all", except if a hint also has a corresponding "_Integral" hint, it only returns the "_Integral" hint. This is useful if "all" causes pdsolve() to hang because of a difficult or impossible integral. This meta-hint will also be much faster than "all", because integrate() is an expensive routine. See also the classify_pde() docstring for more info on hints, and the pde docstring for a list of all supported hints. **Tips** - You can declare the derivative of an unknown function this way: >>> from sympy import Function, Derivative >>> from sympy.abc import x, y # x and y are the independent variables >>> f = Function("f")(x, y) # f is a function of x and y >>> # fx will be the partial derivative of f with respect to x >>> fx = Derivative(f, x) >>> # fy will be the partial derivative of f with respect to y >>> fy = Derivative(f, y) - See test_pde.py for many tests, which serves also as a set of examples for how to use pdsolve(). - pdsolve always returns an Equality class (except for the case when the hint is "all" or "all_Integral"). Note that it is not possible to get an explicit solution for f(x, y) as in the case of ODE's - Do help(pde.pde_hintname) to get help more information on a specific hint Examples ======== >>> from sympy.solvers.pde import pdsolve >>> from sympy import Function, Eq >>> from sympy.abc import x, y >>> f = Function('f') >>> u = f(x, y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0) >>> pdsolve(eq) Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13)) """ if not solvefun: solvefun = Function('F') # See the docstring of _desolve for more details. hints = _desolve(eq, func=func, hint=hint, simplify=True, type='pde', **kwargs) eq = hints.pop('eq', False) all_ = hints.pop('all', False) if all_: # TODO : 'best' hint should be implemented when adequate # number of hints are added. pdedict = {} failed_hints = {} gethints = classify_pde(eq, dict=True) pdedict.update({'order': gethints['order'], 'default': gethints['default']}) for hint in hints: try: rv = _helper_simplify(eq, hint, hints[hint]['func'], hints[hint]['order'], hints[hint][hint], solvefun) except NotImplementedError as detail: failed_hints[hint] = detail else: pdedict[hint] = rv pdedict.update(failed_hints) return pdedict else: return _helper_simplify(eq, hints['hint'], hints['func'], hints['order'], hints[hints['hint']], solvefun) def _helper_simplify(eq, hint, func, order, match, solvefun): """Helper function of pdsolve that calls the respective pde functions to solve for the partial differential equations. This minimizes the computation in calling _desolve multiple times. """ if hint.endswith("_Integral"): solvefunc = globals()[ "pde_" + hint[:-len("_Integral")]] else: solvefunc = globals()["pde_" + hint] return _handle_Integral(solvefunc(eq, func, order, match, solvefun), func, order, hint) def _handle_Integral(expr, func, order, hint): r""" Converts a solution with integrals in it into an actual solution. 
Simplifies the integral mainly using doit() """ if hint.endswith("_Integral"): return expr elif hint == "1st_linear_constant_coeff": return simplify(expr.doit()) else: return expr def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs): """ Returns a tuple of possible pdsolve() classifications for a PDE. The tuple is ordered so that first item is the classification that pdsolve() uses to solve the PDE by default. In general, classifications near the beginning of the list will produce better solutions faster than those near the end, though there are always exceptions. To make pdsolve use a different classification, use pdsolve(PDE, func, hint=<classification>). See also the pdsolve() docstring for different meta-hints you can use. If ``dict`` is true, classify_pde() will return a dictionary of hint:match expression terms. This is intended for internal use by pdsolve(). Note that because dictionaries are ordered arbitrarily, this will most likely not be in the same order as the tuple. You can get help on different hints by doing help(pde.pde_hintname), where hintname is the name of the hint without "_Integral". See sympy.pde.allhints or the sympy.pde docstring for a list of all supported hints that can be returned from classify_pde. Examples ======== >>> from sympy.solvers.pde import classify_pde >>> from sympy import Function, Eq >>> from sympy.abc import x, y >>> f = Function('f') >>> u = f(x, y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0) >>> classify_pde(eq) ('1st_linear_constant_coeff_homogeneous',) """ if func and len(func.args) != 2: raise NotImplementedError("Right now only partial " "differential equations of two variables are supported") if prep or func is None: prep, func_ = _preprocess(eq, func) if func is None: func = func_ if isinstance(eq, Equality): if eq.rhs != 0: return classify_pde(eq.lhs - eq.rhs, func) eq = eq.lhs f = func.func x = func.args[0] y = func.args[1] fx = f(x,y).diff(x) fy = f(x,y).diff(y) # TODO : For now pde.py uses support offered by the ode_order function # to find the order with respect to a multi-variable function. An # improvement could be to classify the order of the PDE on the basis of # individual variables. order = ode_order(eq, f(x,y)) # hint:matchdict or hint:(tuple of matchdicts) # Also will contain "default":<default hint> and "order":order items. 
matching_hints = {'order': order} if not order: if dict: matching_hints["default"] = None return matching_hints else: return () eq = expand(eq) a = Wild('a', exclude = [f(x,y)]) b = Wild('b', exclude = [f(x,y), fx, fy, x, y]) c = Wild('c', exclude = [f(x,y), fx, fy, x, y]) d = Wild('d', exclude = [f(x,y), fx, fy, x, y]) e = Wild('e', exclude = [f(x,y), fx, fy]) n = Wild('n', exclude = [x, y]) # Try removing the smallest power of f(x,y) # from the highest partial derivatives of f(x,y) reduced_eq = None if eq.is_Add: var = set(combinations_with_replacement((x,y), order)) dummyvar = var.copy() power = None for i in var: coeff = eq.coeff(f(x,y).diff(*i)) if coeff != 1: match = coeff.match(a*f(x,y)**n) if match and match[a]: power = match[n] dummyvar.remove(i) break dummyvar.remove(i) for i in dummyvar: coeff = eq.coeff(f(x,y).diff(*i)) if coeff != 1: match = coeff.match(a*f(x,y)**n) if match and match[a] and match[n] < power: power = match[n] if power: den = f(x,y)**power reduced_eq = Add(*[arg/den for arg in eq.args]) if not reduced_eq: reduced_eq = eq if order == 1: reduced_eq = collect(reduced_eq, f(x, y)) r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e) if r: if not r[e]: ## Linear first-order homogeneous partial-differential ## equation with constant coefficients r.update({'b': b, 'c': c, 'd': d}) matching_hints["1st_linear_constant_coeff_homogeneous"] = r else: if r[b]**2 + r[c]**2 != 0: ## Linear first-order general partial-differential ## equation with constant coefficients r.update({'b': b, 'c': c, 'd': d, 'e': e}) matching_hints["1st_linear_constant_coeff"] = r matching_hints[ "1st_linear_constant_coeff_Integral"] = r else: b = Wild('b', exclude=[f(x, y), fx, fy]) c = Wild('c', exclude=[f(x, y), fx, fy]) d = Wild('d', exclude=[f(x, y), fx, fy]) r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e) if r: r.update({'b': b, 'c': c, 'd': d, 'e': e}) matching_hints["1st_linear_variable_coeff"] = r # Order keys based on allhints. retlist = [] for i in allhints: if i in matching_hints: retlist.append(i) if dict: # Dictionaries are ordered arbitrarily, so make note of which # hint would come first for pdsolve(). Use an ordered dict in Py 3. matching_hints["default"] = None matching_hints["ordered_hints"] = tuple(retlist) for i in allhints: if i in matching_hints: matching_hints["default"] = i break return matching_hints else: return tuple(retlist) def checkpdesol(pde, sol, func=None, solve_for_func=True): """ Checks if the given solution satisfies the partial differential equation. pde is the partial differential equation which can be given in the form of an equation or an expression. sol is the solution for which the pde is to be checked. This can also be given in an equation or an expression form. If the function is not provided, the helper function _preprocess from deutils is used to identify the function. If a sequence of solutions is passed, the same sort of container will be used to return the result for each solution. The following methods are currently being implemented to check if the solution satisfies the PDE: 1. Directly substitute the solution in the PDE and check. If the solution hasn't been solved for f, then it will solve for f provided solve_for_func hasn't been set to False. If the solution satisfies the PDE, then a tuple (True, 0) is returned. Otherwise a tuple (False, expr) where expr is the value obtained after substituting the solution in the PDE. However if a known solution returns False, it may be due to the inability of doit() to simplify it to zero. 
Examples ======== >>> from sympy import Function, symbols >>> from sympy.solvers.pde import checkpdesol, pdsolve >>> x, y = symbols('x y') >>> f = Function('f') >>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y) >>> sol = pdsolve(eq) >>> assert checkpdesol(eq, sol)[0] >>> eq = x*f(x,y) + f(x,y).diff(x) >>> checkpdesol(eq, sol) (False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25)) """ # Converting the pde into an equation if not isinstance(pde, Equality): pde = Eq(pde, 0) # If no function is given, try finding the function present. if func is None: try: _, func = _preprocess(pde.lhs) except ValueError: funcs = [s.atoms(AppliedUndef) for s in ( sol if is_sequence(sol, set) else [sol])] funcs = set().union(funcs) if len(funcs) != 1: raise ValueError( 'must pass func arg to checkpdesol for this case.') func = funcs.pop() # If the given solution is in the form of a list or a set # then return a list or set of tuples. if is_sequence(sol, set): return type(sol)([checkpdesol( pde, i, func=func, solve_for_func=solve_for_func) for i in sol]) # Convert solution into an equation if not isinstance(sol, Equality): sol = Eq(func, sol) elif sol.rhs == func: sol = sol.reversed # Try solving for the function solved = sol.lhs == func and not sol.rhs.has(func) if solve_for_func and not solved: solved = solve(sol, func) if solved: if len(solved) == 1: return checkpdesol(pde, Eq(func, solved[0]), func=func, solve_for_func=False) else: return checkpdesol(pde, [Eq(func, t) for t in solved], func=func, solve_for_func=False) # try direct substitution of the solution into the PDE and simplify if sol.lhs == func: pde = pde.lhs - pde.rhs s = simplify(pde.subs(func, sol.rhs).doit()) return s is S.Zero, s raise NotImplementedError(filldedent(''' Unable to test if %s is a solution to %s.''' % (sol, pde))) def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun): r""" Solves a first order linear homogeneous partial differential equation with constant coefficients. The general form of this partial differential equation is .. math:: a \frac{\partial f(x,y)}{\partial x} + b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0 where `a`, `b` and `c` are constants. The general solution is of the form: .. math:: f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}} and can be found in SymPy with ``pdsolve``:: >>> from sympy.solvers import pdsolve >>> from sympy.abc import x, y, a, b, c >>> from sympy import Function, pprint >>> f = Function('f') >>> u = f(x,y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> genform = a*ux + b*uy + c*u >>> pprint(genform) d d a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) dx dy >>> pprint(pdsolve(genform)) -c*(a*x + b*y) --------------- 2 2 a + b f(x, y) = F(-a*y + b*x)*e Examples ======== >>> from sympy import pdsolve >>> from sympy import Function, pprint >>> from sympy.abc import x,y >>> f = Function('f') >>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)) Eq(f(x, y), F(x - y)*exp(-x/2 - y/2)) >>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))) x y - - - - 2 2 f(x, y) = F(x - y)*e References ========== - Viktor Grigoryan, "Partial Differential Equations" Math 124A - Fall 2010, pp.7 """ # TODO : For now homogeneous first order linear PDE's having # two variables are implemented. Once there is support for # solving systems of ODE's, this can be extended to n variables. 
f = func.func x = func.args[0] y = func.args[1] b = match[match['b']] c = match[match['c']] d = match[match['d']] return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y)) def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun): r""" Solves a first order linear partial differential equation with constant coefficients. The general form of this partial differential equation is .. math:: a \frac{\partial f(x,y)}{\partial x} + b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = G(x,y) where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary function in `x` and `y`. The general solution of the PDE is: .. math:: f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2} \int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2}, \frac{- a \eta + b \xi}{a^2 + b^2} \right) e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right] e^{- \frac{c \xi}{a^2 + b^2}} \right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, , where `F(\eta)` is an arbitrary single-valued function. The solution can be found in SymPy with ``pdsolve``:: >>> from sympy.solvers import pdsolve >>> from sympy.abc import x, y, a, b, c >>> from sympy import Function, pprint >>> f = Function('f') >>> G = Function('G') >>> u = f(x,y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> genform = a*ux + b*uy + c*u - G(x,y) >>> pprint(genform) d d a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y) dx dy >>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral')) // a*x + b*y \ || / | || | | || | c*xi | || | ------- | || | 2 2 | || | /a*xi + b*eta -a*eta + b*xi\ a + b | || | G|------------, -------------|*e d(xi)| || | | 2 2 2 2 | | || | \ a + b a + b / | || | | || / | || | f(x, y) = ||F(eta) + -------------------------------------------------------|* || 2 2 | \\ a + b / <BLANKLINE> \| || || || || || || || || -c*xi || -------|| 2 2|| a + b || e || || /|eta=-a*y + b*x, xi=a*x + b*y Examples ======== >>> from sympy.solvers.pde import pdsolve >>> from sympy import Function, pprint, exp >>> from sympy.abc import x,y >>> f = Function('f') >>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y) >>> pdsolve(eq) Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y)) References ========== - Viktor Grigoryan, "Partial Differential Equations" Math 124A - Fall 2010, pp.7 """ # TODO : For now homogeneous first order linear PDE's having # two variables are implemented. Once there is support for # solving systems of ODE's, this can be extended to n variables. xi, eta = symbols("xi eta") f = func.func x = func.args[0] y = func.args[1] b = match[match['b']] c = match[match['c']] d = match[match['d']] e = -match[match['e']] expterm = exp(-S(d)/(b**2 + c**2)*xi) functerm = solvefun(eta) solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y) # Integral should remain as it is in terms of xi, # doit() should be done in _handle_Integral. genterm = (1/S(b**2 + c**2))*Integral( (1/expterm*e).subs(solvedict), (xi, b*x + c*y)) return Eq(f(x,y), Subs(expterm*(functerm + genterm), (eta, xi), (c*x - b*y, b*x + c*y))) def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun): r""" Solves a first order linear partial differential equation with variable coefficients. The general form of this partial differential equation is .. math:: a(x, y) \frac{\partial f(x, y)}{\partial x} + b(x, y) \frac{\partial f(x, y)}{\partial y} + c(x, y) f(x, y) = G(x, y) where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary functions in `x` and `y`. 
This PDE is converted into an ODE by making the following transformation: 1. `\xi` as `x` 2. `\eta` as the constant in the solution to the differential equation `\frac{dy}{dx} = -\frac{b}{a}` Making the previous substitutions reduces it to the linear ODE .. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0 which can be solved using ``dsolve``. >>> from sympy.abc import x, y >>> from sympy import Function, pprint >>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']] >>> u = f(x,y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y) >>> pprint(genform) d d -G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y)) dx dy Examples ======== >>> from sympy.solvers.pde import pdsolve >>> from sympy import Function, pprint >>> from sympy.abc import x,y >>> f = Function('f') >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2 >>> pdsolve(eq) Eq(f(x, y), F(x*y)*exp(y**2/2) + 1) References ========== - Viktor Grigoryan, "Partial Differential Equations" Math 124A - Fall 2010, pp.7 """ from sympy.integrals.integrals import integrate from sympy.solvers.ode import dsolve xi, eta = symbols("xi eta") f = func.func x = func.args[0] y = func.args[1] b = match[match['b']] c = match[match['c']] d = match[match['d']] e = -match[match['e']] if not d: # To deal with cases like b*ux = e or c*uy = e if not (b and c): if c: try: tsol = integrate(e/c, y) except NotImplementedError: raise NotImplementedError("Unable to find a solution" " due to inability of integrate") else: return Eq(f(x,y), solvefun(x) + tsol) if b: try: tsol = integrate(e/b, x) except NotImplementedError: raise NotImplementedError("Unable to find a solution" " due to inability of integrate") else: return Eq(f(x,y), solvefun(y) + tsol) if not c: # To deal with cases when c is 0, a simpler method is used. # The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x plode = f(x).diff(x)*b + d*f(x) - e sol = dsolve(plode, f(x)) syms = sol.free_symbols - plode.free_symbols - {x, y} rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y) return Eq(f(x, y), rhs) if not b: # To deal with cases when b is 0, a simpler method is used. 
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y plode = f(y).diff(y)*c + d*f(y) - e sol = dsolve(plode, f(y)) syms = sol.free_symbols - plode.free_symbols - {x, y} rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x) return Eq(f(x, y), rhs) dummy = Function('d') h = (c/b).subs(y, dummy(x)) sol = dsolve(dummy(x).diff(x) - h, dummy(x)) if isinstance(sol, list): sol = sol[0] solsym = sol.free_symbols - h.free_symbols - {x, y} if len(solsym) == 1: solsym = solsym.pop() etat = (solve(sol, solsym)[0]).subs(dummy(x), y) ysub = solve(eta - etat, y)[0] deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub) final = (dsolve(deq, f(x), hint='1st_linear')).rhs if isinstance(final, list): final = final[0] finsyms = final.free_symbols - deq.free_symbols - {x, y} rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat) return Eq(f(x, y), rhs) else: raise NotImplementedError("Cannot solve the partial differential equation due" " to inability of constantsimp") def _simplify_variable_coeff(sol, syms, func, funcarg): r""" Helper function to replace constants by functions in 1st_linear_variable_coeff """ eta = Symbol("eta") if len(syms) == 1: sym = syms.pop() final = sol.subs(sym, func(funcarg)) else: for key, sym in enumerate(syms): final = sol.subs(sym, func(funcarg)) return simplify(final.subs(eta, funcarg)) def pde_separate(eq, fun, sep, strategy='mul'): """Separate variables in partial differential equation either by additive or multiplicative separation approach. It tries to rewrite an equation so that one of the specified variables occurs on a different side of the equation than the others. :param eq: Partial differential equation :param fun: Original function F(x, y, z) :param sep: List of separated functions [X(x), u(y, z)] :param strategy: Separation strategy. You can choose between additive separation ('add') and multiplicative separation ('mul') which is default. Examples ======== >>> from sympy import E, Eq, Function, pde_separate, Derivative as D >>> from sympy.abc import x, t >>> u, X, T = map(Function, 'uXT') >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t)) >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add') [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)] >>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2)) >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul') [Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)] See Also ======== pde_separate_add, pde_separate_mul """ do_add = False if strategy == 'add': do_add = True elif strategy == 'mul': do_add = False else: raise ValueError('Unknown strategy: %s' % strategy) if isinstance(eq, Equality): if eq.rhs != 0: return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy) else: return pde_separate(Eq(eq, 0), fun, sep, strategy) if eq.rhs != 0: raise ValueError("Value should be 0") # Handle arguments orig_args = list(fun.args) subs_args = [] for s in sep: for j in range(0, len(s.args)): subs_args.append(s.args[j]) if do_add: functions = reduce(operator.add, sep) else: functions = reduce(operator.mul, sep) # Check whether variables match if len(subs_args) != len(orig_args): raise ValueError("Variable counts do not match") # Check for duplicate arguments like [X(x), u(x, y)] if has_dups(subs_args): raise ValueError("Duplicate substitution arguments detected") # Check whether the variables match if set(orig_args) != set(subs_args): raise ValueError("Arguments do not match") # Substitute original function with separated... 
result = eq.lhs.subs(fun, functions).doit() # Divide by terms when doing multiplicative separation if not do_add: eq = 0 for i in result.args: eq += i/functions result = eq svar = subs_args[0] dvar = subs_args[1:] return _separate(result, svar, dvar) def pde_separate_add(eq, fun, sep): """ Helper function for searching additive separable solutions. Consider an equation of two independent variables x, y and a dependent variable w, we look for the product of two functions depending on different arguments: `w(x, y, z) = X(x) + y(y, z)` Examples ======== >>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D >>> from sympy.abc import x, t >>> u, X, T = map(Function, 'uXT') >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t)) >>> pde_separate_add(eq, u(x, t), [X(x), T(t)]) [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)] """ return pde_separate(eq, fun, sep, strategy='add') def pde_separate_mul(eq, fun, sep): """ Helper function for searching multiplicative separable solutions. Consider an equation of two independent variables x, y and a dependent variable w, we look for the product of two functions depending on different arguments: `w(x, y, z) = X(x)*u(y, z)` Examples ======== >>> from sympy import Function, Eq, pde_separate_mul, Derivative as D >>> from sympy.abc import x, y >>> u, X, Y = map(Function, 'uXY') >>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2)) >>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)]) [Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)] """ return pde_separate(eq, fun, sep, strategy='mul') def _separate(eq, dep, others): """Separate expression into two parts based on dependencies of variables.""" # FIRST PASS # Extract derivatives depending our separable variable... terms = set() for term in eq.args: if term.is_Mul: for i in term.args: if i.is_Derivative and not i.has(*others): terms.add(term) continue elif term.is_Derivative and not term.has(*others): terms.add(term) # Find the factor that we need to divide by div = set() for term in terms: ext, sep = term.expand().as_independent(dep) # Failed? if sep.has(*others): return None div.add(ext) # FIXME: Find lcm() of all the divisors and divide with it, instead of # current hack :( # https://github.com/sympy/sympy/issues/4597 if len(div) > 0: final = 0 for term in eq.args: eqn = 0 for i in div: eqn += term / i final += simplify(eqn) eq = final # SECOND PASS - separate the derivatives div = set() lhs = rhs = 0 for term in eq.args: # Check, whether we have already term with independent variable... if not term.has(*others): lhs += term continue # ...otherwise, try to separate temp, sep = term.expand().as_independent(dep) # Failed? if sep.has(*others): return None # Extract the divisors div.add(sep) rhs -= term.expand() # Do the division fulldiv = reduce(operator.add, div) lhs = simplify(lhs/fulldiv).expand() rhs = simplify(rhs/fulldiv).expand() # ...and check whether we were successful :) if lhs.has(*others) or rhs.has(dep): return None return [lhs, rhs]
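# ---------------------------------------------------------------------------
# Minimal usage sketch for pdsolve() and pde_separate_mul() defined above.
# Illustration only; the comments describe the shape of the results rather
# than asserting exact output strings.
if __name__ == "__main__":
    from sympy import Function, Eq, Derivative as D
    from sympy.abc import x, y, t

    f, u, X, T = map(Function, 'fuXT')

    # First order linear PDE with constant coefficients (the same equation as
    # in the pdsolve docstring).  The result is Eq(f(x, y), ...) containing an
    # arbitrary function F of the characteristic combination 3*x - 2*y.
    eq = 1 + 2*f(x, y).diff(x)/f(x, y) + 3*f(x, y).diff(y)/f(x, y)
    print(pdsolve(eq))

    # Multiplicative separation of u_xx = u_tt into [X''/X, T''/T].
    wave = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
    print(pde_separate_mul(wave, u(x, t), [X(x), T(t)]))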
""" Generic SymPy-Independent Strategies """ identity = lambda x: x def exhaust(rule): """ Apply a rule repeatedly until it has no effect """ def exhaustive_rl(expr): new, old = rule(expr), expr while new != old: new, old = rule(new), new return new return exhaustive_rl def memoize(rule): """ Memoized version of a rule """ cache = {} def memoized_rl(expr): if expr in cache: return cache[expr] else: result = rule(expr) cache[expr] = result return result return memoized_rl def condition(cond, rule): """ Only apply rule if condition is true """ def conditioned_rl(expr): if cond(expr): return rule(expr) else: return expr return conditioned_rl def chain(*rules): """ Compose a sequence of rules so that they apply to the expr sequentially """ def chain_rl(expr): for rule in rules: expr = rule(expr) return expr return chain_rl def debug(rule, file=None): """ Print out before and after expressions each time rule is used """ if file is None: from sys import stdout file = stdout def debug_rl(*args, **kwargs): expr = args[0] result = rule(*args, **kwargs) if result != expr: file.write("Rule: %s\n" % rule.__name__) file.write("In: %s\nOut: %s\n\n"%(expr, result)) return result return debug_rl def null_safe(rule): """ Return original expr if rule returns None """ def null_safe_rl(expr): result = rule(expr) if result is None: return expr else: return result return null_safe_rl def tryit(rule, exception): """ Return original expr if rule raises exception """ def try_rl(expr): try: return rule(expr) except exception: return expr return try_rl def do_one(*rules): """ Try each of the rules until one works. Then stop. """ def do_one_rl(expr): for rl in rules: result = rl(expr) if result != expr: return result return expr return do_one_rl def switch(key, ruledict): """ Select a rule based on the result of key called on the function """ def switch_rl(expr): rl = ruledict.get(key(expr), identity) return rl(expr) return switch_rl def minimize(*rules, objective=identity): """ Select result of rules that minimizes objective >>> from sympy.strategies import minimize >>> inc = lambda x: x + 1 >>> dec = lambda x: x - 1 >>> rl = minimize(inc, dec) >>> rl(4) 3 >>> rl = minimize(inc, dec, objective=lambda x: -x) # maximize >>> rl(4) 5 """ def minrule(expr): return min([rule(expr) for rule in rules], key=objective) return minrule
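# ---------------------------------------------------------------------------
# Minimal usage sketch for the combinators defined above.  Illustration only:
# the combinators just call the rules and compare results with !=, so plain
# Python integers work in place of SymPy expressions.
if __name__ == "__main__":
    # exhaust() applies a rule until it stops changing its input:
    # 40 -> 20 -> 10 -> 5.
    halve_even = lambda k: k // 2 if k % 2 == 0 else k
    assert exhaust(halve_even)(40) == 5

    # chain() applies rules left to right; do_one() stops after the first
    # rule that changes the input.
    inc = lambda k: k + 1
    dec = lambda k: k - 1
    assert chain(inc, inc, dec)(10) == 11
    assert do_one(dec, inc)(10) == 9

    # null_safe() turns a rule that may return None into one that falls back
    # to the original input.
    only_small = lambda k: k if k < 10 else None
    assert null_safe(only_small)(100) == 100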
""" module for generating C, C++, Fortran77, Fortran90, Julia, Rust and Octave/Matlab routines that evaluate sympy expressions. This module is work in progress. Only the milestones with a '+' character in the list below have been completed. --- How is sympy.utilities.codegen different from sympy.printing.ccode? --- We considered the idea to extend the printing routines for sympy functions in such a way that it prints complete compilable code, but this leads to a few unsurmountable issues that can only be tackled with dedicated code generator: - For C, one needs both a code and a header file, while the printing routines generate just one string. This code generator can be extended to support .pyf files for f2py. - SymPy functions are not concerned with programming-technical issues, such as input, output and input-output arguments. Other examples are contiguous or non-contiguous arrays, including headers of other libraries such as gsl or others. - It is highly interesting to evaluate several sympy functions in one C routine, eventually sharing common intermediate results with the help of the cse routine. This is more than just printing. - From the programming perspective, expressions with constants should be evaluated in the code generator as much as possible. This is different for printing. --- Basic assumptions --- * A generic Routine data structure describes the routine that must be translated into C/Fortran/... code. This data structure covers all features present in one or more of the supported languages. * Descendants from the CodeGen class transform multiple Routine instances into compilable code. Each derived class translates into a specific language. * In many cases, one wants a simple workflow. The friendly functions in the last part are a simple api on top of the Routine/CodeGen stuff. They are easier to use, but are less powerful. --- Milestones --- + First working version with scalar input arguments, generating C code, tests + Friendly functions that are easier to use than the rigorous Routine/CodeGen workflow. + Integer and Real numbers as input and output + Output arguments + InputOutput arguments + Sort input/output arguments properly + Contiguous array arguments (numpy matrices) + Also generate .pyf code for f2py (in autowrap module) + Isolate constants and evaluate them beforehand in double precision + Fortran 90 + Octave/Matlab - Common Subexpression Elimination - User defined comments in the generated code - Optional extra include lines for libraries/objects that can eval special functions - Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ... - Contiguous array arguments (sympy matrices) - Non-contiguous array arguments (sympy matrices) - ccode must raise an error when it encounters something that can not be translated into c. ccode(integrate(sin(x)/x, x)) does not make sense. - Complex numbers as input and output - A default complex datatype - Include extra information in the header: date, user, hostname, sha1 hash, ... - Fortran 77 - C++ - Python - Julia - Rust - ... 
""" import os import textwrap from io import StringIO from sympy import __version__ as sympy_version from sympy.core import Symbol, S, Tuple, Equality, Function, Basic from sympy.core.compatibility import is_sequence from sympy.printing.c import c_code_printers from sympy.printing.codeprinter import AssignmentError from sympy.printing.fortran import FCodePrinter from sympy.printing.julia import JuliaCodePrinter from sympy.printing.octave import OctaveCodePrinter from sympy.printing.rust import RustCodePrinter from sympy.tensor import Idx, Indexed, IndexedBase from sympy.matrices import (MatrixSymbol, ImmutableMatrix, MatrixBase, MatrixExpr, MatrixSlice) __all__ = [ # description of routines "Routine", "DataType", "default_datatypes", "get_default_datatype", "Argument", "InputArgument", "OutputArgument", "Result", # routines -> code "CodeGen", "CCodeGen", "FCodeGen", "JuliaCodeGen", "OctaveCodeGen", "RustCodeGen", # friendly functions "codegen", "make_routine", ] # # Description of routines # class Routine: """Generic description of evaluation routine for set of expressions. A CodeGen class can translate instances of this class into code in a particular language. The routine specification covers all the features present in these languages. The CodeGen part must raise an exception when certain features are not present in the target language. For example, multiple return values are possible in Python, but not in C or Fortran. Another example: Fortran and Python support complex numbers, while C does not. """ def __init__(self, name, arguments, results, local_vars, global_vars): """Initialize a Routine instance. Parameters ========== name : string Name of the routine. arguments : list of Arguments These are things that appear in arguments of a routine, often appearing on the right-hand side of a function call. These are commonly InputArguments but in some languages, they can also be OutputArguments or InOutArguments (e.g., pass-by-reference in C code). results : list of Results These are the return values of the routine, often appearing on the left-hand side of a function call. The difference between Results and OutputArguments and when you should use each is language-specific. local_vars : list of Results These are variables that will be defined at the beginning of the function. global_vars : list of Symbols Variables which will not be passed into the function. """ # extract all input symbols and all symbols appearing in an expression input_symbols = set() symbols = set() for arg in arguments: if isinstance(arg, OutputArgument): symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed)) elif isinstance(arg, InputArgument): input_symbols.add(arg.name) elif isinstance(arg, InOutArgument): input_symbols.add(arg.name) symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed)) else: raise ValueError("Unknown Routine argument: %s" % arg) for r in results: if not isinstance(r, Result): raise ValueError("Unknown Routine result: %s" % r) symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed)) local_symbols = set() for r in local_vars: if isinstance(r, Result): symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed)) local_symbols.add(r.name) else: local_symbols.add(r) symbols = {s.label if isinstance(s, Idx) else s for s in symbols} # Check that all symbols in the expressions are covered by # InputArguments/InOutArguments---subset because user could # specify additional (unused) InputArguments or local_vars. 
notcovered = symbols.difference( input_symbols.union(local_symbols).union(global_vars)) if notcovered != set(): raise ValueError("Symbols needed for output are not in input " + ", ".join([str(x) for x in notcovered])) self.name = name self.arguments = arguments self.results = results self.local_vars = local_vars self.global_vars = global_vars def __str__(self): return self.__class__.__name__ + "({name!r}, {arguments}, {results}, {local_vars}, {global_vars})".format(**self.__dict__) __repr__ = __str__ @property def variables(self): """Returns a set of all variables possibly used in the routine. For routines with unnamed return values, the dummies that may or may not be used will be included in the set. """ v = set(self.local_vars) for arg in self.arguments: v.add(arg.name) for res in self.results: v.add(res.result_var) return v @property def result_variables(self): """Returns a list of OutputArgument, InOutArgument and Result. If return values are present, they are at the end ot the list. """ args = [arg for arg in self.arguments if isinstance( arg, (OutputArgument, InOutArgument))] args.extend(self.results) return args class DataType: """Holds strings for a certain datatype in different languages.""" def __init__(self, cname, fname, pyname, jlname, octname, rsname): self.cname = cname self.fname = fname self.pyname = pyname self.jlname = jlname self.octname = octname self.rsname = rsname default_datatypes = { "int": DataType("int", "INTEGER*4", "int", "", "", "i32"), "float": DataType("double", "REAL*8", "float", "", "", "f64"), "complex": DataType("double", "COMPLEX*16", "complex", "", "", "float") #FIXME: # complex is only supported in fortran, python, julia, and octave. # So to not break c or rust code generation, we stick with double or # float, respecitvely (but actually should raise an exception for # explicitly complex variables (x.is_complex==True)) } COMPLEX_ALLOWED = False def get_default_datatype(expr, complex_allowed=None): """Derives an appropriate datatype based on the expression.""" if complex_allowed is None: complex_allowed = COMPLEX_ALLOWED if complex_allowed: final_dtype = "complex" else: final_dtype = "float" if expr.is_integer: return default_datatypes["int"] elif expr.is_real: return default_datatypes["float"] elif isinstance(expr, MatrixBase): #check all entries dt = "int" for element in expr: if dt == "int" and not element.is_integer: dt = "float" if dt == "float" and not element.is_real: return default_datatypes[final_dtype] return default_datatypes[dt] else: return default_datatypes[final_dtype] class Variable: """Represents a typed variable.""" def __init__(self, name, datatype=None, dimensions=None, precision=None): """Return a new variable. Parameters ========== name : Symbol or MatrixSymbol datatype : optional When not given, the data type will be guessed based on the assumptions on the symbol argument. dimension : sequence containing tupes, optional If present, the argument is interpreted as an array, where this sequence of tuples specifies (lower, upper) bounds for each index of the array. precision : int, optional Controls the precision of floating point constants. 
""" if not isinstance(name, (Symbol, MatrixSymbol)): raise TypeError("The first argument must be a sympy symbol.") if datatype is None: datatype = get_default_datatype(name) elif not isinstance(datatype, DataType): raise TypeError("The (optional) `datatype' argument must be an " "instance of the DataType class.") if dimensions and not isinstance(dimensions, (tuple, list)): raise TypeError( "The dimension argument must be a sequence of tuples") self._name = name self._datatype = { 'C': datatype.cname, 'FORTRAN': datatype.fname, 'JULIA': datatype.jlname, 'OCTAVE': datatype.octname, 'PYTHON': datatype.pyname, 'RUST': datatype.rsname, } self.dimensions = dimensions self.precision = precision def __str__(self): return "%s(%r)" % (self.__class__.__name__, self.name) __repr__ = __str__ @property def name(self): return self._name def get_datatype(self, language): """Returns the datatype string for the requested language. Examples ======== >>> from sympy import Symbol >>> from sympy.utilities.codegen import Variable >>> x = Variable(Symbol('x')) >>> x.get_datatype('c') 'double' >>> x.get_datatype('fortran') 'REAL*8' """ try: return self._datatype[language.upper()] except KeyError: raise CodeGenError("Has datatypes for languages: %s" % ", ".join(self._datatype)) class Argument(Variable): """An abstract Argument data structure: a name and a data type. This structure is refined in the descendants below. """ pass class InputArgument(Argument): pass class ResultBase: """Base class for all "outgoing" information from a routine. Objects of this class stores a sympy expression, and a sympy object representing a result variable that will be used in the generated code only if necessary. """ def __init__(self, expr, result_var): self.expr = expr self.result_var = result_var def __str__(self): return "%s(%r, %r)" % (self.__class__.__name__, self.expr, self.result_var) __repr__ = __str__ class OutputArgument(Argument, ResultBase): """OutputArgument are always initialized in the routine.""" def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None): """Return a new variable. Parameters ========== name : Symbol, MatrixSymbol The name of this variable. When used for code generation, this might appear, for example, in the prototype of function in the argument list. result_var : Symbol, Indexed Something that can be used to assign a value to this variable. Typically the same as `name` but for Indexed this should be e.g., "y[i]" whereas `name` should be the Symbol "y". expr : object The expression that should be output, typically a SymPy expression. datatype : optional When not given, the data type will be guessed based on the assumptions on the symbol argument. dimension : sequence containing tupes, optional If present, the argument is interpreted as an array, where this sequence of tuples specifies (lower, upper) bounds for each index of the array. precision : int, optional Controls the precision of floating point constants. 
""" Argument.__init__(self, name, datatype, dimensions, precision) ResultBase.__init__(self, expr, result_var) def __str__(self): return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.result_var, self.expr) __repr__ = __str__ class InOutArgument(Argument, ResultBase): """InOutArgument are never initialized in the routine.""" def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None): if not datatype: datatype = get_default_datatype(expr) Argument.__init__(self, name, datatype, dimensions, precision) ResultBase.__init__(self, expr, result_var) __init__.__doc__ = OutputArgument.__init__.__doc__ def __str__(self): return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.expr, self.result_var) __repr__ = __str__ class Result(Variable, ResultBase): """An expression for a return value. The name result is used to avoid conflicts with the reserved word "return" in the python language. It is also shorter than ReturnValue. These may or may not need a name in the destination (e.g., "return(x*y)" might return a value without ever naming it). """ def __init__(self, expr, name=None, result_var=None, datatype=None, dimensions=None, precision=None): """Initialize a return value. Parameters ========== expr : SymPy expression name : Symbol, MatrixSymbol, optional The name of this return variable. When used for code generation, this might appear, for example, in the prototype of function in a list of return values. A dummy name is generated if omitted. result_var : Symbol, Indexed, optional Something that can be used to assign a value to this variable. Typically the same as `name` but for Indexed this should be e.g., "y[i]" whereas `name` should be the Symbol "y". Defaults to `name` if omitted. datatype : optional When not given, the data type will be guessed based on the assumptions on the expr argument. dimension : sequence containing tupes, optional If present, this variable is interpreted as an array, where this sequence of tuples specifies (lower, upper) bounds for each index of the array. precision : int, optional Controls the precision of floating point constants. 
""" # Basic because it is the base class for all types of expressions if not isinstance(expr, (Basic, MatrixBase)): raise TypeError("The first argument must be a sympy expression.") if name is None: name = 'result_%d' % abs(hash(expr)) if datatype is None: #try to infer data type from the expression datatype = get_default_datatype(expr) if isinstance(name, str): if isinstance(expr, (MatrixBase, MatrixExpr)): name = MatrixSymbol(name, *expr.shape) else: name = Symbol(name) if result_var is None: result_var = name Variable.__init__(self, name, datatype=datatype, dimensions=dimensions, precision=precision) ResultBase.__init__(self, expr, result_var) def __str__(self): return "%s(%r, %r, %r)" % (self.__class__.__name__, self.expr, self.name, self.result_var) __repr__ = __str__ # # Transformation of routine objects into code # class CodeGen: """Abstract class for the code generators.""" printer = None # will be set to an instance of a CodePrinter subclass def _indent_code(self, codelines): return self.printer.indent_code(codelines) def _printer_method_with_settings(self, method, settings=None, *args, **kwargs): settings = settings or {} ori = {k: self.printer._settings[k] for k in settings} for k, v in settings.items(): self.printer._settings[k] = v result = getattr(self.printer, method)(*args, **kwargs) for k, v in ori.items(): self.printer._settings[k] = v return result def _get_symbol(self, s): """Returns the symbol as fcode prints it.""" if self.printer._settings['human']: expr_str = self.printer.doprint(s) else: constants, not_supported, expr_str = self.printer.doprint(s) if constants or not_supported: raise ValueError("Failed to print %s" % str(s)) return expr_str.strip() def __init__(self, project="project", cse=False): """Initialize a code generator. Derived classes will offer more options that affect the generated code. """ self.project = project self.cse = cse def routine(self, name, expr, argument_sequence=None, global_vars=None): """Creates an Routine object that is appropriate for this language. This implementation is appropriate for at least C/Fortran. Subclasses can override this if necessary. Here, we assume at most one return value (the l-value) which must be scalar. Additional outputs are OutputArguments (e.g., pointers on right-hand-side or pass-by-reference). Matrices are always returned via OutputArguments. If ``argument_sequence`` is None, arguments will be ordered alphabetically, but with all InputArguments first, and then OutputArgument and InOutArguments. """ if self.cse: from sympy.simplify.cse_main import cse if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): if not expr: raise ValueError("No expression given") for e in expr: if not e.is_Equality: raise CodeGenError("Lists of expressions must all be Equalities. 
{} is not.".format(e)) # create a list of right hand sides and simplify them rhs = [e.rhs for e in expr] common, simplified = cse(rhs) # pack the simplified expressions back up with their left hand sides expr = [Equality(e.lhs, rhs) for e, rhs in zip(expr, simplified)] else: rhs = [expr] if isinstance(expr, Equality): common, simplified = cse(expr.rhs) #, ignore=in_out_args) expr = Equality(expr.lhs, simplified[0]) else: common, simplified = cse(expr) expr = simplified local_vars = [Result(b,a) for a,b in common] local_symbols = {a for a,_ in common} local_expressions = Tuple(*[b for _,b in common]) else: local_expressions = Tuple() if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): if not expr: raise ValueError("No expression given") expressions = Tuple(*expr) else: expressions = Tuple(expr) if self.cse: if {i.label for i in expressions.atoms(Idx)} != set(): raise CodeGenError("CSE and Indexed expressions do not play well together yet") else: # local variables for indexed expressions local_vars = {i.label for i in expressions.atoms(Idx)} local_symbols = local_vars # global variables global_vars = set() if global_vars is None else set(global_vars) # symbols that should be arguments symbols = (expressions.free_symbols | local_expressions.free_symbols) - local_symbols - global_vars new_symbols = set() new_symbols.update(symbols) for symbol in symbols: if isinstance(symbol, Idx): new_symbols.remove(symbol) new_symbols.update(symbol.args[1].free_symbols) if isinstance(symbol, Indexed): new_symbols.remove(symbol) symbols = new_symbols # Decide whether to use output argument or return value return_val = [] output_args = [] for expr in expressions: if isinstance(expr, Equality): out_arg = expr.lhs expr = expr.rhs if isinstance(out_arg, Indexed): dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape]) symbol = out_arg.base.label elif isinstance(out_arg, Symbol): dims = [] symbol = out_arg elif isinstance(out_arg, MatrixSymbol): dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape]) symbol = out_arg else: raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " "can define output arguments.") if expr.has(symbol): output_args.append( InOutArgument(symbol, out_arg, expr, dimensions=dims)) else: output_args.append( OutputArgument(symbol, out_arg, expr, dimensions=dims)) # remove duplicate arguments when they are not local variables if symbol not in local_vars: # avoid duplicate arguments symbols.remove(symbol) elif isinstance(expr, (ImmutableMatrix, MatrixSlice)): # Create a "dummy" MatrixSymbol to use as the Output arg out_arg = MatrixSymbol('out_%s' % abs(hash(expr)), *expr.shape) dims = tuple([(S.Zero, dim - 1) for dim in out_arg.shape]) output_args.append( OutputArgument(out_arg, out_arg, expr, dimensions=dims)) else: return_val.append(Result(expr)) arg_list = [] # setup input argument list # helper to get dimensions for data for array-like args def dimensions(s): return [(S.Zero, dim - 1) for dim in s.shape] array_symbols = {} for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed): array_symbols[array.base.label] = array for array in expressions.atoms(MatrixSymbol) | local_expressions.atoms(MatrixSymbol): array_symbols[array] = array for symbol in sorted(symbols, key=str): if symbol in array_symbols: array = array_symbols[symbol] metadata = {'dimensions': dimensions(array)} else: metadata = {} arg_list.append(InputArgument(symbol, **metadata)) output_args.sort(key=lambda x: str(x.name)) arg_list.extend(output_args) if argument_sequence is not 
None: # if the user has supplied IndexedBase instances, we'll accept that new_sequence = [] for arg in argument_sequence: if isinstance(arg, IndexedBase): new_sequence.append(arg.label) else: new_sequence.append(arg) argument_sequence = new_sequence missing = [x for x in arg_list if x.name not in argument_sequence] if missing: msg = "Argument list didn't specify: {0} " msg = msg.format(", ".join([str(m.name) for m in missing])) raise CodeGenArgumentListError(msg, missing) # create redundant arguments to produce the requested sequence name_arg_dict = {x.name: x for x in arg_list} new_args = [] for symbol in argument_sequence: try: new_args.append(name_arg_dict[symbol]) except KeyError: if isinstance(symbol, (IndexedBase, MatrixSymbol)): metadata = {'dimensions': dimensions(symbol)} else: metadata = {} new_args.append(InputArgument(symbol, **metadata)) arg_list = new_args return Routine(name, arg_list, return_val, local_vars, global_vars) def write(self, routines, prefix, to_files=False, header=True, empty=True): """Writes all the source code files for the given routines. The generated source is returned as a list of (filename, contents) tuples, or is written to files (see below). Each filename consists of the given prefix, appended with an appropriate extension. Parameters ========== routines : list A list of Routine instances to be written prefix : string The prefix for the output files to_files : bool, optional When True, the output is written to files. Otherwise, a list of (filename, contents) tuples is returned. [default: False] header : bool, optional When True, a header comment is included on top of each source file. [default: True] empty : bool, optional When True, empty lines are included to structure the source files. [default: True] """ if to_files: for dump_fn in self.dump_fns: filename = "%s.%s" % (prefix, dump_fn.extension) with open(filename, "w") as f: dump_fn(self, routines, f, prefix, header, empty) else: result = [] for dump_fn in self.dump_fns: filename = "%s.%s" % (prefix, dump_fn.extension) contents = StringIO() dump_fn(self, routines, contents, prefix, header, empty) result.append((filename, contents.getvalue())) return result def dump_code(self, routines, f, prefix, header=True, empty=True): """Write the code by calling language specific methods. The generated file contains all the definitions of the routines in low-level code and refers to the header file if appropriate. Parameters ========== routines : list A list of Routine instances. f : file-like Where to write the file. prefix : string The filename prefix, used to refer to the proper header file. Only the basename of the prefix is used. header : bool, optional When True, a header comment is included on top of each source file. [default : True] empty : bool, optional When True, empty lines are included to structure the source files. 
[default : True] """ code_lines = self._preprocessor_statements(prefix) for routine in routines: if empty: code_lines.append("\n") code_lines.extend(self._get_routine_opening(routine)) code_lines.extend(self._declare_arguments(routine)) code_lines.extend(self._declare_globals(routine)) code_lines.extend(self._declare_locals(routine)) if empty: code_lines.append("\n") code_lines.extend(self._call_printer(routine)) if empty: code_lines.append("\n") code_lines.extend(self._get_routine_ending(routine)) code_lines = self._indent_code(''.join(code_lines)) if header: code_lines = ''.join(self._get_header() + [code_lines]) if code_lines: f.write(code_lines) class CodeGenError(Exception): pass class CodeGenArgumentListError(Exception): @property def missing_args(self): return self.args[1] header_comment = """Code generated with sympy %(version)s See http://www.sympy.org/ for more information. This file is part of '%(project)s' """ class CCodeGen(CodeGen): """Generator for C code. The .write() method inherited from CodeGen will output a code file and an interface file, <prefix>.c and <prefix>.h respectively. """ code_extension = "c" interface_extension = "h" standard = 'c99' def __init__(self, project="project", printer=None, preprocessor_statements=None, cse=False): super().__init__(project=project, cse=cse) self.printer = printer or c_code_printers[self.standard.lower()]() self.preprocessor_statements = preprocessor_statements if preprocessor_statements is None: self.preprocessor_statements = ['#include <math.h>'] def _get_header(self): """Writes a common header for the generated files.""" code_lines = [] code_lines.append("/" + "*"*78 + '\n') tmp = header_comment % {"version": sympy_version, "project": self.project} for line in tmp.splitlines(): code_lines.append(" *%s*\n" % line.center(76)) code_lines.append(" " + "*"*78 + "/\n") return code_lines def get_prototype(self, routine): """Returns a string for the function prototype of the routine. If the routine has multiple result objects, an CodeGenError is raised. See: https://en.wikipedia.org/wiki/Function_prototype """ if len(routine.results) > 1: raise CodeGenError("C only supports a single or no return value.") elif len(routine.results) == 1: ctype = routine.results[0].get_datatype('C') else: ctype = "void" type_args = [] for arg in routine.arguments: name = self.printer.doprint(arg.name) if arg.dimensions or isinstance(arg, ResultBase): type_args.append((arg.get_datatype('C'), "*%s" % name)) else: type_args.append((arg.get_datatype('C'), name)) arguments = ", ".join([ "%s %s" % t for t in type_args]) return "%s %s(%s)" % (ctype, routine.name, arguments) def _preprocessor_statements(self, prefix): code_lines = [] code_lines.append('#include "{}.h"'.format(os.path.basename(prefix))) code_lines.extend(self.preprocessor_statements) code_lines = ['{}\n'.format(l) for l in code_lines] return code_lines def _get_routine_opening(self, routine): prototype = self.get_prototype(routine) return ["%s {\n" % prototype] def _declare_arguments(self, routine): # arguments are declared in prototype return [] def _declare_globals(self, routine): # global variables are not explicitly declared within C functions return [] def _declare_locals(self, routine): # Compose a list of symbols to be dereferenced in the function # body. These are the arguments that were passed by a reference # pointer, excluding arrays. 
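        # Illustrative note (hypothetical routine, names chosen for the example):
        # a scalar output such as ``y`` in ``Eq(y, 2*x)`` is passed to the C
        # function as ``double *y`` (see get_prototype above), so listing it in
        # the dereference list below makes the printer emit ``(*y) = 2*x;`` in
        # the function body rather than ``y = 2*x;``.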
dereference = [] for arg in routine.arguments: if isinstance(arg, ResultBase) and not arg.dimensions: dereference.append(arg.name) code_lines = [] for result in routine.local_vars: # local variables that are simple symbols such as those used as indices into # for loops are declared elsewhere. if not isinstance(result, Result): continue if result.name != result.result_var: raise CodeGenError("Result variable and name should match: {}".format(result)) assign_to = result.name t = result.get_datatype('c') if isinstance(result.expr, (MatrixBase, MatrixExpr)): dims = result.expr.shape if dims[1] != 1: raise CodeGenError("Only column vectors are supported in local variables. Local result {} has dimensions {}".format(result, dims)) code_lines.append("{} {}[{}];\n".format(t, str(assign_to), dims[0])) prefix = "" else: prefix = "const {} ".format(t) constants, not_c, c_expr = self._printer_method_with_settings( 'doprint', dict(human=False, dereference=dereference), result.expr, assign_to=assign_to) for name, value in sorted(constants, key=str): code_lines.append("double const %s = %s;\n" % (name, value)) code_lines.append("{}{}\n".format(prefix, c_expr)) return code_lines def _call_printer(self, routine): code_lines = [] # Compose a list of symbols to be dereferenced in the function # body. These are the arguments that were passed by a reference # pointer, excluding arrays. dereference = [] for arg in routine.arguments: if isinstance(arg, ResultBase) and not arg.dimensions: dereference.append(arg.name) return_val = None for result in routine.result_variables: if isinstance(result, Result): assign_to = routine.name + "_result" t = result.get_datatype('c') code_lines.append("{} {};\n".format(t, str(assign_to))) return_val = assign_to else: assign_to = result.result_var try: constants, not_c, c_expr = self._printer_method_with_settings( 'doprint', dict(human=False, dereference=dereference), result.expr, assign_to=assign_to) except AssignmentError: assign_to = result.result_var code_lines.append( "%s %s;\n" % (result.get_datatype('c'), str(assign_to))) constants, not_c, c_expr = self._printer_method_with_settings( 'doprint', dict(human=False, dereference=dereference), result.expr, assign_to=assign_to) for name, value in sorted(constants, key=str): code_lines.append("double const %s = %s;\n" % (name, value)) code_lines.append("%s\n" % c_expr) if return_val: code_lines.append(" return %s;\n" % return_val) return code_lines def _get_routine_ending(self, routine): return ["}\n"] def dump_c(self, routines, f, prefix, header=True, empty=True): self.dump_code(routines, f, prefix, header, empty) dump_c.extension = code_extension # type: ignore dump_c.__doc__ = CodeGen.dump_code.__doc__ def dump_h(self, routines, f, prefix, header=True, empty=True): """Writes the C header file. This file contains all the function declarations. Parameters ========== routines : list A list of Routine instances. f : file-like Where to write the file. prefix : string The filename prefix, used to construct the include guards. Only the basename of the prefix is used. header : bool, optional When True, a header comment is included on top of each source file. [default : True] empty : bool, optional When True, empty lines are included to structure the source files.
[default : True] """ if header: print(''.join(self._get_header()), file=f) guard_name = "%s__%s__H" % (self.project.replace( " ", "_").upper(), prefix.replace("/", "_").upper()) # include guards if empty: print(file=f) print("#ifndef %s" % guard_name, file=f) print("#define %s" % guard_name, file=f) if empty: print(file=f) # declaration of the function prototypes for routine in routines: prototype = self.get_prototype(routine) print("%s;" % prototype, file=f) # end if include guards if empty: print(file=f) print("#endif", file=f) if empty: print(file=f) dump_h.extension = interface_extension # type: ignore # This list of dump functions is used by CodeGen.write to know which dump # functions it has to call. dump_fns = [dump_c, dump_h] class C89CodeGen(CCodeGen): standard = 'C89' class C99CodeGen(CCodeGen): standard = 'C99' class FCodeGen(CodeGen): """Generator for Fortran 95 code The .write() method inherited from CodeGen will output a code file and an interface file, <prefix>.f90 and <prefix>.h respectively. """ code_extension = "f90" interface_extension = "h" def __init__(self, project='project', printer=None): super().__init__(project) self.printer = printer or FCodePrinter() def _get_header(self): """Writes a common header for the generated files.""" code_lines = [] code_lines.append("!" + "*"*78 + '\n') tmp = header_comment % {"version": sympy_version, "project": self.project} for line in tmp.splitlines(): code_lines.append("!*%s*\n" % line.center(76)) code_lines.append("!" + "*"*78 + '\n') return code_lines def _preprocessor_statements(self, prefix): return [] def _get_routine_opening(self, routine): """Returns the opening statements of the fortran routine.""" code_list = [] if len(routine.results) > 1: raise CodeGenError( "Fortran only supports a single or no return value.") elif len(routine.results) == 1: result = routine.results[0] code_list.append(result.get_datatype('fortran')) code_list.append("function") else: code_list.append("subroutine") args = ", ".join("%s" % self._get_symbol(arg.name) for arg in routine.arguments) call_sig = "{}({})\n".format(routine.name, args) # Fortran 95 requires all lines be less than 132 characters, so wrap # this line before appending. call_sig = ' &\n'.join(textwrap.wrap(call_sig, width=60, break_long_words=False)) + '\n' code_list.append(call_sig) code_list = [' '.join(code_list)] code_list.append('implicit none\n') return code_list def _declare_arguments(self, routine): # argument type declarations code_list = [] array_list = [] scalar_list = [] for arg in routine.arguments: if isinstance(arg, InputArgument): typeinfo = "%s, intent(in)" % arg.get_datatype('fortran') elif isinstance(arg, InOutArgument): typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran') elif isinstance(arg, OutputArgument): typeinfo = "%s, intent(out)" % arg.get_datatype('fortran') else: raise CodeGenError("Unknown Argument type: %s" % type(arg)) fprint = self._get_symbol if arg.dimensions: # fortran arrays start at 1 dimstr = ", ".join(["%s:%s" % ( fprint(dim[0] + 1), fprint(dim[1] + 1)) for dim in arg.dimensions]) typeinfo += ", dimension(%s)" % dimstr array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name))) else: scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name))) # scalars first, because they can be used in array declarations code_list.extend(scalar_list) code_list.extend(array_list) return code_list def _declare_globals(self, routine): # Global variables not explicitly declared within Fortran 90 functions. 
# Note: a future F77 mode may need to generate "common" blocks. return [] def _declare_locals(self, routine): code_list = [] for var in sorted(routine.local_vars, key=str): typeinfo = get_default_datatype(var) code_list.append("%s :: %s\n" % ( typeinfo.fname, self._get_symbol(var))) return code_list def _get_routine_ending(self, routine): """Returns the closing statements of the fortran routine.""" if len(routine.results) == 1: return ["end function\n"] else: return ["end subroutine\n"] def get_interface(self, routine): """Returns a string for the function interface. The routine should have a single result object, which can be None. If the routine has multiple result objects, a CodeGenError is raised. See: https://en.wikipedia.org/wiki/Function_prototype """ prototype = [ "interface\n" ] prototype.extend(self._get_routine_opening(routine)) prototype.extend(self._declare_arguments(routine)) prototype.extend(self._get_routine_ending(routine)) prototype.append("end interface\n") return "".join(prototype) def _call_printer(self, routine): declarations = [] code_lines = [] for result in routine.result_variables: if isinstance(result, Result): assign_to = routine.name elif isinstance(result, (OutputArgument, InOutArgument)): assign_to = result.result_var constants, not_fortran, f_expr = self._printer_method_with_settings( 'doprint', dict(human=False, source_format='free', standard=95), result.expr, assign_to=assign_to) for obj, v in sorted(constants, key=str): t = get_default_datatype(obj) declarations.append( "%s, parameter :: %s = %s\n" % (t.fname, obj, v)) for obj in sorted(not_fortran, key=str): t = get_default_datatype(obj) if isinstance(obj, Function): name = obj.func else: name = obj declarations.append("%s :: %s\n" % (t.fname, name)) code_lines.append("%s\n" % f_expr) return declarations + code_lines def _indent_code(self, codelines): return self._printer_method_with_settings( 'indent_code', dict(human=False, source_format='free'), codelines) def dump_f95(self, routines, f, prefix, header=True, empty=True): # check that symbols are unique with ignorecase for r in routines: lowercase = {str(x).lower() for x in r.variables} orig_case = {str(x) for x in r.variables} if len(lowercase) < len(orig_case): raise CodeGenError("Fortran ignores case. Got symbols: %s" % (", ".join([str(var) for var in r.variables]))) self.dump_code(routines, f, prefix, header, empty) dump_f95.extension = code_extension # type: ignore dump_f95.__doc__ = CodeGen.dump_code.__doc__ def dump_h(self, routines, f, prefix, header=True, empty=True): """Writes the interface to a header file. This file contains all the function declarations. Parameters ========== routines : list A list of Routine instances. f : file-like Where to write the file. prefix : string The filename prefix. header : bool, optional When True, a header comment is included on top of each source file. [default : True] empty : bool, optional When True, empty lines are included to structure the source files. [default : True] """ if header: print(''.join(self._get_header()), file=f) if empty: print(file=f) # declaration of the function prototypes for routine in routines: prototype = self.get_interface(routine) f.write(prototype) if empty: print(file=f) dump_h.extension = interface_extension # type: ignore # This list of dump functions is used by CodeGen.write to know which dump # functions it has to call. dump_fns = [dump_f95, dump_h] class JuliaCodeGen(CodeGen): """Generator for Julia code. 
The .write() method inherited from CodeGen will output a code file <prefix>.jl. """ code_extension = "jl" def __init__(self, project='project', printer=None): super().__init__(project) self.printer = printer or JuliaCodePrinter() def routine(self, name, expr, argument_sequence, global_vars): """Specialized Routine creation for Julia.""" if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): if not expr: raise ValueError("No expression given") expressions = Tuple(*expr) else: expressions = Tuple(expr) # local variables local_vars = {i.label for i in expressions.atoms(Idx)} # global variables global_vars = set() if global_vars is None else set(global_vars) # symbols that should be arguments old_symbols = expressions.free_symbols - local_vars - global_vars symbols = set() for s in old_symbols: if isinstance(s, Idx): symbols.update(s.args[1].free_symbols) elif not isinstance(s, Indexed): symbols.add(s) # Julia supports multiple return values return_vals = [] output_args = [] for (i, expr) in enumerate(expressions): if isinstance(expr, Equality): out_arg = expr.lhs expr = expr.rhs symbol = out_arg if isinstance(out_arg, Indexed): dims = tuple([ (S.One, dim) for dim in out_arg.shape]) symbol = out_arg.base.label output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims)) if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)): raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " "can define output arguments.") return_vals.append(Result(expr, name=symbol, result_var=out_arg)) if not expr.has(symbol): # this is a pure output: remove from the symbols list, so # it doesn't become an input. symbols.remove(symbol) else: # we have no name for this output return_vals.append(Result(expr, name='out%d' % (i+1))) # setup input argument list output_args.sort(key=lambda x: str(x.name)) arg_list = list(output_args) array_symbols = {} for array in expressions.atoms(Indexed): array_symbols[array.base.label] = array for array in expressions.atoms(MatrixSymbol): array_symbols[array] = array for symbol in sorted(symbols, key=str): arg_list.append(InputArgument(symbol)) if argument_sequence is not None: # if the user has supplied IndexedBase instances, we'll accept that new_sequence = [] for arg in argument_sequence: if isinstance(arg, IndexedBase): new_sequence.append(arg.label) else: new_sequence.append(arg) argument_sequence = new_sequence missing = [x for x in arg_list if x.name not in argument_sequence] if missing: msg = "Argument list didn't specify: {0} " msg = msg.format(", ".join([str(m.name) for m in missing])) raise CodeGenArgumentListError(msg, missing) # create redundant arguments to produce the requested sequence name_arg_dict = {x.name: x for x in arg_list} new_args = [] for symbol in argument_sequence: try: new_args.append(name_arg_dict[symbol]) except KeyError: new_args.append(InputArgument(symbol)) arg_list = new_args return Routine(name, arg_list, return_vals, local_vars, global_vars) def _get_header(self): """Writes a common header for the generated files.""" code_lines = [] tmp = header_comment % {"version": sympy_version, "project": self.project} for line in tmp.splitlines(): if line == '': code_lines.append("#\n") else: code_lines.append("# %s\n" % line) return code_lines def _preprocessor_statements(self, prefix): return [] def _get_routine_opening(self, routine): """Returns the opening statements of the routine.""" code_list = [] code_list.append("function ") # Inputs args = [] for i, arg in enumerate(routine.arguments): if isinstance(arg, 
OutputArgument): raise CodeGenError("Julia: invalid argument of type %s" % str(type(arg))) if isinstance(arg, (InputArgument, InOutArgument)): args.append("%s" % self._get_symbol(arg.name)) args = ", ".join(args) code_list.append("%s(%s)\n" % (routine.name, args)) code_list = [ "".join(code_list) ] return code_list def _declare_arguments(self, routine): return [] def _declare_globals(self, routine): return [] def _declare_locals(self, routine): return [] def _get_routine_ending(self, routine): outs = [] for result in routine.results: if isinstance(result, Result): # Note: name not result_var; want `y` not `y[i]` for Indexed s = self._get_symbol(result.name) else: raise CodeGenError("unexpected object in Routine results") outs.append(s) return ["return " + ", ".join(outs) + "\nend\n"] def _call_printer(self, routine): declarations = [] code_lines = [] for i, result in enumerate(routine.results): if isinstance(result, Result): assign_to = result.result_var else: raise CodeGenError("unexpected object in Routine results") constants, not_supported, jl_expr = self._printer_method_with_settings( 'doprint', dict(human=False), result.expr, assign_to=assign_to) for obj, v in sorted(constants, key=str): declarations.append( "%s = %s\n" % (obj, v)) for obj in sorted(not_supported, key=str): if isinstance(obj, Function): name = obj.func else: name = obj declarations.append( "# unsupported: %s\n" % (name)) code_lines.append("%s\n" % (jl_expr)) return declarations + code_lines def _indent_code(self, codelines): # Note that indenting seems to happen twice, first # statement-by-statement by JuliaPrinter then again here. p = JuliaCodePrinter({'human': False}) return p.indent_code(codelines) def dump_jl(self, routines, f, prefix, header=True, empty=True): self.dump_code(routines, f, prefix, header, empty) dump_jl.extension = code_extension # type: ignore dump_jl.__doc__ = CodeGen.dump_code.__doc__ # This list of dump functions is used by CodeGen.write to know which dump # functions it has to call. dump_fns = [dump_jl] class OctaveCodeGen(CodeGen): """Generator for Octave code. The .write() method inherited from CodeGen will output a code file <prefix>.m. Octave .m files usually contain one function. That function name should match the filename (``prefix``). If you pass multiple ``name_expr`` pairs, the latter ones are presumed to be private functions accessed by the primary function. You should only pass inputs to ``argument_sequence``: outputs are ordered according to their order in ``name_expr``. """ code_extension = "m" def __init__(self, project='project', printer=None): super().__init__(project) self.printer = printer or OctaveCodePrinter() def routine(self, name, expr, argument_sequence, global_vars): """Specialized Routine creation for Octave.""" # FIXME: this is probably general enough for other high-level # languages, perhaps its the C/Fortran one that is specialized! 
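        # Sketch of what this method builds (the names "myfun" and "out1" are
        # made up for the example): for
        #     codegen([("myfun", Eq(Symbol("out1"), x + y))], "Octave")
        # the Equality left-hand side becomes a Result named out1 and x, y
        # become InputArguments, so dump_m below emits roughly
        #     function out1 = myfun(x, y)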
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): if not expr: raise ValueError("No expression given") expressions = Tuple(*expr) else: expressions = Tuple(expr) # local variables local_vars = {i.label for i in expressions.atoms(Idx)} # global variables global_vars = set() if global_vars is None else set(global_vars) # symbols that should be arguments old_symbols = expressions.free_symbols - local_vars - global_vars symbols = set() for s in old_symbols: if isinstance(s, Idx): symbols.update(s.args[1].free_symbols) elif not isinstance(s, Indexed): symbols.add(s) # Octave supports multiple return values return_vals = [] for (i, expr) in enumerate(expressions): if isinstance(expr, Equality): out_arg = expr.lhs expr = expr.rhs symbol = out_arg if isinstance(out_arg, Indexed): symbol = out_arg.base.label if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)): raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " "can define output arguments.") return_vals.append(Result(expr, name=symbol, result_var=out_arg)) if not expr.has(symbol): # this is a pure output: remove from the symbols list, so # it doesn't become an input. symbols.remove(symbol) else: # we have no name for this output return_vals.append(Result(expr, name='out%d' % (i+1))) # setup input argument list arg_list = [] array_symbols = {} for array in expressions.atoms(Indexed): array_symbols[array.base.label] = array for array in expressions.atoms(MatrixSymbol): array_symbols[array] = array for symbol in sorted(symbols, key=str): arg_list.append(InputArgument(symbol)) if argument_sequence is not None: # if the user has supplied IndexedBase instances, we'll accept that new_sequence = [] for arg in argument_sequence: if isinstance(arg, IndexedBase): new_sequence.append(arg.label) else: new_sequence.append(arg) argument_sequence = new_sequence missing = [x for x in arg_list if x.name not in argument_sequence] if missing: msg = "Argument list didn't specify: {0} " msg = msg.format(", ".join([str(m.name) for m in missing])) raise CodeGenArgumentListError(msg, missing) # create redundant arguments to produce the requested sequence name_arg_dict = {x.name: x for x in arg_list} new_args = [] for symbol in argument_sequence: try: new_args.append(name_arg_dict[symbol]) except KeyError: new_args.append(InputArgument(symbol)) arg_list = new_args return Routine(name, arg_list, return_vals, local_vars, global_vars) def _get_header(self): """Writes a common header for the generated files.""" code_lines = [] tmp = header_comment % {"version": sympy_version, "project": self.project} for line in tmp.splitlines(): if line == '': code_lines.append("%\n") else: code_lines.append("%% %s\n" % line) return code_lines def _preprocessor_statements(self, prefix): return [] def _get_routine_opening(self, routine): """Returns the opening statements of the routine.""" code_list = [] code_list.append("function ") # Outputs outs = [] for i, result in enumerate(routine.results): if isinstance(result, Result): # Note: name not result_var; want `y` not `y(i)` for Indexed s = self._get_symbol(result.name) else: raise CodeGenError("unexpected object in Routine results") outs.append(s) if len(outs) > 1: code_list.append("[" + (", ".join(outs)) + "]") else: code_list.append("".join(outs)) code_list.append(" = ") # Inputs args = [] for i, arg in enumerate(routine.arguments): if isinstance(arg, (OutputArgument, InOutArgument)): raise CodeGenError("Octave: invalid argument of type %s" % str(type(arg))) if isinstance(arg, InputArgument): 
args.append("%s" % self._get_symbol(arg.name)) args = ", ".join(args) code_list.append("%s(%s)\n" % (routine.name, args)) code_list = [ "".join(code_list) ] return code_list def _declare_arguments(self, routine): return [] def _declare_globals(self, routine): if not routine.global_vars: return [] s = " ".join(sorted([self._get_symbol(g) for g in routine.global_vars])) return ["global " + s + "\n"] def _declare_locals(self, routine): return [] def _get_routine_ending(self, routine): return ["end\n"] def _call_printer(self, routine): declarations = [] code_lines = [] for i, result in enumerate(routine.results): if isinstance(result, Result): assign_to = result.result_var else: raise CodeGenError("unexpected object in Routine results") constants, not_supported, oct_expr = self._printer_method_with_settings( 'doprint', dict(human=False), result.expr, assign_to=assign_to) for obj, v in sorted(constants, key=str): declarations.append( " %s = %s; %% constant\n" % (obj, v)) for obj in sorted(not_supported, key=str): if isinstance(obj, Function): name = obj.func else: name = obj declarations.append( " %% unsupported: %s\n" % (name)) code_lines.append("%s\n" % (oct_expr)) return declarations + code_lines def _indent_code(self, codelines): return self._printer_method_with_settings( 'indent_code', dict(human=False), codelines) def dump_m(self, routines, f, prefix, header=True, empty=True, inline=True): # Note used to call self.dump_code() but we need more control for header code_lines = self._preprocessor_statements(prefix) for i, routine in enumerate(routines): if i > 0: if empty: code_lines.append("\n") code_lines.extend(self._get_routine_opening(routine)) if i == 0: if routine.name != prefix: raise ValueError('Octave function name should match prefix') if header: code_lines.append("%" + prefix.upper() + " Autogenerated by sympy\n") code_lines.append(''.join(self._get_header())) code_lines.extend(self._declare_arguments(routine)) code_lines.extend(self._declare_globals(routine)) code_lines.extend(self._declare_locals(routine)) if empty: code_lines.append("\n") code_lines.extend(self._call_printer(routine)) if empty: code_lines.append("\n") code_lines.extend(self._get_routine_ending(routine)) code_lines = self._indent_code(''.join(code_lines)) if code_lines: f.write(code_lines) dump_m.extension = code_extension # type: ignore dump_m.__doc__ = CodeGen.dump_code.__doc__ # This list of dump functions is used by CodeGen.write to know which dump # functions it has to call. dump_fns = [dump_m] class RustCodeGen(CodeGen): """Generator for Rust code. 
The .write() method inherited from CodeGen will output a code file <prefix>.rs """ code_extension = "rs" def __init__(self, project="project", printer=None): super().__init__(project=project) self.printer = printer or RustCodePrinter() def routine(self, name, expr, argument_sequence, global_vars): """Specialized Routine creation for Rust.""" if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): if not expr: raise ValueError("No expression given") expressions = Tuple(*expr) else: expressions = Tuple(expr) # local variables local_vars = {i.label for i in expressions.atoms(Idx)} # global variables global_vars = set() if global_vars is None else set(global_vars) # symbols that should be arguments symbols = expressions.free_symbols - local_vars - global_vars - expressions.atoms(Indexed) # Rust supports multiple return values return_vals = [] output_args = [] for (i, expr) in enumerate(expressions): if isinstance(expr, Equality): out_arg = expr.lhs expr = expr.rhs symbol = out_arg if isinstance(out_arg, Indexed): dims = tuple([ (S.One, dim) for dim in out_arg.shape]) symbol = out_arg.base.label output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims)) if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)): raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " "can define output arguments.") return_vals.append(Result(expr, name=symbol, result_var=out_arg)) if not expr.has(symbol): # this is a pure output: remove from the symbols list, so # it doesn't become an input. symbols.remove(symbol) else: # we have no name for this output return_vals.append(Result(expr, name='out%d' % (i+1))) # setup input argument list output_args.sort(key=lambda x: str(x.name)) arg_list = list(output_args) array_symbols = {} for array in expressions.atoms(Indexed): array_symbols[array.base.label] = array for array in expressions.atoms(MatrixSymbol): array_symbols[array] = array for symbol in sorted(symbols, key=str): arg_list.append(InputArgument(symbol)) if argument_sequence is not None: # if the user has supplied IndexedBase instances, we'll accept that new_sequence = [] for arg in argument_sequence: if isinstance(arg, IndexedBase): new_sequence.append(arg.label) else: new_sequence.append(arg) argument_sequence = new_sequence missing = [x for x in arg_list if x.name not in argument_sequence] if missing: msg = "Argument list didn't specify: {0} " msg = msg.format(", ".join([str(m.name) for m in missing])) raise CodeGenArgumentListError(msg, missing) # create redundant arguments to produce the requested sequence name_arg_dict = {x.name: x for x in arg_list} new_args = [] for symbol in argument_sequence: try: new_args.append(name_arg_dict[symbol]) except KeyError: new_args.append(InputArgument(symbol)) arg_list = new_args return Routine(name, arg_list, return_vals, local_vars, global_vars) def _get_header(self): """Writes a common header for the generated files.""" code_lines = [] code_lines.append("/*\n") tmp = header_comment % {"version": sympy_version, "project": self.project} for line in tmp.splitlines(): code_lines.append((" *%s" % line.center(76)).rstrip() + "\n") code_lines.append(" */\n") return code_lines def get_prototype(self, routine): """Returns a string for the function prototype of the routine. If the routine has multiple result objects, an CodeGenError is raised. 
See: https://en.wikipedia.org/wiki/Function_prototype """ results = [i.get_datatype('Rust') for i in routine.results] if len(results) == 1: rstype = " -> " + results[0] elif len(routine.results) > 1: rstype = " -> (" + ", ".join(results) + ")" else: rstype = "" type_args = [] for arg in routine.arguments: name = self.printer.doprint(arg.name) if arg.dimensions or isinstance(arg, ResultBase): type_args.append(("*%s" % name, arg.get_datatype('Rust'))) else: type_args.append((name, arg.get_datatype('Rust'))) arguments = ", ".join([ "%s: %s" % t for t in type_args]) return "fn %s(%s)%s" % (routine.name, arguments, rstype) def _preprocessor_statements(self, prefix): code_lines = [] # code_lines.append("use std::f64::consts::*;\n") return code_lines def _get_routine_opening(self, routine): prototype = self.get_prototype(routine) return ["%s {\n" % prototype] def _declare_arguments(self, routine): # arguments are declared in prototype return [] def _declare_globals(self, routine): # global variables are not explicitly declared within C functions return [] def _declare_locals(self, routine): # loop variables are declared in loop statement return [] def _call_printer(self, routine): code_lines = [] declarations = [] returns = [] # Compose a list of symbols to be dereferenced in the function # body. These are the arguments that were passed by a reference # pointer, excluding arrays. dereference = [] for arg in routine.arguments: if isinstance(arg, ResultBase) and not arg.dimensions: dereference.append(arg.name) for i, result in enumerate(routine.results): if isinstance(result, Result): assign_to = result.result_var returns.append(str(result.result_var)) else: raise CodeGenError("unexpected object in Routine results") constants, not_supported, rs_expr = self._printer_method_with_settings( 'doprint', dict(human=False), result.expr, assign_to=assign_to) for name, value in sorted(constants, key=str): declarations.append("const %s: f64 = %s;\n" % (name, value)) for obj in sorted(not_supported, key=str): if isinstance(obj, Function): name = obj.func else: name = obj declarations.append("// unsupported: %s\n" % (name)) code_lines.append("let %s\n" % rs_expr); if len(returns) > 1: returns = ['(' + ', '.join(returns) + ')'] returns.append('\n') return declarations + code_lines + returns def _get_routine_ending(self, routine): return ["}\n"] def dump_rs(self, routines, f, prefix, header=True, empty=True): self.dump_code(routines, f, prefix, header, empty) dump_rs.extension = code_extension # type: ignore dump_rs.__doc__ = CodeGen.dump_code.__doc__ # This list of dump functions is used by CodeGen.write to know which dump # functions it has to call. dump_fns = [dump_rs] def get_code_generator(language, project=None, standard=None, printer = None): if language == 'C': if standard is None: pass elif standard.lower() == 'c89': language = 'C89' elif standard.lower() == 'c99': language = 'C99' CodeGenClass = {"C": CCodeGen, "C89": C89CodeGen, "C99": C99CodeGen, "F95": FCodeGen, "JULIA": JuliaCodeGen, "OCTAVE": OctaveCodeGen, "RUST": RustCodeGen}.get(language.upper()) if CodeGenClass is None: raise ValueError("Language '%s' is not supported." % language) return CodeGenClass(project, printer) # # Friendly functions # def codegen(name_expr, language=None, prefix=None, project="project", to_files=False, header=True, empty=True, argument_sequence=None, global_vars=None, standard=None, code_gen=None, printer = None): """Generate source code for expressions in a given language. 
Parameters ========== name_expr : tuple, or list of tuples A single (name, expression) tuple or a list of (name, expression) tuples. Each tuple corresponds to a routine. If the expression is an equality (an instance of class Equality) the left hand side is considered an output argument. If expression is an iterable, then the routine will have multiple outputs. language : string, A string that indicates the source code language. This is case insensitive. Currently, 'C', 'F95' and 'Octave' are supported. 'Octave' generates code compatible with both Octave and Matlab. prefix : string, optional A prefix for the names of the files that contain the source code. Language-dependent suffixes will be appended. If omitted, the name of the first name_expr tuple is used. project : string, optional A project name, used for making unique preprocessor instructions. [default: "project"] to_files : bool, optional When True, the code will be written to one or more files with the given prefix, otherwise strings with the names and contents of these files are returned. [default: False] header : bool, optional When True, a header is written on top of each source file. [default: True] empty : bool, optional When True, empty lines are used to structure the code. [default: True] argument_sequence : iterable, optional Sequence of arguments for the routine in a preferred order. A CodeGenError is raised if required arguments are missing. Redundant arguments are used without warning. If omitted, arguments will be ordered alphabetically, but with all input arguments first, and then output or in-out arguments. global_vars : iterable, optional Sequence of global variables used by the routine. Variables listed here will not show up as function arguments. standard : string code_gen : CodeGen instance An instance of a CodeGen subclass. Overrides ``language``. Examples ======== >>> from sympy.utilities.codegen import codegen >>> from sympy.abc import x, y, z >>> [(c_name, c_code), (h_name, c_header)] = codegen( ... ("f", x+y*z), "C89", "test", header=False, empty=False) >>> print(c_name) test.c >>> print(c_code) #include "test.h" #include <math.h> double f(double x, double y, double z) { double f_result; f_result = x + y*z; return f_result; } <BLANKLINE> >>> print(h_name) test.h >>> print(c_header) #ifndef PROJECT__TEST__H #define PROJECT__TEST__H double f(double x, double y, double z); #endif <BLANKLINE> Another example using Equality objects to give named outputs. Here the filename (prefix) is taken from the first (name, expr) pair. >>> from sympy.abc import f, g >>> from sympy import Eq >>> [(c_name, c_code), (h_name, c_header)] = codegen( ... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])], ... "C99", header=False, empty=False) >>> print(c_name) myfcn.c >>> print(c_code) #include "myfcn.h" #include <math.h> double myfcn(double x, double y) { double myfcn_result; myfcn_result = x + y; return myfcn_result; } void fcn2(double x, double y, double *f, double *g) { (*f) = 2*x; (*g) = y; } <BLANKLINE> If the generated function(s) will be part of a larger project where various global variables have been defined, the 'global_vars' option can be used to remove the specified variables from the function signature >>> from sympy.utilities.codegen import codegen >>> from sympy.abc import x, y, z >>> [(f_name, f_code), header] = codegen( ... ("f", x+y*z), "F95", header=False, empty=False, ... 
argument_sequence=(x, y), global_vars=(z,)) >>> print(f_code) REAL*8 function f(x, y) implicit none REAL*8, intent(in) :: x REAL*8, intent(in) :: y f = x + y*z end function <BLANKLINE> """ # Initialize the code generator. if language is None: if code_gen is None: raise ValueError("Need either language or code_gen") else: if code_gen is not None: raise ValueError("You cannot specify both language and code_gen.") code_gen = get_code_generator(language, project, standard, printer) if isinstance(name_expr[0], str): # single tuple is given, turn it into a singleton list with a tuple. name_expr = [name_expr] if prefix is None: prefix = name_expr[0][0] # Construct Routines appropriate for this code_gen from (name, expr) pairs. routines = [] for name, expr in name_expr: routines.append(code_gen.routine(name, expr, argument_sequence, global_vars)) # Write the code. return code_gen.write(routines, prefix, to_files, header, empty) def make_routine(name, expr, argument_sequence=None, global_vars=None, language="F95"): """A factory that makes an appropriate Routine from an expression. Parameters ========== name : string The name of this routine in the generated code. expr : expression or list/tuple of expressions A SymPy expression that the Routine instance will represent. If given a list or tuple of expressions, the routine will be considered to have multiple return values and/or output arguments. argument_sequence : list or tuple, optional List arguments for the routine in a preferred order. If omitted, the results are language dependent, for example, alphabetical order or in the same order as the given expressions. global_vars : iterable, optional Sequence of global variables used by the routine. Variables listed here will not show up as function arguments. language : string, optional Specify a target language. The Routine itself should be language-agnostic but the precise way one is created, error checking, etc depend on the language. [default: "F95"]. A decision about whether to use output arguments or return values is made depending on both the language and the particular mathematical expressions. For an expression of type Equality, the left hand side is typically made into an OutputArgument (or perhaps an InOutArgument if appropriate). Otherwise, typically, the calculated expression is made a return values of the routine. Examples ======== >>> from sympy.utilities.codegen import make_routine >>> from sympy.abc import x, y, f, g >>> from sympy import Eq >>> r = make_routine('test', [Eq(f, 2*x), Eq(g, x + y)]) >>> [arg.result_var for arg in r.results] [] >>> [arg.name for arg in r.arguments] [x, y, f, g] >>> [arg.name for arg in r.result_variables] [f, g] >>> r.local_vars set() Another more complicated example with a mixture of specified and automatically-assigned names. Also has Matrix output. >>> from sympy import Matrix >>> r = make_routine('fcn', [x*y, Eq(f, 1), Eq(g, x + g), Matrix([[x, 2]])]) >>> [arg.result_var for arg in r.results] # doctest: +SKIP [result_5397460570204848505] >>> [arg.expr for arg in r.results] [x*y] >>> [arg.name for arg in r.arguments] # doctest: +SKIP [x, y, f, g, out_8598435338387848786] We can examine the various arguments more closely: >>> from sympy.utilities.codegen import (InputArgument, OutputArgument, ... 
InOutArgument) >>> [a.name for a in r.arguments if isinstance(a, InputArgument)] [x, y] >>> [a.name for a in r.arguments if isinstance(a, OutputArgument)] # doctest: +SKIP [f, out_8598435338387848786] >>> [a.expr for a in r.arguments if isinstance(a, OutputArgument)] [1, Matrix([[x, 2]])] >>> [a.name for a in r.arguments if isinstance(a, InOutArgument)] [g] >>> [a.expr for a in r.arguments if isinstance(a, InOutArgument)] [g + x] """ # initialize a new code generator code_gen = get_code_generator(language) return code_gen.routine(name, expr, argument_sequence, global_vars)
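# A small usage sketch of the two friendly functions above.  The routine name
# "demo_fn" and the expression are arbitrary choices made for this demo; run
# the module directly to see the generated sources printed to stdout.
if __name__ == "__main__":  # pragma: no cover
    from sympy.abc import x, y

    # codegen() returns (filename, contents) pairs when to_files=False
    # (the default), so nothing is written to disk here.
    (c_name, c_code), (h_name, c_header) = codegen(
        ("demo_fn", x**2 + y), "C99", "demo", header=False, empty=False)
    print(c_name)
    print(c_code)

    # make_routine() stops one step earlier and only builds the Routine,
    # which can then be handed to any CodeGen.write() implementation.
    r = make_routine("demo_fn", x**2 + y, language="F95")
    print([arg.name for arg in r.arguments])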
""" This module provides convenient functions to transform sympy expressions to lambda functions which can be used to calculate numerical values very fast. """ from typing import Any, Dict, Iterable import builtins import inspect import keyword import textwrap import linecache from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.core.compatibility import (is_sequence, iterable, NotIterable) from sympy.utilities.misc import filldedent from sympy.utilities.decorator import doctest_depends_on __doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']} # Default namespaces, letting us define translations that can't be defined # by simple variable maps, like I => 1j MATH_DEFAULT = {} # type: Dict[str, Any] MPMATH_DEFAULT = {} # type: Dict[str, Any] NUMPY_DEFAULT = {"I": 1j} # type: Dict[str, Any] SCIPY_DEFAULT = {"I": 1j} # type: Dict[str, Any] TENSORFLOW_DEFAULT = {} # type: Dict[str, Any] SYMPY_DEFAULT = {} # type: Dict[str, Any] NUMEXPR_DEFAULT = {} # type: Dict[str, Any] # These are the namespaces the lambda functions will use. # These are separate from the names above because they are modified # throughout this file, whereas the defaults should remain unmodified. MATH = MATH_DEFAULT.copy() MPMATH = MPMATH_DEFAULT.copy() NUMPY = NUMPY_DEFAULT.copy() SCIPY = SCIPY_DEFAULT.copy() TENSORFLOW = TENSORFLOW_DEFAULT.copy() SYMPY = SYMPY_DEFAULT.copy() NUMEXPR = NUMEXPR_DEFAULT.copy() # Mappings between sympy and other modules function names. MATH_TRANSLATIONS = { "ceiling": "ceil", "E": "e", "ln": "log", } # NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses # of Function to automatically evalf. MPMATH_TRANSLATIONS = { "Abs": "fabs", "elliptic_k": "ellipk", "elliptic_f": "ellipf", "elliptic_e": "ellipe", "elliptic_pi": "ellippi", "ceiling": "ceil", "chebyshevt": "chebyt", "chebyshevu": "chebyu", "E": "e", "I": "j", "ln": "log", #"lowergamma":"lower_gamma", "oo": "inf", #"uppergamma":"upper_gamma", "LambertW": "lambertw", "MutableDenseMatrix": "matrix", "ImmutableDenseMatrix": "matrix", "conjugate": "conj", "dirichlet_eta": "altzeta", "Ei": "ei", "Shi": "shi", "Chi": "chi", "Si": "si", "Ci": "ci", "RisingFactorial": "rf", "FallingFactorial": "ff", } NUMPY_TRANSLATIONS = {} # type: Dict[str, str] SCIPY_TRANSLATIONS = {} # type: Dict[str, str] TENSORFLOW_TRANSLATIONS = {} # type: Dict[str, str] NUMEXPR_TRANSLATIONS = {} # type: Dict[str, str] # Available modules: MODULES = { "math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)), "mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)), "numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)), "scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import numpy; import scipy; from scipy import *; from scipy.special import *",)), "tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)), "sympy": (SYMPY, SYMPY_DEFAULT, {}, ( "from sympy.functions import *", "from sympy.matrices import *", "from sympy import Integral, pi, oo, nan, zoo, E, I",)), "numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS, ("import_module('numexpr')", )), } def _import(module, reload=False): """ Creates a global translation dictionary for module. The argument module has to be one of the following strings: "math", "mpmath", "numpy", "sympy", "tensorflow". These dictionaries map names of python functions to their equivalent in other modules. 
""" # Required despite static analysis claiming it is not used from sympy.external import import_module # noqa:F401 try: namespace, namespace_default, translations, import_commands = MODULES[ module] except KeyError: raise NameError( "'%s' module can't be used for lambdification" % module) # Clear namespace or exit if namespace != namespace_default: # The namespace was already generated, don't do it again if not forced. if reload: namespace.clear() namespace.update(namespace_default) else: return for import_command in import_commands: if import_command.startswith('import_module'): module = eval(import_command) if module is not None: namespace.update(module.__dict__) continue else: try: exec(import_command, {}, namespace) continue except ImportError: pass raise ImportError( "can't import '%s' with '%s' command" % (module, import_command)) # Add translated names to namespace for sympyname, translation in translations.items(): namespace[sympyname] = namespace[translation] # For computing the modulus of a sympy expression we use the builtin abs # function, instead of the previously used fabs function for all # translation modules. This is because the fabs function in the math # module does not accept complex valued arguments. (see issue 9474). The # only exception, where we don't use the builtin abs function is the # mpmath translation module, because mpmath.fabs returns mpf objects in # contrast to abs(). if 'Abs' not in namespace: namespace['Abs'] = abs # Used for dynamically generated filenames that are inserted into the # linecache. _lambdify_generated_counter = 1 @doctest_depends_on(modules=('numpy', 'tensorflow', ), python_version=(3,)) def lambdify(args: Iterable, expr, modules=None, printer=None, use_imps=True, dummify=False): """Convert a SymPy expression into a function that allows for fast numeric evaluation. .. warning:: This function uses ``exec``, and thus shouldn't be used on unsanitized input. .. versionchanged:: 1.7.0 Passing a set for the *args* parameter is deprecated as sets are unordered. Use an ordered iterable such as a list or tuple. Explanation =========== For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an equivalent NumPy function that numerically evaluates it: >>> from sympy import sin, cos, symbols, lambdify >>> import numpy as np >>> x = symbols('x') >>> expr = sin(x) + cos(x) >>> expr sin(x) + cos(x) >>> f = lambdify(x, expr, 'numpy') >>> a = np.array([1, 2]) >>> f(a) [1.38177329 0.49315059] The primary purpose of this function is to provide a bridge from SymPy expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath, and tensorflow. In general, SymPy functions do not work with objects from other libraries, such as NumPy arrays, and functions from numeric libraries like NumPy or mpmath do not work on SymPy expressions. ``lambdify`` bridges the two by converting a SymPy expression to an equivalent numeric function. The basic workflow with ``lambdify`` is to first create a SymPy expression representing whatever mathematical function you wish to evaluate. This should be done using only SymPy functions and expressions. Then, use ``lambdify`` to convert this to an equivalent function for numerical evaluation. For instance, above we created ``expr`` using the SymPy symbol ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an equivalent NumPy function ``f``, and called it on a NumPy array ``a``. 
Parameters ========== args : List[Symbol] A variable or a list of variables whose nesting represents the nesting of the arguments that will be passed to the function. Variables can be symbols, undefined functions, or matrix symbols. >>> from sympy import Eq >>> from sympy.abc import x, y, z The list of variables should match the structure of how the arguments will be passed to the function. Simply enclose the parameters as they will be passed in a list. To call a function like ``f(x)`` then ``[x]`` should be the first argument to ``lambdify``; for this case a single ``x`` can also be used: >>> f = lambdify(x, x + 1) >>> f(1) 2 >>> f = lambdify([x], x + 1) >>> f(1) 2 To call a function like ``f(x, y)`` then ``[x, y]`` will be the first argument of the ``lambdify``: >>> f = lambdify([x, y], x + y) >>> f(1, 1) 2 To call a function with a single 3-element tuple like ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first argument of the ``lambdify``: >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2)) >>> f((3, 4, 5)) True If two args will be passed and the first is a scalar but the second is a tuple with two arguments then the items in the list should match that structure: >>> f = lambdify([x, (y, z)], x + y + z) >>> f(1, (2, 3)) 6 expr : Expr An expression, list of expressions, or matrix to be evaluated. Lists may be nested. If the expression is a list, the output will also be a list. >>> f = lambdify(x, [x, [x + 1, x + 2]]) >>> f(1) [1, [2, 3]] If it is a matrix, an array will be returned (for the NumPy module). >>> from sympy import Matrix >>> f = lambdify(x, Matrix([x, x + 1])) >>> f(1) [[1] [2]] Note that the argument order here (variables then expression) is used to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works (roughly) like ``lambda x: expr`` (see :ref:`lambdify-how-it-works` below). modules : str, optional Specifies the numeric library to use. If not specified, *modules* defaults to: - ``["scipy", "numpy"]`` if SciPy is installed - ``["numpy"]`` if only NumPy is installed - ``["math", "mpmath", "sympy"]`` if neither is installed. That is, SymPy functions are replaced as far as possible by either ``scipy`` or ``numpy`` functions if available, and Python's standard library ``math``, or ``mpmath`` functions otherwise. *modules* can be one of the following types: - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``, ``"scipy"``, ``"sympy"``, or ``"tensorflow"``. This uses the corresponding printer and namespace mapping for that module. - A module (e.g., ``math``). This uses the global namespace of the module. If the module is one of the above known modules, it will also use the corresponding printer and namespace mapping (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``). - A dictionary that maps names of SymPy functions to arbitrary functions (e.g., ``{'sin': custom_sin}``). - A list that contains a mix of the arguments above, with higher priority given to entries appearing first (e.g., to use the NumPy module but override the ``sin`` function with a custom version, you can use ``[{'sin': custom_sin}, 'numpy']``). dummify : bool, optional Whether or not the variables in the provided expression that are not valid Python identifiers are substituted with dummy symbols. This allows for undefined functions like ``Function('f')(t)`` to be supplied as arguments. By default, the variables are only dummified if they are not valid Python identifiers. 
Set ``dummify=True`` to replace all arguments with dummy symbols (if ``args`` is not a string) - for example, to ensure that the arguments do not redefine any built-in names. Examples ======== >>> from sympy.utilities.lambdify import implemented_function >>> from sympy import sqrt, sin, Matrix >>> from sympy import Function >>> from sympy.abc import w, x, y, z >>> f = lambdify(x, x**2) >>> f(2) 4 >>> f = lambdify((x, y, z), [z, y, x]) >>> f(1,2,3) [3, 2, 1] >>> f = lambdify(x, sqrt(x)) >>> f(4) 2.0 >>> f = lambdify((x, y), sin(x*y)**2) >>> f(0, 5) 0.0 >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy') >>> row(1, 2) Matrix([[1, 3]]) ``lambdify`` can be used to translate SymPy expressions into mpmath functions. This may be preferable to using ``evalf`` (which uses mpmath on the backend) in some cases. >>> f = lambdify(x, sin(x), 'mpmath') >>> f(1) 0.8414709848078965 Tuple arguments are handled and the lambdified function should be called with the same type of arguments as were used to create the function: >>> f = lambdify((x, (y, z)), x + y) >>> f(1, (2, 4)) 3 The ``flatten`` function can be used to always work with flattened arguments: >>> from sympy.utilities.iterables import flatten >>> args = w, (x, (y, z)) >>> vals = 1, (2, (3, 4)) >>> f = lambdify(flatten(args), w + x + y + z) >>> f(*flatten(vals)) 10 Functions present in ``expr`` can also carry their own numerical implementations, in a callable attached to the ``_imp_`` attribute. This can be used with undefined functions using the ``implemented_function`` factory: >>> f = implemented_function(Function('f'), lambda x: x+1) >>> func = lambdify(x, f(x)) >>> func(4) 5 ``lambdify`` always prefers ``_imp_`` implementations to implementations in other namespaces, unless the ``use_imps`` input parameter is False. Usage with Tensorflow: >>> import tensorflow as tf >>> from sympy import Max, sin, lambdify >>> from sympy.abc import x >>> f = Max(x, sin(x)) >>> func = lambdify(x, f, 'tensorflow') After tensorflow v2, eager execution is enabled by default. If you want to get the compatible result across tensorflow v1 and v2 as same as this tutorial, run this line. >>> tf.compat.v1.enable_eager_execution() If you have eager execution enabled, you can get the result out immediately as you can use numpy. If you pass tensorflow objects, you may get an ``EagerTensor`` object instead of value. >>> result = func(tf.constant(1.0)) >>> print(result) tf.Tensor(1.0, shape=(), dtype=float32) >>> print(result.__class__) <class 'tensorflow.python.framework.ops.EagerTensor'> You can use ``.numpy()`` to get the numpy value of the tensor. >>> result.numpy() 1.0 >>> var = tf.Variable(2.0) >>> result = func(var) # also works for tf.Variable and tf.Placeholder >>> result.numpy() 2.0 And it works with any shape array. >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> result = func(tensor) >>> result.numpy() [[1. 2.] [3. 4.]] Notes ===== - For functions involving large array calculations, numexpr can provide a significant speedup over numpy. Please note that the available functions for numexpr are more limited than numpy but can be expanded with ``implemented_function`` and user defined subclasses of Function. If specified, numexpr may be the only option in modules. The official list of numexpr functions can be found at: https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions - In previous versions of SymPy, ``lambdify`` replaced ``Matrix`` with ``numpy.matrix`` by default. As of SymPy 1.0 ``numpy.array`` is the default. 
To get the old default behavior you must pass in ``[{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']`` to the ``modules`` kwarg. >>> from sympy import lambdify, Matrix >>> from sympy.abc import x, y >>> import numpy >>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy'] >>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat) >>> f(1, 2) [[1] [2]] - In the above examples, the generated functions can accept scalar values or numpy arrays as arguments. However, in some cases the generated function relies on the input being a numpy array: >>> from sympy import Piecewise >>> from sympy.testing.pytest import ignore_warnings >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy") >>> with ignore_warnings(RuntimeWarning): ... f(numpy.array([-1, 0, 1, 2])) [-1. 0. 1. 0.5] >>> f(0) Traceback (most recent call last): ... ZeroDivisionError: division by zero In such cases, the input should be wrapped in a numpy array: >>> with ignore_warnings(RuntimeWarning): ... float(f(numpy.array([0]))) 0.0 Or if numpy functionality is not required another module can be used: >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math") >>> f(0) 0 .. _lambdify-how-it-works: How it works ============ When using this function, it helps a great deal to have an idea of what it is doing. At its core, lambdify is nothing more than a namespace translation, on top of a special printer that makes some corner cases work properly. To understand lambdify, first we must properly understand how Python namespaces work. Say we had two files. One called ``sin_cos_sympy.py``, with .. code:: python # sin_cos_sympy.py from sympy import sin, cos def sin_cos(x): return sin(x) + cos(x) and one called ``sin_cos_numpy.py`` with .. code:: python # sin_cos_numpy.py from numpy import sin, cos def sin_cos(x): return sin(x) + cos(x) The two files define an identical function ``sin_cos``. However, in the first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and ``cos``. In the second, they are defined as the NumPy versions. If we were to import the first file and use the ``sin_cos`` function, we would get something like >>> from sin_cos_sympy import sin_cos # doctest: +SKIP >>> sin_cos(1) # doctest: +SKIP cos(1) + sin(1) On the other hand, if we imported ``sin_cos`` from the second file, we would get >>> from sin_cos_numpy import sin_cos # doctest: +SKIP >>> sin_cos(1) # doctest: +SKIP 1.38177329068 In the first case we got a symbolic output, because it used the symbolic ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions from NumPy. But notice that the versions of ``sin`` and ``cos`` that were used was not inherent to the ``sin_cos`` function definition. Both ``sin_cos`` definitions are exactly the same. Rather, it was based on the names defined at the module where the ``sin_cos`` function was defined. The key point here is that when function in Python references a name that is not defined in the function, that name is looked up in the "global" namespace of the module where that function is defined. Now, in Python, we can emulate this behavior without actually writing a file to disk using the ``exec`` function. ``exec`` takes a string containing a block of Python code, and a dictionary that should contain the global variables of the module. It then executes the code "in" that dictionary, as if it were the module globals. 
The following is equivalent to the ``sin_cos`` defined in ``sin_cos_sympy.py``: >>> import sympy >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos} >>> exec(''' ... def sin_cos(x): ... return sin(x) + cos(x) ... ''', module_dictionary) >>> sin_cos = module_dictionary['sin_cos'] >>> sin_cos(1) cos(1) + sin(1) and similarly with ``sin_cos_numpy``: >>> import numpy >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos} >>> exec(''' ... def sin_cos(x): ... return sin(x) + cos(x) ... ''', module_dictionary) >>> sin_cos = module_dictionary['sin_cos'] >>> sin_cos(1) 1.38177329068 So now we can get an idea of how ``lambdify`` works. The name "lambdify" comes from the fact that we can think of something like ``lambdify(x, sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why the symbols argument is first in ``lambdify``, as opposed to most SymPy functions where it comes after the expression: to better mimic the ``lambda`` keyword. ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and 1. Converts it to a string 2. Creates a module globals dictionary based on the modules that are passed in (by default, it uses the NumPy module) 3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the list of variables separated by commas, and ``{expr}`` is the string created in step 1., then ``exec``s that string with the module globals namespace and returns ``func``. In fact, functions returned by ``lambdify`` support inspection. So you can see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you are using IPython or the Jupyter notebook. >>> f = lambdify(x, sin(x) + cos(x)) >>> import inspect >>> print(inspect.getsource(f)) def _lambdifygenerated(x): return (sin(x) + cos(x)) This shows us the source code of the function, but not the namespace it was defined in. We can inspect that by looking at the ``__globals__`` attribute of ``f``: >>> f.__globals__['sin'] <ufunc 'sin'> >>> f.__globals__['cos'] <ufunc 'cos'> >>> f.__globals__['sin'] is numpy.sin True This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be ``numpy.sin`` and ``numpy.cos``. Note that there are some convenience layers in each of these steps, but at the core, this is how ``lambdify`` works. Step 1 is done using the ``LambdaPrinter`` printers defined in the printing module (see :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions to define how they should be converted to a string for different modules. You can change which printer ``lambdify`` uses by passing a custom printer in to the ``printer`` argument. Step 2 is augmented by certain translations. There are default translations for each module, but you can provide your own by passing a list to the ``modules`` argument. For instance, >>> def mysin(x): ... print('taking the sin of', x) ... return numpy.sin(x) ... >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy']) >>> f(1) taking the sin of 1 0.8414709848078965 The globals dictionary is generated from the list by merging the dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The merging is done so that earlier items take precedence, which is why ``mysin`` is used above instead of ``numpy.sin``. If you want to modify the way ``lambdify`` works for a given function, it is usually easiest to do so by modifying the globals dictionary as such. In more complicated cases, it may be necessary to create and pass in a custom printer. 
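    As a rough sketch of that more involved route (the overridden method and
    the helper name ``my_abs`` below are hypothetical, chosen only to show
    the mechanics), one can subclass an existing printer and hand the
    subclass to the ``printer`` argument:

    >>> from sympy import Abs
    >>> from sympy.printing.pycode import NumPyPrinter
    >>> class MyPrinter(NumPyPrinter):
    ...     def _print_Abs(self, expr):
    ...         return 'my_abs({})'.format(self._print(expr.args[0]))
    >>> f = lambdify(x, Abs(x), printer=MyPrinter)  # doctest: +SKIP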
Finally, step 3 is augmented with certain convenience operations, such as the addition of a docstring. Understanding how ``lambdify`` works can make it easier to avoid certain gotchas when using it. For instance, a common mistake is to create a lambdified function for one module (say, NumPy), and pass it objects from another (say, a SymPy expression). For instance, say we create >>> from sympy.abc import x >>> f = lambdify(x, x + 1, 'numpy') Now if we pass in a NumPy array, we get that array plus 1 >>> import numpy >>> a = numpy.array([1, 2]) >>> f(a) [2 3] But what happens if you make the mistake of passing in a SymPy expression instead of a NumPy array: >>> f(x + 1) x + 2 This worked, but it was only by accident. Now take a different lambdified function: >>> from sympy import sin >>> g = lambdify(x, x + sin(x), 'numpy') This works as expected on NumPy arrays: >>> g(a) [1.84147098 2.90929743] But if we try to pass in a SymPy expression, it fails >>> try: ... g(x + 1) ... # NumPy release after 1.17 raises TypeError instead of ... # AttributeError ... except (AttributeError, TypeError): ... raise AttributeError() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... AttributeError: Now, let's look at what happened. The reason this fails is that ``g`` calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not know how to operate on a SymPy object. **As a general rule, NumPy functions do not know how to operate on SymPy expressions, and SymPy functions do not know how to operate on NumPy arrays. This is why lambdify exists: to provide a bridge between SymPy and NumPy.** However, why is it that ``f`` did work? That's because ``f`` doesn't call any functions, it only adds 1. So the resulting function that is created, ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals namespace it is defined in. Thus it works, but only by accident. A future version of ``lambdify`` may remove this behavior. Be aware that certain implementation details described here may change in future versions of SymPy. The API of passing in custom modules and printers will not change, but the details of how a lambda function is created may change. However, the basic idea will remain the same, and understanding it will be helpful to understanding the behavior of lambdify. **In general: you should create lambdified functions for one module (say, NumPy), and only pass it input types that are compatible with that module (say, NumPy arrays).** Remember that by default, if the ``module`` argument is not provided, ``lambdify`` creates functions using the NumPy and SciPy namespaces. """ from sympy.core.symbol import Symbol # If the user hasn't specified any modules, use what is available. if modules is None: try: _import("scipy") except ImportError: try: _import("numpy") except ImportError: # Use either numpy (if available) or python.math where possible. # XXX: This leads to different behaviour on different systems and # might be the reason for irreproducible errors. modules = ["math", "mpmath", "sympy"] else: modules = ["numpy"] else: modules = ["numpy", "scipy"] # Get the needed namespaces. 
namespaces = [] # First find any function implementations if use_imps: namespaces.append(_imp_namespace(expr)) # Check for dict before iterating if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'): namespaces.append(modules) else: # consistency check if _module_present('numexpr', modules) and len(modules) > 1: raise TypeError("numexpr must be the only item in 'modules'") namespaces += list(modules) # fill namespace with first having highest priority namespace = {} # type: Dict[str, Any] for m in namespaces[::-1]: buf = _get_namespace(m) namespace.update(buf) if hasattr(expr, "atoms"): #Try if you can extract symbols from the expression. #Move on if expr.atoms in not implemented. syms = expr.atoms(Symbol) for term in syms: namespace.update({str(term): term}) if printer is None: if _module_present('mpmath', namespaces): from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore elif _module_present('scipy', namespaces): from sympy.printing.pycode import SciPyPrinter as Printer # type: ignore elif _module_present('numpy', namespaces): from sympy.printing.pycode import NumPyPrinter as Printer # type: ignore elif _module_present('numexpr', namespaces): from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore elif _module_present('tensorflow', namespaces): from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore elif _module_present('sympy', namespaces): from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore else: from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore user_functions = {} for m in namespaces[::-1]: if isinstance(m, dict): for k in m: user_functions[k] = k printer = Printer({'fully_qualified_modules': False, 'inline': True, 'allow_unknown_functions': True, 'user_functions': user_functions}) if isinstance(args, set): SymPyDeprecationWarning( feature="The list of arguments is a `set`. This leads to unpredictable results", useinstead=": Convert set into list or tuple", issue=20013, deprecated_since_version="1.6.3" ).warn() # Get the names of the args, for creating a docstring if not iterable(args): args = (args,) names = [] # Grab the callers frame, for getting the names by inspection (if needed) callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore for n, var in enumerate(args): if hasattr(var, 'name'): names.append(var.name) else: # It's an iterable. Try to get name by inspection of calling frame. name_list = [var_name for var_name, var_val in callers_local_vars if var_val is var] if len(name_list) == 1: names.append(name_list[0]) else: # Cannot infer name with certainty. arg_# will have to do. names.append('arg_' + str(n)) # Create the function definition code and execute it funcname = '_lambdifygenerated' if _module_present('tensorflow', namespaces): funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) # type: _EvaluatorPrinter else: funcprinter = _EvaluatorPrinter(printer, dummify) funcstr = funcprinter.doprint(funcname, args, expr) # Collect the module imports from the code printers. imp_mod_lines = [] for mod, keys in (getattr(printer, 'module_imports', None) or {}).items(): for k in keys: if k not in namespace: ln = "from %s import %s" % (mod, k) try: exec(ln, {}, namespace) except ImportError: # Tensorflow 2.0 has issues with importing a specific # function from its submodule. 
# https://github.com/tensorflow/tensorflow/issues/33022 ln = "%s = %s.%s" % (k, mod, k) exec(ln, {}, namespace) imp_mod_lines.append(ln) # Provide lambda expression with builtins, and compatible implementation of range namespace.update({'builtins':builtins, 'range':range}) funclocals = {} # type: Dict[str, Any] global _lambdify_generated_counter filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter _lambdify_generated_counter += 1 c = compile(funcstr, filename, 'exec') exec(c, namespace, funclocals) # mtime has to be None or else linecache.checkcache will remove it linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore func = funclocals[funcname] # Apply the docstring sig = "func({})".format(", ".join(str(i) for i in names)) sig = textwrap.fill(sig, subsequent_indent=' '*8) expr_str = str(expr) if len(expr_str) > 78: expr_str = textwrap.wrap(expr_str, 75)[0] + '...' func.__doc__ = ( "Created with lambdify. Signature:\n\n" "{sig}\n\n" "Expression:\n\n" "{expr}\n\n" "Source code:\n\n" "{src}\n\n" "Imported modules:\n\n" "{imp_mods}" ).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines)) return func def _module_present(modname, modlist): if modname in modlist: return True for m in modlist: if hasattr(m, '__name__') and m.__name__ == modname: return True return False def _get_namespace(m): """ This is used by _lambdify to parse its arguments. """ if isinstance(m, str): _import(m) return MODULES[m][0] elif isinstance(m, dict): return m elif hasattr(m, "__dict__"): return m.__dict__ else: raise TypeError("Argument must be either a string, dict or module but it is: %s" % m) def lambdastr(args, expr, printer=None, dummify=None): """ Returns a string that can be evaluated to a lambda function. Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.utilities.lambdify import lambdastr >>> lambdastr(x, x**2) 'lambda x: (x**2)' >>> lambdastr((x,y,z), [z,y,x]) 'lambda x,y,z: ([z, y, x])' Although tuples may not appear as arguments to lambda in Python 3, lambdastr will create a lambda function that will unpack the original arguments so that nested arguments can be handled: >>> lambdastr((x, (y, z)), x + y) 'lambda _0,_1: (lambda x,y,z: (x + y))(_0,_1[0],_1[1])' """ # Transforming everything to strings. 
from sympy.matrices import DeferredVector from sympy import Dummy, sympify, Symbol, Function, flatten, Derivative, Basic if printer is not None: if inspect.isfunction(printer): lambdarepr = printer else: if inspect.isclass(printer): lambdarepr = lambda expr: printer().doprint(expr) else: lambdarepr = lambda expr: printer.doprint(expr) else: #XXX: This has to be done here because of circular imports from sympy.printing.lambdarepr import lambdarepr def sub_args(args, dummies_dict): if isinstance(args, str): return args elif isinstance(args, DeferredVector): return str(args) elif iterable(args): dummies = flatten([sub_args(a, dummies_dict) for a in args]) return ",".join(str(a) for a in dummies) else: # replace these with Dummy symbols if isinstance(args, (Function, Symbol, Derivative)): dummies = Dummy() dummies_dict.update({args : dummies}) return str(dummies) else: return str(args) def sub_expr(expr, dummies_dict): expr = sympify(expr) # dict/tuple are sympified to Basic if isinstance(expr, Basic): expr = expr.xreplace(dummies_dict) # list is not sympified to Basic elif isinstance(expr, list): expr = [sub_expr(a, dummies_dict) for a in expr] return expr # Transform args def isiter(l): return iterable(l, exclude=(str, DeferredVector, NotIterable)) def flat_indexes(iterable): n = 0 for el in iterable: if isiter(el): for ndeep in flat_indexes(el): yield (n,) + ndeep else: yield (n,) n += 1 if dummify is None: dummify = any(isinstance(a, Basic) and a.atoms(Function, Derivative) for a in ( args if isiter(args) else [args])) if isiter(args) and any(isiter(i) for i in args): dum_args = [str(Dummy(str(i))) for i in range(len(args))] indexed_args = ','.join([ dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]]) for ind in flat_indexes(args)]) lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify) return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args) dummies_dict = {} if dummify: args = sub_args(args, dummies_dict) else: if isinstance(args, str): pass elif iterable(args, exclude=DeferredVector): args = ",".join(str(a) for a in args) # Transform expr if dummify: if isinstance(expr, str): pass else: expr = sub_expr(expr, dummies_dict) expr = lambdarepr(expr) return "lambda %s: (%s)" % (args, expr) class _EvaluatorPrinter: def __init__(self, printer=None, dummify=False): self._dummify = dummify #XXX: This has to be done here because of circular imports from sympy.printing.lambdarepr import LambdaPrinter if printer is None: printer = LambdaPrinter() if inspect.isfunction(printer): self._exprrepr = printer else: if inspect.isclass(printer): printer = printer() self._exprrepr = printer.doprint #if hasattr(printer, '_print_Symbol'): # symbolrepr = printer._print_Symbol #if hasattr(printer, '_print_Dummy'): # dummyrepr = printer._print_Dummy # Used to print the generated function arguments in a standard way self._argrepr = LambdaPrinter().doprint def doprint(self, funcname, args, expr): """Returns the function definition code as a string.""" from sympy import Dummy funcbody = [] if not iterable(args): args = [args] argstrs, expr = self._preprocess(args, expr) # Generate argument unpacking and final argument list funcargs = [] unpackings = [] for argstr in argstrs: if iterable(argstr): funcargs.append(self._argrepr(Dummy())) unpackings.extend(self._print_unpacking(argstr, funcargs[-1])) else: funcargs.append(argstr) funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs)) # Wrap input arguments before unpacking 
funcbody.extend(self._print_funcargwrapping(funcargs)) funcbody.extend(unpackings) funcbody.append('return ({})'.format(self._exprrepr(expr))) funclines = [funcsig] funclines.extend(' ' + line for line in funcbody) return '\n'.join(funclines) + '\n' @classmethod def _is_safe_ident(cls, ident): return isinstance(ident, str) and ident.isidentifier() \ and not keyword.iskeyword(ident) def _preprocess(self, args, expr): """Preprocess args, expr to replace arguments that do not map to valid Python identifiers. Returns string form of args, and updated expr. """ from sympy import Dummy, Function, flatten, Derivative, ordered, Basic from sympy.matrices import DeferredVector from sympy.core.symbol import uniquely_named_symbol from sympy.core.expr import Expr # Args of type Dummy can cause name collisions with args # of type Symbol. Force dummify of everything in this # situation. dummify = self._dummify or any( isinstance(arg, Dummy) for arg in flatten(args)) argstrs = [None]*len(args) for arg, i in reversed(list(ordered(zip(args, range(len(args)))))): if iterable(arg): s, expr = self._preprocess(arg, expr) elif isinstance(arg, DeferredVector): s = str(arg) elif isinstance(arg, Basic) and arg.is_symbol: s = self._argrepr(arg) if dummify or not self._is_safe_ident(s): dummy = Dummy() if isinstance(expr, Expr): dummy = uniquely_named_symbol( dummy.name, expr, modify=lambda s: '_' + s) s = self._argrepr(dummy) expr = self._subexpr(expr, {arg: dummy}) elif dummify or isinstance(arg, (Function, Derivative)): dummy = Dummy() s = self._argrepr(dummy) expr = self._subexpr(expr, {arg: dummy}) else: s = str(arg) argstrs[i] = s return argstrs, expr def _subexpr(self, expr, dummies_dict): from sympy.matrices import DeferredVector from sympy import sympify expr = sympify(expr) xreplace = getattr(expr, 'xreplace', None) if xreplace is not None: expr = xreplace(dummies_dict) else: if isinstance(expr, DeferredVector): pass elif isinstance(expr, dict): k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()] v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()] expr = dict(zip(k, v)) elif isinstance(expr, tuple): expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr) elif isinstance(expr, list): expr = [self._subexpr(sympify(a), dummies_dict) for a in expr] return expr def _print_funcargwrapping(self, args): """Generate argument wrapping code. args is the argument list of the generated function (strings). Return value is a list of lines of code that will be inserted at the beginning of the function definition. """ return [] def _print_unpacking(self, unpackto, arg): """Generate argument unpacking code. arg is the function argument to be unpacked (a string), and unpackto is a list or nested lists of the variable names (strings) to unpack to. """ def unpack_lhs(lvalues): return '[{}]'.format(', '.join( unpack_lhs(val) if iterable(val) else val for val in lvalues)) return ['{} = {}'.format(unpack_lhs(unpackto), arg)] class _TensorflowEvaluatorPrinter(_EvaluatorPrinter): def _print_unpacking(self, lvalues, rvalue): """Generate argument unpacking code. This method is used when the input value is not interable, but can be indexed (see issue #14655). 
""" from sympy import flatten def flat_indexes(elems): n = 0 for el in elems: if iterable(el): for ndeep in flat_indexes(el): yield (n,) + ndeep else: yield (n,) n += 1 indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind))) for ind in flat_indexes(lvalues)) return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)] def _imp_namespace(expr, namespace=None): """ Return namespace dict with function implementations We need to search for functions in anything that can be thrown at us - that is - anything that could be passed as ``expr``. Examples include sympy expressions, as well as tuples, lists and dicts that may contain sympy expressions. Parameters ---------- expr : object Something passed to lambdify, that will generate valid code from ``str(expr)``. namespace : None or mapping Namespace to fill. None results in new empty dict Returns ------- namespace : dict dict with keys of implemented function names within ``expr`` and corresponding values being the numerical implementation of function Examples ======== >>> from sympy.abc import x >>> from sympy.utilities.lambdify import implemented_function, _imp_namespace >>> from sympy import Function >>> f = implemented_function(Function('f'), lambda x: x+1) >>> g = implemented_function(Function('g'), lambda x: x*10) >>> namespace = _imp_namespace(f(g(x))) >>> sorted(namespace.keys()) ['f', 'g'] """ # Delayed import to avoid circular imports from sympy.core.function import FunctionClass if namespace is None: namespace = {} # tuples, lists, dicts are valid expressions if is_sequence(expr): for arg in expr: _imp_namespace(arg, namespace) return namespace elif isinstance(expr, dict): for key, val in expr.items(): # functions can be in dictionary keys _imp_namespace(key, namespace) _imp_namespace(val, namespace) return namespace # sympy expressions may be Functions themselves func = getattr(expr, 'func', None) if isinstance(func, FunctionClass): imp = getattr(func, '_imp_', None) if imp is not None: name = expr.func.__name__ if name in namespace and namespace[name] != imp: raise ValueError('We found more than one ' 'implementation with name ' '"%s"' % name) namespace[name] = imp # and / or they may take Functions as arguments if hasattr(expr, 'args'): for arg in expr.args: _imp_namespace(arg, namespace) return namespace def implemented_function(symfunc, implementation): """ Add numerical ``implementation`` to function ``symfunc``. ``symfunc`` can be an ``UndefinedFunction`` instance, or a name string. In the latter case we create an ``UndefinedFunction`` instance with that name. Be aware that this is a quick workaround, not a general method to create special symbolic functions. If you want to create a symbolic function to be used by all the machinery of SymPy you should subclass the ``Function`` class. Parameters ---------- symfunc : ``str`` or ``UndefinedFunction`` instance If ``str``, then create new ``UndefinedFunction`` with this as name. If ``symfunc`` is an Undefined function, create a new function with the same name and the implemented function attached. 
implementation : callable numerical implementation to be called by ``evalf()`` or ``lambdify`` Returns ------- afunc : sympy.FunctionClass instance function with attached implementation Examples ======== >>> from sympy.abc import x >>> from sympy.utilities.lambdify import lambdify, implemented_function >>> f = implemented_function('f', lambda x: x+1) >>> lam_f = lambdify(x, f(x)) >>> lam_f(4) 5 """ # Delayed import to avoid circular imports from sympy.core.function import UndefinedFunction # if name, create function to hold implementation kwargs = {} if isinstance(symfunc, UndefinedFunction): kwargs = symfunc._kwargs symfunc = symfunc.__name__ if isinstance(symfunc, str): # Keyword arguments to UndefinedFunction are added as attributes to # the created class. symfunc = UndefinedFunction( symfunc, _imp_=staticmethod(implementation), **kwargs) elif not isinstance(symfunc, UndefinedFunction): raise ValueError(filldedent(''' symfunc should be either a string or an UndefinedFunction instance.''')) return symfunc
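# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the docstring of
# ``implemented_function`` above notes that subclassing ``Function`` is the
# general way to create a symbolic function that works with all of SymPy.
# A minimal version of that approach, with an arbitrary example rule and the
# hypothetical name ``shifted``, could look like this.
if __name__ == '__main__':  # pragma: no cover
    from sympy import Function, Integer
    from sympy.abc import x

    class shifted(Function):
        """Example symbolic function: evaluates to arg + 1 on explicit numbers."""

        @classmethod
        def eval(cls, arg):
            # Return a value only for explicit numbers; otherwise return None
            # so the expression stays unevaluated (the standard Function idiom).
            if arg.is_Number:
                return arg + Integer(1)

    assert shifted(3) == 4              # evaluates on numbers
    assert shifted(x).func is shifted   # stays symbolic on symbols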
aed5dfbc865112986e7a3d2d50f70a57c1bf84b8bed13a056bf0d37c81b5b19d
""" pkgdata is a simple, extensible way for a package to acquire data file resources. The getResource function is equivalent to the standard idioms, such as the following minimal implementation:: import sys, os def getResource(identifier, pkgname=__name__): pkgpath = os.path.dirname(sys.modules[pkgname].__file__) path = os.path.join(pkgpath, identifier) return open(os.path.normpath(path), mode='rb') When a __loader__ is present on the module given by __name__, it will defer getResource to its get_data implementation and return it as a file-like object (such as StringIO). """ import sys import os from io import StringIO def get_resource(identifier, pkgname=__name__): """ Acquire a readable object for a given package name and identifier. An IOError will be raised if the resource can not be found. For example:: mydata = get_resource('mypkgdata.jpg').read() Note that the package name must be fully qualified, if given, such that it would be found in sys.modules. In some cases, getResource will return a real file object. In that case, it may be useful to use its name attribute to get the path rather than use it as a file-like object. For example, you may be handing data off to a C API. """ mod = sys.modules[pkgname] fn = getattr(mod, '__file__', None) if fn is None: raise OSError("%r has no __file__!") path = os.path.join(os.path.dirname(fn), identifier) loader = getattr(mod, '__loader__', None) if loader is not None: try: data = loader.get_data(path) except (OSError, AttributeError): pass else: return StringIO(data.decode('utf-8')) return open(os.path.normpath(path), 'rb')
bf47ffbd53e4f3315826778c336664a173ba05e698c0b894379cc7f04c6fd061
"""Useful utility decorators. """ import sys import types import inspect from sympy.core.decorators import wraps from sympy.core.compatibility import iterable from sympy.testing.runtests import DependencyError, SymPyDocTests, PyTestReporter def threaded_factory(func, use_add): """A factory for ``threaded`` decorators. """ from sympy.core import sympify from sympy.matrices import MatrixBase @wraps(func) def threaded_func(expr, *args, **kwargs): if isinstance(expr, MatrixBase): return expr.applyfunc(lambda f: func(f, *args, **kwargs)) elif iterable(expr): try: return expr.__class__([func(f, *args, **kwargs) for f in expr]) except TypeError: return expr else: expr = sympify(expr) if use_add and expr.is_Add: return expr.__class__(*[ func(f, *args, **kwargs) for f in expr.args ]) elif expr.is_Relational: return expr.__class__(func(expr.lhs, *args, **kwargs), func(expr.rhs, *args, **kwargs)) else: return func(expr, *args, **kwargs) return threaded_func def threaded(func): """Apply ``func`` to sub--elements of an object, including :class:`~.Add`. This decorator is intended to make it uniformly possible to apply a function to all elements of composite objects, e.g. matrices, lists, tuples and other iterable containers, or just expressions. This version of :func:`threaded` decorator allows threading over elements of :class:`~.Add` class. If this behavior is not desirable use :func:`xthreaded` decorator. Functions using this decorator must have the following signature:: @threaded def function(expr, *args, **kwargs): """ return threaded_factory(func, True) def xthreaded(func): """Apply ``func`` to sub--elements of an object, excluding :class:`~.Add`. This decorator is intended to make it uniformly possible to apply a function to all elements of composite objects, e.g. matrices, lists, tuples and other iterable containers, or just expressions. This version of :func:`threaded` decorator disallows threading over elements of :class:`~.Add` class. If this behavior is not desirable use :func:`threaded` decorator. Functions using this decorator must have the following signature:: @xthreaded def function(expr, *args, **kwargs): """ return threaded_factory(func, False) def conserve_mpmath_dps(func): """After the function finishes, resets the value of mpmath.mp.dps to the value it had before the function was run.""" import functools import mpmath def func_wrapper(*args, **kwargs): dps = mpmath.mp.dps try: return func(*args, **kwargs) finally: mpmath.mp.dps = dps func_wrapper = functools.update_wrapper(func_wrapper, func) return func_wrapper class no_attrs_in_subclass: """Don't 'inherit' certain attributes from a base class >>> from sympy.utilities.decorator import no_attrs_in_subclass >>> class A(object): ... x = 'test' >>> A.x = no_attrs_in_subclass(A, A.x) >>> class B(A): ... pass >>> hasattr(A, 'x') True >>> hasattr(B, 'x') False """ def __init__(self, cls, f): self.cls = cls self.f = f def __get__(self, instance, owner=None): if owner == self.cls: if hasattr(self.f, '__get__'): return self.f.__get__(instance, owner) return self.f raise AttributeError def doctest_depends_on(exe=None, modules=None, disable_viewers=None, python_version=None): """ Adds metadata about the dependencies which need to be met for doctesting the docstrings of the decorated objects. 
    exe should be a list of executables

    modules should be a list of modules

    disable_viewers should be a list of viewers for preview() to disable

    python_version should be the minimum Python version required, as a tuple
    (like (3, 0))
    """
    dependencies = {}
    if exe is not None:
        dependencies['executables'] = exe
    if modules is not None:
        dependencies['modules'] = modules
    if disable_viewers is not None:
        dependencies['disable_viewers'] = disable_viewers
    if python_version is not None:
        dependencies['python_version'] = python_version

    def skiptests():
        r = PyTestReporter()
        t = SymPyDocTests(r, None)
        try:
            t._check_dependencies(**dependencies)
        except DependencyError:
            return True  # Skip doctests
        else:
            return False  # Run doctests

    def depends_on_deco(fn):
        fn._doctest_depends_on = dependencies
        fn.__doctest_skip__ = skiptests

        if inspect.isclass(fn):
            fn._doctest_depends_on = no_attrs_in_subclass(
                fn, fn._doctest_depends_on)
            fn.__doctest_skip__ = no_attrs_in_subclass(
                fn, fn.__doctest_skip__)
        return fn

    return depends_on_deco


def public(obj):
    """
    Append ``obj``'s name to the global ``__all__`` variable (call site).

    By using this decorator on functions or classes you achieve the same goal
    as by filling the ``__all__`` variable manually, except that you do not
    have to repeat the object's name.  You also know whether an object is
    public at its definition site, not at some other location (where
    ``__all__`` was set).

    Note that in a multiple-decorator setup (in almost all cases) the
    ``@public`` decorator must be applied before any other decorators,
    because it relies on the pointer to the object's global namespace.  If
    you apply other decorators first, ``@public`` may end up modifying the
    wrong namespace.

    Examples
    ========

    >>> from sympy.utilities.decorator import public

    >>> __all__ # noqa: F821
    Traceback (most recent call last):
    ...
    NameError: name '__all__' is not defined

    >>> @public
    ... def some_function():
    ...     pass

    >>> __all__ # noqa: F821
    ['some_function']

    """
    if isinstance(obj, types.FunctionType):
        ns = obj.__globals__
        name = obj.__name__
    elif isinstance(obj, (type(type), type)):
        ns = sys.modules[obj.__module__].__dict__
        name = obj.__name__
    else:
        raise TypeError("expected a function or a class, got %s" % obj)

    if "__all__" not in ns:
        ns["__all__"] = [name]
    else:
        ns["__all__"].append(name)

    return obj


def memoize_property(propfunc):
    """Property decorator that caches the value of potentially expensive
    `propfunc` after the first evaluation. The cached value is stored in
    the corresponding property name with an attached underscore."""
    attrname = '_' + propfunc.__name__
    sentinel = object()

    @wraps(propfunc)
    def accessor(self):
        val = getattr(self, attrname, sentinel)
        if val is sentinel:
            val = propfunc(self)
            setattr(self, attrname, val)
        return val

    return property(accessor)
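# --------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module.  It contrasts
# ``threaded`` with ``xthreaded`` on an Add and shows a typical use of
# ``memoize_property``; all names below are arbitrary examples.
if __name__ == '__main__':  # pragma: no cover
    from sympy import Symbol, sin, pi

    x = Symbol('x')
    y = Symbol('y')

    @threaded
    def apply_sin(expr):
        return sin(expr)

    @xthreaded
    def apply_sin_whole(expr):
        return sin(expr)

    assert apply_sin(x + y) == sin(x) + sin(y)    # threads over Add terms
    assert apply_sin_whole(x + y) == sin(x + y)   # treats the Add as a whole

    class Circle:
        def __init__(self, r):
            self.r = r

        @memoize_property
        def area(self):
            # computed once, then cached on the instance as ``self._area``
            return pi*self.r**2

    c = Circle(2)
    assert c.area == 4*pi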
845dc59754a6fd112390507caff141e8ec60cff4f5724c3214cfc35f49a22394
""" The objects in this module allow the usage of the MatchPy pattern matching library on SymPy expressions. """ from sympy.external import import_module from sympy.functions import (log, sin, cos, tan, cot, csc, sec, erf, gamma, uppergamma) from sympy.functions.elementary.hyperbolic import acosh, asinh, atanh, acoth, acsch, asech, cosh, sinh, tanh, coth, sech, csch from sympy.functions.elementary.trigonometric import atan, acsc, asin, acot, acos, asec from sympy.functions.special.error_functions import fresnelc, fresnels, erfc, erfi, Ei from sympy import (Basic, Mul, Add, Pow, Integral, exp, Symbol) from sympy.utilities.decorator import doctest_depends_on matchpy = import_module("matchpy") if matchpy: from matchpy import Operation, CommutativeOperation, AssociativeOperation, OneIdentityOperation from matchpy.expressions.functions import op_iter, create_operation_expression, op_len Operation.register(Integral) Operation.register(Pow) OneIdentityOperation.register(Pow) Operation.register(Add) OneIdentityOperation.register(Add) CommutativeOperation.register(Add) AssociativeOperation.register(Add) Operation.register(Mul) OneIdentityOperation.register(Mul) CommutativeOperation.register(Mul) AssociativeOperation.register(Mul) Operation.register(exp) Operation.register(log) Operation.register(gamma) Operation.register(uppergamma) Operation.register(fresnels) Operation.register(fresnelc) Operation.register(erf) Operation.register(Ei) Operation.register(erfc) Operation.register(erfi) Operation.register(sin) Operation.register(cos) Operation.register(tan) Operation.register(cot) Operation.register(csc) Operation.register(sec) Operation.register(sinh) Operation.register(cosh) Operation.register(tanh) Operation.register(coth) Operation.register(csch) Operation.register(sech) Operation.register(asin) Operation.register(acos) Operation.register(atan) Operation.register(acot) Operation.register(acsc) Operation.register(asec) Operation.register(asinh) Operation.register(acosh) Operation.register(atanh) Operation.register(acoth) Operation.register(acsch) Operation.register(asech) @op_iter.register(Integral) # type: ignore def _(operation): return iter((operation._args[0],) + operation._args[1]) @op_iter.register(Basic) # type: ignore def _(operation): return iter(operation._args) @op_len.register(Integral) # type: ignore def _(operation): return 1 + len(operation._args[1]) @op_len.register(Basic) # type: ignore def _(operation): return len(operation._args) @create_operation_expression.register(Basic) def sympy_op_factory(old_operation, new_operands, variable_name=True): return type(old_operation)(*new_operands) if matchpy: from matchpy import Wildcard else: class Wildcard: def __init__(self, min_length, fixed_size, variable_name, optional): pass @doctest_depends_on(modules=('matchpy',)) class _WildAbstract(Wildcard, Symbol): min_length = None # abstract field required in subclasses fixed_size = None # abstract field required in subclasses def __init__(self, variable_name=None, optional=None, **assumptions): min_length = self.min_length fixed_size = self.fixed_size Wildcard.__init__(self, min_length, fixed_size, str(variable_name), optional) def __new__(cls, variable_name=None, optional=None, **assumptions): cls._sanitize(assumptions, cls) return _WildAbstract.__xnew__(cls, variable_name, optional, **assumptions) def __getnewargs__(self): return self.min_count, self.fixed_size, self.variable_name, self.optional @staticmethod def __xnew__(cls, variable_name=None, optional=None, **assumptions): obj = 
Symbol.__xnew__(cls, variable_name, **assumptions) return obj def _hashable_content(self): if self.optional: return super()._hashable_content() + (self.min_count, self.fixed_size, self.variable_name, self.optional) else: return super()._hashable_content() + (self.min_count, self.fixed_size, self.variable_name) def __copy__(self) -> '_WildAbstract': return type(self)(variable_name=self.variable_name, optional=self.optional) def __repr__(self): return str(self) @doctest_depends_on(modules=('matchpy',)) class WildDot(_WildAbstract): min_length = 1 fixed_size = True @doctest_depends_on(modules=('matchpy',)) class WildPlus(_WildAbstract): min_length = 1 fixed_size = False @doctest_depends_on(modules=('matchpy',)) class WildStar(_WildAbstract): min_length = 0 fixed_size = False
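# --------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module, assuming the
# optional matchpy dependency is installed.  A WildDot acts both as a SymPy
# Symbol (so it can appear inside ordinary SymPy expressions) and as a
# matchpy wildcard; the names used here are arbitrary.
if __name__ == '__main__' and matchpy:  # pragma: no cover
    from matchpy import Pattern, match
    from sympy import Symbol, cos

    x = Symbol('x')
    w = WildDot('w')

    # Build a pattern from an ordinary SymPy expression containing the wild.
    pattern = Pattern(cos(w))

    # Each substitution maps the wildcard's name to the matched subexpression.
    for substitution in match(cos(x**2), pattern):
        print(substitution['w'])   # expected: x**2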
670f2775419b2073105d220482d496e9d7d21888cb8310ba410fdfc9c5468b15
from collections import defaultdict, OrderedDict from itertools import ( combinations, combinations_with_replacement, permutations, product, product as cartes ) import random from operator import gt from sympy.core import Basic # this is the logical location of these functions from sympy.core.compatibility import (as_int, is_sequence, iterable, ordered) from sympy.core.compatibility import default_sort_key # noqa: F401 import sympy from sympy.utilities.enumerative import ( multiset_partitions_taocp, list_visitor, MultisetPartitionTraverser) def is_palindromic(s, i=0, j=None): """return True if the sequence is the same from left to right as it is from right to left in the whole sequence (default) or in the Python slice ``s[i: j]``; else False. Examples ======== >>> from sympy.utilities.iterables import is_palindromic >>> is_palindromic([1, 0, 1]) True >>> is_palindromic('abcbb') False >>> is_palindromic('abcbb', 1) False Normal Python slicing is performed in place so there is no need to create a slice of the sequence for testing: >>> is_palindromic('abcbb', 1, -1) True >>> is_palindromic('abcbb', -4, -1) True See Also ======== sympy.ntheory.digits.is_palindromic: tests integers """ i, j, _ = slice(i, j).indices(len(s)) m = (j - i)//2 # if length is odd, middle element will be ignored return all(s[i + k] == s[j - 1 - k] for k in range(m)) def flatten(iterable, levels=None, cls=None): """ Recursively denest iterable containers. >>> from sympy.utilities.iterables import flatten >>> flatten([1, 2, 3]) [1, 2, 3] >>> flatten([1, 2, [3]]) [1, 2, 3] >>> flatten([1, [2, 3], [4, 5]]) [1, 2, 3, 4, 5] >>> flatten([1.0, 2, (1, None)]) [1.0, 2, 1, None] If you want to denest only a specified number of levels of nested containers, then set ``levels`` flag to the desired number of levels:: >>> ls = [[(-2, -1), (1, 2)], [(0, 0)]] >>> flatten(ls, levels=1) [(-2, -1), (1, 2), (0, 0)] If cls argument is specified, it will only flatten instances of that class, for example: >>> from sympy.core import Basic >>> class MyOp(Basic): ... pass ... >>> flatten([MyOp(1, MyOp(2, 3))], cls=MyOp) [1, 2, 3] adapted from https://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks """ from sympy.tensor.array import NDimArray if levels is not None: if not levels: return iterable elif levels > 0: levels -= 1 else: raise ValueError( "expected non-negative number of levels, got %s" % levels) if cls is None: reducible = lambda x: is_sequence(x, set) else: reducible = lambda x: isinstance(x, cls) result = [] for el in iterable: if reducible(el): if hasattr(el, 'args') and not isinstance(el, NDimArray): el = el.args result.extend(flatten(el, levels=levels, cls=cls)) else: result.append(el) return result def unflatten(iter, n=2): """Group ``iter`` into tuples of length ``n``. Raise an error if the length of ``iter`` is not a multiple of ``n``. """ if n < 1 or len(iter) % n: raise ValueError('iter length is not a multiple of %i' % n) return list(zip(*(iter[i::n] for i in range(n)))) def reshape(seq, how): """Reshape the sequence according to the template in ``how``. Examples ======== >>> from sympy.utilities import reshape >>> seq = list(range(1, 9)) >>> reshape(seq, [4]) # lists of 4 [[1, 2, 3, 4], [5, 6, 7, 8]] >>> reshape(seq, (4,)) # tuples of 4 [(1, 2, 3, 4), (5, 6, 7, 8)] >>> reshape(seq, (2, 2)) # tuples of 4 [(1, 2, 3, 4), (5, 6, 7, 8)] >>> reshape(seq, (2, [2])) # (i, i, [i, i]) [(1, 2, [3, 4]), (5, 6, [7, 8])] >>> reshape(seq, ((2,), [2])) # etc.... 
[((1, 2), [3, 4]), ((5, 6), [7, 8])] >>> reshape(seq, (1, [2], 1)) [(1, [2, 3], 4), (5, [6, 7], 8)] >>> reshape(tuple(seq), ([[1], 1, (2,)],)) (([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],)) >>> reshape(tuple(seq), ([1], 1, (2,))) (([1], 2, (3, 4)), ([5], 6, (7, 8))) >>> reshape(list(range(12)), [2, [3], {2}, (1, (3,), 1)]) [[0, 1, [2, 3, 4], {5, 6}, (7, (8, 9, 10), 11)]] """ m = sum(flatten(how)) n, rem = divmod(len(seq), m) if m < 0 or rem: raise ValueError('template must sum to positive number ' 'that divides the length of the sequence') i = 0 container = type(how) rv = [None]*n for k in range(len(rv)): rv[k] = [] for hi in how: if type(hi) is int: rv[k].extend(seq[i: i + hi]) i += hi else: n = sum(flatten(hi)) hi_type = type(hi) rv[k].append(hi_type(reshape(seq[i: i + n], hi)[0])) i += n rv[k] = container(rv[k]) return type(seq)(rv) def group(seq, multiple=True): """ Splits a sequence into a list of lists of equal, adjacent elements. Examples ======== >>> from sympy.utilities.iterables import group >>> group([1, 1, 1, 2, 2, 3]) [[1, 1, 1], [2, 2], [3]] >>> group([1, 1, 1, 2, 2, 3], multiple=False) [(1, 3), (2, 2), (3, 1)] >>> group([1, 1, 3, 2, 2, 1], multiple=False) [(1, 2), (3, 1), (2, 2), (1, 1)] See Also ======== multiset """ if not seq: return [] current, groups = [seq[0]], [] for elem in seq[1:]: if elem == current[-1]: current.append(elem) else: groups.append(current) current = [elem] groups.append(current) if multiple: return groups for i, current in enumerate(groups): groups[i] = (current[0], len(current)) return groups def _iproduct2(iterable1, iterable2): '''Cartesian product of two possibly infinite iterables''' it1 = iter(iterable1) it2 = iter(iterable2) elems1 = [] elems2 = [] sentinel = object() def append(it, elems): e = next(it, sentinel) if e is not sentinel: elems.append(e) n = 0 append(it1, elems1) append(it2, elems2) while n <= len(elems1) + len(elems2): for m in range(n-len(elems1)+1, len(elems2)): yield (elems1[n-m], elems2[m]) n += 1 append(it1, elems1) append(it2, elems2) def iproduct(*iterables): ''' Cartesian product of iterables. Generator of the cartesian product of iterables. This is analogous to itertools.product except that it works with infinite iterables and will yield any item from the infinite product eventually. Examples ======== >>> from sympy.utilities.iterables import iproduct >>> sorted(iproduct([1,2], [3,4])) [(1, 3), (1, 4), (2, 3), (2, 4)] With an infinite iterator: >>> from sympy import S >>> (3,) in iproduct(S.Integers) True >>> (3, 4) in iproduct(S.Integers, S.Integers) True .. seealso:: `itertools.product <https://docs.python.org/3/library/itertools.html#itertools.product>`_ ''' if len(iterables) == 0: yield () return elif len(iterables) == 1: for e in iterables[0]: yield (e,) elif len(iterables) == 2: yield from _iproduct2(*iterables) else: first, others = iterables[0], iterables[1:] for ef, eo in _iproduct2(first, iproduct(*others)): yield (ef,) + eo def multiset(seq): """Return the hashable sequence in multiset form with values being the multiplicity of the item in the sequence. Examples ======== >>> from sympy.utilities.iterables import multiset >>> multiset('mississippi') {'i': 4, 'm': 1, 'p': 2, 's': 4} See Also ======== group """ rv = defaultdict(int) for s in seq: rv[s] += 1 return dict(rv) def postorder_traversal(node, keys=None): """ Do a postorder traversal of a tree. This generator recursively yields nodes that it has visited in a postorder fashion. 
That is, it descends through the tree depth-first to yield all of a node's children's postorder traversal before yielding the node itself. Parameters ========== node : sympy expression The expression to traverse. keys : (default None) sort key(s) The key(s) used to sort args of Basic objects. When None, args of Basic objects are processed in arbitrary order. If key is defined, it will be passed along to ordered() as the only key(s) to use to sort the arguments; if ``key`` is simply True then the default keys of ``ordered`` will be used (node count and default_sort_key). Yields ====== subtree : sympy expression All of the subtrees in the tree. Examples ======== >>> from sympy.utilities.iterables import postorder_traversal >>> from sympy.abc import w, x, y, z The nodes are returned in the order that they are encountered unless key is given; simply passing key=True will guarantee that the traversal is unique. >>> list(postorder_traversal(w + (x + y)*z)) # doctest: +SKIP [z, y, x, x + y, z*(x + y), w, w + z*(x + y)] >>> list(postorder_traversal(w + (x + y)*z, keys=True)) [w, z, x, y, x + y, z*(x + y), w + z*(x + y)] """ if isinstance(node, Basic): args = node.args if keys: if keys != True: args = ordered(args, keys, default=False) else: args = ordered(args) for arg in args: yield from postorder_traversal(arg, keys) elif iterable(node): for item in node: yield from postorder_traversal(item, keys) yield node def interactive_traversal(expr): """Traverse a tree asking a user which branch to choose. """ from sympy.printing import pprint RED, BRED = '\033[0;31m', '\033[1;31m' GREEN, BGREEN = '\033[0;32m', '\033[1;32m' YELLOW, BYELLOW = '\033[0;33m', '\033[1;33m' # noqa BLUE, BBLUE = '\033[0;34m', '\033[1;34m' # noqa MAGENTA, BMAGENTA = '\033[0;35m', '\033[1;35m'# noqa CYAN, BCYAN = '\033[0;36m', '\033[1;36m' # noqa END = '\033[0m' def cprint(*args): print("".join(map(str, args)) + END) def _interactive_traversal(expr, stage): if stage > 0: print() cprint("Current expression (stage ", BYELLOW, stage, END, "):") print(BCYAN) pprint(expr) print(END) if isinstance(expr, Basic): if expr.is_Add: args = expr.as_ordered_terms() elif expr.is_Mul: args = expr.as_ordered_factors() else: args = expr.args elif hasattr(expr, "__iter__"): args = list(expr) else: return expr n_args = len(args) if not n_args: return expr for i, arg in enumerate(args): cprint(GREEN, "[", BGREEN, i, GREEN, "] ", BLUE, type(arg), END) pprint(arg) print() if n_args == 1: choices = '0' else: choices = '0-%d' % (n_args - 1) try: choice = input("Your choice [%s,f,l,r,d,?]: " % choices) except EOFError: result = expr print() else: if choice == '?': cprint(RED, "%s - select subexpression with the given index" % choices) cprint(RED, "f - select the first subexpression") cprint(RED, "l - select the last subexpression") cprint(RED, "r - select a random subexpression") cprint(RED, "d - done\n") result = _interactive_traversal(expr, stage) elif choice in ['d', '']: result = expr elif choice == 'f': result = _interactive_traversal(args[0], stage + 1) elif choice == 'l': result = _interactive_traversal(args[-1], stage + 1) elif choice == 'r': result = _interactive_traversal(random.choice(args), stage + 1) else: try: choice = int(choice) except ValueError: cprint(BRED, "Choice must be a number in %s range\n" % choices) result = _interactive_traversal(expr, stage) else: if choice < 0 or choice >= n_args: cprint(BRED, "Choice must be in %s range\n" % choices) result = _interactive_traversal(expr, stage) else: result = 
_interactive_traversal(args[choice], stage + 1) return result return _interactive_traversal(expr, 0) def ibin(n, bits=None, str=False): """Return a list of length ``bits`` corresponding to the binary value of ``n`` with small bits to the right (last). If bits is omitted, the length will be the number required to represent ``n``. If the bits are desired in reversed order, use the ``[::-1]`` slice of the returned list. If a sequence of all bits-length lists starting from ``[0, 0,..., 0]`` through ``[1, 1, ..., 1]`` are desired, pass a non-integer for bits, e.g. ``'all'``. If the bit *string* is desired pass ``str=True``. Examples ======== >>> from sympy.utilities.iterables import ibin >>> ibin(2) [1, 0] >>> ibin(2, 4) [0, 0, 1, 0] If all lists corresponding to 0 to 2**n - 1, pass a non-integer for bits: >>> bits = 2 >>> for i in ibin(2, 'all'): ... print(i) (0, 0) (0, 1) (1, 0) (1, 1) If a bit string is desired of a given length, use str=True: >>> n = 123 >>> bits = 10 >>> ibin(n, bits, str=True) '0001111011' >>> ibin(n, bits, str=True)[::-1] # small bits left '1101111000' >>> list(ibin(3, 'all', str=True)) ['000', '001', '010', '011', '100', '101', '110', '111'] """ if n < 0: raise ValueError("negative numbers are not allowed") n = as_int(n) if bits is None: bits = 0 else: try: bits = as_int(bits) except ValueError: bits = -1 else: if n.bit_length() > bits: raise ValueError( "`bits` must be >= {}".format(n.bit_length())) if not str: if bits >= 0: return [1 if i == "1" else 0 for i in bin(n)[2:].rjust(bits, "0")] else: return variations(list(range(2)), n, repetition=True) else: if bits >= 0: return bin(n)[2:].rjust(bits, "0") else: return (bin(i)[2:].rjust(n, "0") for i in range(2**n)) def variations(seq, n, repetition=False): r"""Returns a generator of the n-sized variations of ``seq`` (size N). ``repetition`` controls whether items in ``seq`` can appear more than once; Examples ======== ``variations(seq, n)`` will return `\frac{N!}{(N - n)!}` permutations without repetition of ``seq``'s elements: >>> from sympy.utilities.iterables import variations >>> list(variations([1, 2], 2)) [(1, 2), (2, 1)] ``variations(seq, n, True)`` will return the `N^n` permutations obtained by allowing repetition of elements: >>> list(variations([1, 2], 2, repetition=True)) [(1, 1), (1, 2), (2, 1), (2, 2)] If you ask for more items than are in the set you get the empty set unless you allow repetitions: >>> list(variations([0, 1], 3, repetition=False)) [] >>> list(variations([0, 1], 3, repetition=True))[:4] [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)] .. seealso:: `itertools.permutations <https://docs.python.org/3/library/itertools.html#itertools.permutations>`_, `itertools.product <https://docs.python.org/3/library/itertools.html#itertools.product>`_ """ if not repetition: seq = tuple(seq) if len(seq) < n: return yield from permutations(seq, n) else: if n == 0: yield () else: yield from product(seq, repeat=n) def subsets(seq, k=None, repetition=False): r"""Generates all `k`-subsets (combinations) from an `n`-element set, ``seq``. A `k`-subset of an `n`-element set is any subset of length exactly `k`. The number of `k`-subsets of an `n`-element set is given by ``binomial(n, k)``, whereas there are `2^n` subsets all together. If `k` is ``None`` then all `2^n` subsets will be returned from shortest to longest. Examples ======== >>> from sympy.utilities.iterables import subsets ``subsets(seq, k)`` will return the `\frac{n!}{k!(n - k)!}` `k`-subsets (combinations) without repetition, i.e. 
once an item has been removed, it can no longer be "taken": >>> list(subsets([1, 2], 2)) [(1, 2)] >>> list(subsets([1, 2])) [(), (1,), (2,), (1, 2)] >>> list(subsets([1, 2, 3], 2)) [(1, 2), (1, 3), (2, 3)] ``subsets(seq, k, repetition=True)`` will return the `\frac{(n - 1 + k)!}{k!(n - 1)!}` combinations *with* repetition: >>> list(subsets([1, 2], 2, repetition=True)) [(1, 1), (1, 2), (2, 2)] If you ask for more items than are in the set you get the empty set unless you allow repetitions: >>> list(subsets([0, 1], 3, repetition=False)) [] >>> list(subsets([0, 1], 3, repetition=True)) [(0, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 1)] """ if k is None: for k in range(len(seq) + 1): yield from subsets(seq, k, repetition) else: if not repetition: yield from combinations(seq, k) else: yield from combinations_with_replacement(seq, k) def filter_symbols(iterator, exclude): """ Only yield elements from `iterator` that do not occur in `exclude`. Parameters ========== iterator : iterable iterator to take elements from exclude : iterable elements to exclude Returns ======= iterator : iterator filtered iterator """ exclude = set(exclude) for s in iterator: if s not in exclude: yield s def numbered_symbols(prefix='x', cls=None, start=0, exclude=[], *args, **assumptions): """ Generate an infinite stream of Symbols consisting of a prefix and increasing subscripts provided that they do not occur in ``exclude``. Parameters ========== prefix : str, optional The prefix to use. By default, this function will generate symbols of the form "x0", "x1", etc. cls : class, optional The class to use. By default, it uses ``Symbol``, but you can also use ``Wild`` or ``Dummy``. start : int, optional The start number. By default, it is 0. Returns ======= sym : Symbol The subscripted symbols. """ exclude = set(exclude or []) if cls is None: # We can't just make the default cls=Symbol because it isn't # imported yet. from sympy import Symbol cls = Symbol while True: name = '%s%s' % (prefix, start) s = cls(name, *args, **assumptions) if s not in exclude: yield s start += 1 def capture(func): """Return the printed output of func(). ``func`` should be a function without arguments that produces output with print statements. >>> from sympy.utilities.iterables import capture >>> from sympy import pprint >>> from sympy.abc import x >>> def foo(): ... print('hello world!') ... >>> 'hello' in capture(foo) # foo, not foo() True >>> capture(lambda: pprint(2/x)) '2\\n-\\nx\\n' """ from io import StringIO import sys stdout = sys.stdout sys.stdout = file = StringIO() try: func() finally: sys.stdout = stdout return file.getvalue() def sift(seq, keyfunc, binary=False): """ Sift the sequence, ``seq`` according to ``keyfunc``. Returns ======= When ``binary`` is ``False`` (default), the output is a dictionary where elements of ``seq`` are stored in a list keyed to the value of keyfunc for that element. If ``binary`` is True then a tuple with lists ``T`` and ``F`` are returned where ``T`` is a list containing elements of seq for which ``keyfunc`` was ``True`` and ``F`` containing those elements for which ``keyfunc`` was ``False``; a ValueError is raised if the ``keyfunc`` is not binary. Examples ======== >>> from sympy.utilities import sift >>> from sympy.abc import x, y >>> from sympy import sqrt, exp, pi, Tuple >>> sift(range(5), lambda x: x % 2) {0: [0, 2, 4], 1: [1, 3]} sift() returns a defaultdict() object, so any key that has no matches will give []. 
>>> sift([x], lambda x: x.is_commutative) {True: [x]} >>> _[False] [] Sometimes you will not know how many keys you will get: >>> sift([sqrt(x), exp(x), (y**x)**2], ... lambda x: x.as_base_exp()[0]) {E: [exp(x)], x: [sqrt(x)], y: [y**(2*x)]} Sometimes you expect the results to be binary; the results can be unpacked by setting ``binary`` to True: >>> sift(range(4), lambda x: x % 2, binary=True) ([1, 3], [0, 2]) >>> sift(Tuple(1, pi), lambda x: x.is_rational, binary=True) ([1], [pi]) A ValueError is raised if the predicate was not actually binary (which is a good test for the logic where sifting is used and binary results were expected): >>> unknown = exp(1) - pi # the rationality of this is unknown >>> args = Tuple(1, pi, unknown) >>> sift(args, lambda x: x.is_rational, binary=True) Traceback (most recent call last): ... ValueError: keyfunc gave non-binary output The non-binary sifting shows that there were 3 keys generated: >>> set(sift(args, lambda x: x.is_rational).keys()) {None, False, True} If you need to sort the sifted items it might be better to use ``ordered`` which can economically apply multiple sort keys to a sequence while sorting. See Also ======== ordered """ if not binary: m = defaultdict(list) for i in seq: m[keyfunc(i)].append(i) return m sift = F, T = [], [] for i in seq: try: sift[keyfunc(i)].append(i) except (IndexError, TypeError): raise ValueError('keyfunc gave non-binary output') return T, F def take(iter, n): """Return ``n`` items from ``iter`` iterator. """ return [ value for _, value in zip(range(n), iter) ] def dict_merge(*dicts): """Merge dictionaries into a single dictionary. """ merged = {} for dict in dicts: merged.update(dict) return merged def common_prefix(*seqs): """Return the subsequence that is a common start of sequences in ``seqs``. >>> from sympy.utilities.iterables import common_prefix >>> common_prefix(list(range(3))) [0, 1, 2] >>> common_prefix(list(range(3)), list(range(4))) [0, 1, 2] >>> common_prefix([1, 2, 3], [1, 2, 5]) [1, 2] >>> common_prefix([1, 2, 3], [1, 3, 5]) [1] """ if any(not s for s in seqs): return [] elif len(seqs) == 1: return seqs[0] i = 0 for i in range(min(len(s) for s in seqs)): if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))): break else: i += 1 return seqs[0][:i] def common_suffix(*seqs): """Return the subsequence that is a common ending of sequences in ``seqs``. >>> from sympy.utilities.iterables import common_suffix >>> common_suffix(list(range(3))) [0, 1, 2] >>> common_suffix(list(range(3)), list(range(4))) [] >>> common_suffix([1, 2, 3], [9, 2, 3]) [2, 3] >>> common_suffix([1, 2, 3], [9, 7, 3]) [3] """ if any(not s for s in seqs): return [] elif len(seqs) == 1: return seqs[0] i = 0 for i in range(-1, -min(len(s) for s in seqs) - 1, -1): if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))): break else: i -= 1 if i == -1: return [] else: return seqs[0][i + 1:] def prefixes(seq): """ Generate all prefixes of a sequence. Examples ======== >>> from sympy.utilities.iterables import prefixes >>> list(prefixes([1,2,3,4])) [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4]] """ n = len(seq) for i in range(n): yield seq[:i + 1] def postfixes(seq): """ Generate all postfixes of a sequence. Examples ======== >>> from sympy.utilities.iterables import postfixes >>> list(postfixes([1,2,3,4])) [[4], [3, 4], [2, 3, 4], [1, 2, 3, 4]] """ n = len(seq) for i in range(n): yield seq[n - i - 1:] def topological_sort(graph, key=None): r""" Topological sort of graph's vertices. 
Parameters ========== graph : tuple[list, list[tuple[T, T]] A tuple consisting of a list of vertices and a list of edges of a graph to be sorted topologically. key : callable[T] (optional) Ordering key for vertices on the same level. By default the natural (e.g. lexicographic) ordering is used (in this case the base type must implement ordering relations). Examples ======== Consider a graph:: +---+ +---+ +---+ | 7 |\ | 5 | | 3 | +---+ \ +---+ +---+ | _\___/ ____ _/ | | / \___/ \ / | V V V V | +----+ +---+ | | 11 | | 8 | | +----+ +---+ | | | \____ ___/ _ | | \ \ / / \ | V \ V V / V V +---+ \ +---+ | +----+ | 2 | | | 9 | | | 10 | +---+ | +---+ | +----+ \________/ where vertices are integers. This graph can be encoded using elementary Python's data structures as follows:: >>> V = [2, 3, 5, 7, 8, 9, 10, 11] >>> E = [(7, 11), (7, 8), (5, 11), (3, 8), (3, 10), ... (11, 2), (11, 9), (11, 10), (8, 9)] To compute a topological sort for graph ``(V, E)`` issue:: >>> from sympy.utilities.iterables import topological_sort >>> topological_sort((V, E)) [3, 5, 7, 8, 11, 2, 9, 10] If specific tie breaking approach is needed, use ``key`` parameter:: >>> topological_sort((V, E), key=lambda v: -v) [7, 5, 11, 3, 10, 8, 9, 2] Only acyclic graphs can be sorted. If the input graph has a cycle, then ``ValueError`` will be raised:: >>> topological_sort((V, E + [(10, 7)])) Traceback (most recent call last): ... ValueError: cycle detected References ========== .. [1] https://en.wikipedia.org/wiki/Topological_sorting """ V, E = graph L = [] S = set(V) E = list(E) for v, u in E: S.discard(u) if key is None: key = lambda value: value S = sorted(S, key=key, reverse=True) while S: node = S.pop() L.append(node) for u, v in list(E): if u == node: E.remove((u, v)) for _u, _v in E: if v == _v: break else: kv = key(v) for i, s in enumerate(S): ks = key(s) if kv > ks: S.insert(i, v) break else: S.append(v) if E: raise ValueError("cycle detected") else: return L def strongly_connected_components(G): r""" Strongly connected components of a directed graph in reverse topological order. Parameters ========== graph : tuple[list, list[tuple[T, T]] A tuple consisting of a list of vertices and a list of edges of a graph whose strongly connected components are to be found. Examples ======== Consider a directed graph (in dot notation):: digraph { A -> B A -> C B -> C C -> B B -> D } where vertices are the letters A, B, C and D. This graph can be encoded using Python's elementary data structures as follows:: >>> V = ['A', 'B', 'C', 'D'] >>> E = [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B'), ('B', 'D')] The strongly connected components of this graph can be computed as >>> from sympy.utilities.iterables import strongly_connected_components >>> strongly_connected_components((V, E)) [['D'], ['B', 'C'], ['A']] This also gives the components in reverse topological order. Since the subgraph containing B and C has a cycle they must be together in a strongly connected component. A and D are connected to the rest of the graph but not in a cyclic manner so they appear as their own strongly connected components. Notes ===== The vertices of the graph must be hashable for the data structures used. If the vertices are unhashable replace them with integer indices. This function uses Tarjan's algorithm to compute the strongly connected components in `O(|V|+|E|)` (linear) time. References ========== .. [1] https://en.wikipedia.org/wiki/Strongly_connected_component .. 
[2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm See Also ======== sympy.utilities.iterables.connected_components """ # Map from a vertex to its neighbours V, E = G Gmap = {vi: [] for vi in V} for v1, v2 in E: Gmap[v1].append(v2) # Non-recursive Tarjan's algorithm: lowlink = {} indices = {} stack = OrderedDict() callstack = [] components = [] nomore = object() def start(v): index = len(stack) indices[v] = lowlink[v] = index stack[v] = None callstack.append((v, iter(Gmap[v]))) def finish(v1): # Finished a component? if lowlink[v1] == indices[v1]: component = [stack.popitem()[0]] while component[-1] is not v1: component.append(stack.popitem()[0]) components.append(component[::-1]) v2, _ = callstack.pop() if callstack: v1, _ = callstack[-1] lowlink[v1] = min(lowlink[v1], lowlink[v2]) for v in V: if v in indices: continue start(v) while callstack: v1, it1 = callstack[-1] v2 = next(it1, nomore) # Finished children of v1? if v2 is nomore: finish(v1) # Recurse on v2 elif v2 not in indices: start(v2) elif v2 in stack: lowlink[v1] = min(lowlink[v1], indices[v2]) # Reverse topological sort order: return components def connected_components(G): r""" Connected components of an undirected graph or weakly connected components of a directed graph. Parameters ========== graph : tuple[list, list[tuple[T, T]] A tuple consisting of a list of vertices and a list of edges of a graph whose connected components are to be found. Examples ======== Given an undirected graph:: graph { A -- B C -- D } We can find the connected components using this function if we include each edge in both directions:: >>> from sympy.utilities.iterables import connected_components >>> V = ['A', 'B', 'C', 'D'] >>> E = [('A', 'B'), ('B', 'A'), ('C', 'D'), ('D', 'C')] >>> connected_components((V, E)) [['A', 'B'], ['C', 'D']] The weakly connected components of a directed graph can found the same way. Notes ===== The vertices of the graph must be hashable for the data structures used. If the vertices are unhashable replace them with integer indices. This function uses Tarjan's algorithm to compute the connected components in `O(|V|+|E|)` (linear) time. References ========== .. [1] https://en.wikipedia.org/wiki/Connected_component_(graph_theory) .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm See Also ======== sympy.utilities.iterables.strongly_connected_components """ # Duplicate edges both ways so that the graph is effectively undirected # and return the strongly connected components: V, E = G E_undirected = [] for v1, v2 in E: E_undirected.extend([(v1, v2), (v2, v1)]) return strongly_connected_components((V, E_undirected)) def rotate_left(x, y): """ Left rotates a list x by the number of steps specified in y. Examples ======== >>> from sympy.utilities.iterables import rotate_left >>> a = [0, 1, 2] >>> rotate_left(a, 1) [1, 2, 0] """ if len(x) == 0: return [] y = y % len(x) return x[y:] + x[:y] def rotate_right(x, y): """ Right rotates a list x by the number of steps specified in y. Examples ======== >>> from sympy.utilities.iterables import rotate_right >>> a = [0, 1, 2] >>> rotate_right(a, 1) [2, 0, 1] """ if len(x) == 0: return [] y = len(x) - y % len(x) return x[y:] + x[:y] def least_rotation(x, key=None): ''' Returns the number of steps of left rotation required to obtain lexicographically minimal string/list/tuple, etc. 
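If ``key`` is not None then it is used to extract a comparison key from each element of the sequence (as in ``minlex``).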
Examples ======== >>> from sympy.utilities.iterables import least_rotation, rotate_left >>> a = [3, 1, 5, 1, 2] >>> least_rotation(a) 3 >>> rotate_left(a, _) [1, 2, 3, 1, 5] References ========== .. [1] https://en.wikipedia.org/wiki/Lexicographically_minimal_string_rotation ''' if key is None: key = sympy.Id S = x + x # Concatenate string to it self to avoid modular arithmetic f = [-1] * len(S) # Failure function k = 0 # Least rotation of string found so far for j in range(1,len(S)): sj = S[j] i = f[j-k-1] while i != -1 and sj != S[k+i+1]: if key(sj) < key(S[k+i+1]): k = j-i-1 i = f[i] if sj != S[k+i+1]: if key(sj) < key(S[k]): k = j f[j-k] = -1 else: f[j-k] = i+1 return k def multiset_combinations(m, n, g=None): """ Return the unique combinations of size ``n`` from multiset ``m``. Examples ======== >>> from sympy.utilities.iterables import multiset_combinations >>> from itertools import combinations >>> [''.join(i) for i in multiset_combinations('baby', 3)] ['abb', 'aby', 'bby'] >>> def count(f, s): return len(list(f(s, 3))) The number of combinations depends on the number of letters; the number of unique combinations depends on how the letters are repeated. >>> s1 = 'abracadabra' >>> s2 = 'banana tree' >>> count(combinations, s1), count(multiset_combinations, s1) (165, 23) >>> count(combinations, s2), count(multiset_combinations, s2) (165, 54) """ if g is None: if type(m) is dict: if n > sum(m.values()): return g = [[k, m[k]] for k in ordered(m)] else: m = list(m) if n > len(m): return try: m = multiset(m) g = [(k, m[k]) for k in ordered(m)] except TypeError: m = list(ordered(m)) g = [list(i) for i in group(m, multiple=False)] del m if sum(v for k, v in g) < n or not n: yield [] else: for i, (k, v) in enumerate(g): if v >= n: yield [k]*n v = n - 1 for v in range(min(n, v), 0, -1): for j in multiset_combinations(None, n - v, g[i + 1:]): rv = [k]*v + j if len(rv) == n: yield rv def multiset_permutations(m, size=None, g=None): """ Return the unique permutations of multiset ``m``. Examples ======== >>> from sympy.utilities.iterables import multiset_permutations >>> from sympy import factorial >>> [''.join(i) for i in multiset_permutations('aab')] ['aab', 'aba', 'baa'] >>> factorial(len('banana')) 720 >>> len(list(multiset_permutations('banana'))) 60 """ if g is None: if type(m) is dict: g = [[k, m[k]] for k in ordered(m)] else: m = list(ordered(m)) g = [list(i) for i in group(m, multiple=False)] del m do = [gi for gi in g if gi[1] > 0] SUM = sum([gi[1] for gi in do]) if not do or size is not None and (size > SUM or size < 1): if size < 1: yield [] return elif size == 1: for k, v in do: yield [k] elif len(do) == 1: k, v = do[0] v = v if size is None else (size if size <= v else 0) yield [k for i in range(v)] elif all(v == 1 for k, v in do): for p in permutations([k for k, v in do], size): yield list(p) else: size = size if size is not None else SUM for i, (k, v) in enumerate(do): do[i][1] -= 1 for j in multiset_permutations(None, size - 1, do): if j: yield [k] + j do[i][1] += 1 def _partition(seq, vector, m=None): """ Return the partition of seq as specified by the partition vector. 
Examples ======== >>> from sympy.utilities.iterables import _partition >>> _partition('abcde', [1, 0, 1, 2, 0]) [['b', 'e'], ['a', 'c'], ['d']] Specifying the number of bins in the partition is optional: >>> _partition('abcde', [1, 0, 1, 2, 0], 3) [['b', 'e'], ['a', 'c'], ['d']] The output of _set_partitions can be passed as follows: >>> output = (3, [1, 0, 1, 2, 0]) >>> _partition('abcde', *output) [['b', 'e'], ['a', 'c'], ['d']] See Also ======== combinatorics.partitions.Partition.from_rgs """ if m is None: m = max(vector) + 1 elif type(vector) is int: # entered as m, vector vector, m = m, vector p = [[] for i in range(m)] for i, v in enumerate(vector): p[v].append(seq[i]) return p def _set_partitions(n): """Cycle through all partions of n elements, yielding the current number of partitions, ``m``, and a mutable list, ``q`` such that element[i] is in part q[i] of the partition. NOTE: ``q`` is modified in place and generally should not be changed between function calls. Examples ======== >>> from sympy.utilities.iterables import _set_partitions, _partition >>> for m, q in _set_partitions(3): ... print('%s %s %s' % (m, q, _partition('abc', q, m))) 1 [0, 0, 0] [['a', 'b', 'c']] 2 [0, 0, 1] [['a', 'b'], ['c']] 2 [0, 1, 0] [['a', 'c'], ['b']] 2 [0, 1, 1] [['a'], ['b', 'c']] 3 [0, 1, 2] [['a'], ['b'], ['c']] Notes ===== This algorithm is similar to, and solves the same problem as, Algorithm 7.2.1.5H, from volume 4A of Knuth's The Art of Computer Programming. Knuth uses the term "restricted growth string" where this code refers to a "partition vector". In each case, the meaning is the same: the value in the ith element of the vector specifies to which part the ith set element is to be assigned. At the lowest level, this code implements an n-digit big-endian counter (stored in the array q) which is incremented (with carries) to get the next partition in the sequence. A special twist is that a digit is constrained to be at most one greater than the maximum of all the digits to the left of it. The array p maintains this maximum, so that the code can efficiently decide when a digit can be incremented in place or whether it needs to be reset to 0 and trigger a carry to the next digit. The enumeration starts with all the digits 0 (which corresponds to all the set elements being assigned to the same 0th part), and ends with 0123...n, which corresponds to each set element being assigned to a different, singleton, part. This routine was rewritten to use 0-based lists while trying to preserve the beauty and efficiency of the original algorithm. References ========== .. [1] Nijenhuis, Albert and Wilf, Herbert. (1978) Combinatorial Algorithms, 2nd Ed, p 91, algorithm "nexequ". Available online from https://www.math.upenn.edu/~wilf/website/CombAlgDownld.html (viewed November 17, 2012). """ p = [0]*n q = [0]*n nc = 1 yield nc, q while nc != n: m = n while 1: m -= 1 i = q[m] if p[i] != 1: break q[m] = 0 i += 1 q[m] = i m += 1 nc += m - n p[0] += n - m if i == nc: p[nc] = 0 nc += 1 p[i - 1] -= 1 p[i] += 1 yield nc, q def multiset_partitions(multiset, m=None): """ Return unique partitions of the given multiset (in list form). If ``m`` is None, all multisets will be returned, otherwise only partitions with ``m`` parts will be returned. If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1] will be supplied. 
Examples ======== >>> from sympy.utilities.iterables import multiset_partitions >>> list(multiset_partitions([1, 2, 3, 4], 2)) [[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]], [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]], [[1], [2, 3, 4]]] >>> list(multiset_partitions([1, 2, 3, 4], 1)) [[[1, 2, 3, 4]]] Only unique partitions are returned and these will be returned in a canonical order regardless of the order of the input: >>> a = [1, 2, 2, 1] >>> ans = list(multiset_partitions(a, 2)) >>> a.sort() >>> list(multiset_partitions(a, 2)) == ans True >>> a = range(3, 1, -1) >>> (list(multiset_partitions(a)) == ... list(multiset_partitions(sorted(a)))) True If m is omitted then all partitions will be returned: >>> list(multiset_partitions([1, 1, 2])) [[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]] >>> list(multiset_partitions([1]*3)) [[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]] Counting ======== The number of partitions of a set is given by the bell number: >>> from sympy import bell >>> len(list(multiset_partitions(5))) == bell(5) == 52 True The number of partitions of length k from a set of size n is given by the Stirling Number of the 2nd kind: >>> from sympy.functions.combinatorial.numbers import stirling >>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15 True These comments on counting apply to *sets*, not multisets. Notes ===== When all the elements are the same in the multiset, the order of the returned partitions is determined by the ``partitions`` routine. If one is counting partitions then it is better to use the ``nT`` function. See Also ======== partitions sympy.combinatorics.partitions.Partition sympy.combinatorics.partitions.IntegerPartition sympy.functions.combinatorial.numbers.nT """ # This function looks at the supplied input and dispatches to # several special-case routines as they apply. if type(multiset) is int: n = multiset if m and m > n: return multiset = list(range(n)) if m == 1: yield [multiset[:]] return # If m is not None, it can sometimes be faster to use # MultisetPartitionTraverser.enum_range() even for inputs # which are sets. Since the _set_partitions code is quite # fast, this is only advantageous when the overall set # partitions outnumber those with the desired number of parts # by a large factor. (At least 60.) Such a switch is not # currently implemented. for nc, q in _set_partitions(n): if m is None or nc == m: rv = [[] for i in range(nc)] for i in range(n): rv[q[i]].append(multiset[i]) yield rv return if len(multiset) == 1 and isinstance(multiset, str): multiset = [multiset] if not has_variety(multiset): # Only one component, repeated n times. The resulting # partitions correspond to partitions of integer n. n = len(multiset) if m and m > n: return if m == 1: yield [multiset[:]] return x = multiset[:1] for size, p in partitions(n, m, size=True): if m is None or size == m: rv = [] for k in sorted(p): rv.extend([x*k]*p[k]) yield rv else: multiset = list(ordered(multiset)) n = len(multiset) if m and m > n: return if m == 1: yield [multiset[:]] return # Split the information of the multiset into two lists - # one of the elements themselves, and one (of the same length) # giving the number of repeats for the corresponding element. elements, multiplicities = zip(*group(multiset, False)) if len(elements) < len(multiset): # General case - multiset with more than one distinct element # and at least one element repeated more than once. 
if m: mpt = MultisetPartitionTraverser() for state in mpt.enum_range(multiplicities, m-1, m): yield list_visitor(state, elements) else: for state in multiset_partitions_taocp(multiplicities): yield list_visitor(state, elements) else: # Set partitions case - no repeated elements. Pretty much # same as int argument case above, with same possible, but # currently unimplemented optimization for some cases when # m is not None for nc, q in _set_partitions(n): if m is None or nc == m: rv = [[] for i in range(nc)] for i in range(n): rv[q[i]].append(i) yield [[multiset[j] for j in i] for i in rv] def partitions(n, m=None, k=None, size=False): """Generate all partitions of positive integer, n. Parameters ========== m : integer (default gives partitions of all sizes) limits number of parts in partition (mnemonic: m, maximum parts) k : integer (default gives partitions number from 1 through n) limits the numbers that are kept in the partition (mnemonic: k, keys) size : bool (default False, only partition is returned) when ``True`` then (M, P) is returned where M is the sum of the multiplicities and P is the generated partition. Each partition is represented as a dictionary, mapping an integer to the number of copies of that integer in the partition. For example, the first partition of 4 returned is {4: 1}, "4: one of them". Examples ======== >>> from sympy.utilities.iterables import partitions The numbers appearing in the partition (the key of the returned dict) are limited with k: >>> for p in partitions(6, k=2): # doctest: +SKIP ... print(p) {2: 3} {1: 2, 2: 2} {1: 4, 2: 1} {1: 6} The maximum number of parts in the partition (the sum of the values in the returned dict) are limited with m (default value, None, gives partitions from 1 through n): >>> for p in partitions(6, m=2): # doctest: +SKIP ... print(p) ... {6: 1} {1: 1, 5: 1} {2: 1, 4: 1} {3: 2} References ========== .. [1] modified from Tim Peter's version to allow for k and m values: http://code.activestate.com/recipes/218332-generator-for-integer-partitions/ See Also ======== sympy.combinatorics.partitions.Partition sympy.combinatorics.partitions.IntegerPartition """ if (n <= 0 or m is not None and m < 1 or k is not None and k < 1 or m and k and m*k < n): # the empty set is the only way to handle these inputs # and returning {} to represent it is consistent with # the counting convention, e.g. nT(0) == 1. if size: yield 0, {} else: yield {} return if m is None: m = n else: m = min(m, n) if n == 0: if size: yield 1, {0: 1} else: yield {0: 1} return k = min(k or n, n) n, m, k = as_int(n), as_int(m), as_int(k) q, r = divmod(n, k) ms = {k: q} keys = [k] # ms.keys(), from largest to smallest if r: ms[r] = 1 keys.append(r) room = m - q - bool(r) if size: yield sum(ms.values()), ms.copy() else: yield ms.copy() while keys != [1]: # Reuse any 1's. if keys[-1] == 1: del keys[-1] reuse = ms.pop(1) room += reuse else: reuse = 0 while 1: # Let i be the smallest key larger than 1. Reuse one # instance of i. i = keys[-1] newcount = ms[i] = ms[i] - 1 reuse += i if newcount == 0: del keys[-1], ms[i] room += 1 # Break the remainder into pieces of size i-1. i -= 1 q, r = divmod(reuse, i) need = q + bool(r) if need > room: if not keys: return continue ms[i] = q keys.append(i) if r: ms[r] = 1 keys.append(r) break room -= need if size: yield sum(ms.values()), ms.copy() else: yield ms.copy() def ordered_partitions(n, m=None, sort=True): """Generates ordered partitions of integer ``n``. 
Parameters ========== m : integer (default None) The default value gives partitions of all sizes else only those with size m. In addition, if ``m`` is not None then partitions are generated *in place* (see examples). sort : bool (default True) Controls whether partitions are returned in sorted order when ``m`` is not None; when False, the partitions are returned as fast as possible with elements sorted, but when m|n the partitions will not be in ascending lexicographical order. Examples ======== >>> from sympy.utilities.iterables import ordered_partitions All partitions of 5 in ascending lexicographical: >>> for p in ordered_partitions(5): ... print(p) [1, 1, 1, 1, 1] [1, 1, 1, 2] [1, 1, 3] [1, 2, 2] [1, 4] [2, 3] [5] Only partitions of 5 with two parts: >>> for p in ordered_partitions(5, 2): ... print(p) [1, 4] [2, 3] When ``m`` is given, a given list objects will be used more than once for speed reasons so you will not see the correct partitions unless you make a copy of each as it is generated: >>> [p for p in ordered_partitions(7, 3)] [[1, 1, 1], [1, 1, 1], [1, 1, 1], [2, 2, 2]] >>> [list(p) for p in ordered_partitions(7, 3)] [[1, 1, 5], [1, 2, 4], [1, 3, 3], [2, 2, 3]] When ``n`` is a multiple of ``m``, the elements are still sorted but the partitions themselves will be *unordered* if sort is False; the default is to return them in ascending lexicographical order. >>> for p in ordered_partitions(6, 2): ... print(p) [1, 5] [2, 4] [3, 3] But if speed is more important than ordering, sort can be set to False: >>> for p in ordered_partitions(6, 2, sort=False): ... print(p) [1, 5] [3, 3] [2, 4] References ========== .. [1] Generating Integer Partitions, [online], Available: https://jeromekelleher.net/generating-integer-partitions.html .. [2] Jerome Kelleher and Barry O'Sullivan, "Generating All Partitions: A Comparison Of Two Encodings", [online], Available: https://arxiv.org/pdf/0909.2331v2.pdf """ if n < 1 or m is not None and m < 1: # the empty set is the only way to handle these inputs # and returning {} to represent it is consistent with # the counting convention, e.g. nT(0) == 1. yield [] return if m is None: # The list `a`'s leading elements contain the partition in which # y is the biggest element and x is either the same as y or the # 2nd largest element; v and w are adjacent element indices # to which x and y are being assigned, respectively. a = [1]*n y = -1 v = n while v > 0: v -= 1 x = a[v] + 1 while y >= 2 * x: a[v] = x y -= x v += 1 w = v + 1 while x <= y: a[v] = x a[w] = y yield a[:w + 1] x += 1 y -= 1 a[v] = x + y y = a[v] - 1 yield a[:w] elif m == 1: yield [n] elif n == m: yield [1]*n else: # recursively generate partitions of size m for b in range(1, n//m + 1): a = [b]*m x = n - b*m if not x: if sort: yield a elif not sort and x <= m: for ax in ordered_partitions(x, sort=False): mi = len(ax) a[-mi:] = [i + b for i in ax] yield a a[-mi:] = [b]*mi else: for mi in range(1, m): for ax in ordered_partitions(x, mi, sort=True): a[-mi:] = [i + b for i in ax] yield a a[-mi:] = [b]*mi def binary_partitions(n): """ Generates the binary partition of n. A binary partition consists only of numbers that are powers of two. Each step reduces a `2^{k+1}` to `2^k` and `2^k`. Thus 16 is converted to 8 and 8. Examples ======== >>> from sympy.utilities.iterables import binary_partitions >>> for i in binary_partitions(5): ... print(i) ... [4, 1] [2, 2, 1] [2, 1, 1, 1] [1, 1, 1, 1, 1] References ========== .. 
[1] TAOCP 4, section 7.2.1.5, problem 64 """ from math import ceil, log pow = int(2**(ceil(log(n, 2)))) sum = 0 partition = [] while pow: if sum + pow <= n: partition.append(pow) sum += pow pow >>= 1 last_num = len(partition) - 1 - (n & 1) while last_num >= 0: yield partition if partition[last_num] == 2: partition[last_num] = 1 partition.append(1) last_num -= 1 continue partition.append(1) partition[last_num] >>= 1 x = partition[last_num + 1] = partition[last_num] last_num += 1 while x > 1: if x <= len(partition) - last_num - 1: del partition[-x + 1:] last_num += 1 partition[last_num] = x else: x >>= 1 yield [1]*n def has_dups(seq): """Return True if there are any duplicate elements in ``seq``. Examples ======== >>> from sympy.utilities.iterables import has_dups >>> from sympy import Dict, Set >>> has_dups((1, 2, 1)) True >>> has_dups(range(3)) False >>> all(has_dups(c) is False for c in (set(), Set(), dict(), Dict())) True """ from sympy.core.containers import Dict from sympy.sets.sets import Set if isinstance(seq, (dict, set, Dict, Set)): return False uniq = set() return any(True for s in seq if s in uniq or uniq.add(s)) def has_variety(seq): """Return True if there are any different elements in ``seq``. Examples ======== >>> from sympy.utilities.iterables import has_variety >>> has_variety((1, 2, 1)) True >>> has_variety((1, 1, 1)) False """ for i, s in enumerate(seq): if i == 0: sentinel = s else: if s != sentinel: return True return False def uniq(seq, result=None): """ Yield unique elements from ``seq`` as an iterator. The second parameter ``result`` is used internally; it is not necessary to pass anything for this. Note: changing the sequence during iteration will raise a RuntimeError if the size of the sequence is known; if you pass an iterator and advance the iterator you will change the output of this routine but there will be no warning. Examples ======== >>> from sympy.utilities.iterables import uniq >>> dat = [1, 4, 1, 5, 4, 2, 1, 2] >>> type(uniq(dat)) in (list, tuple) False >>> list(uniq(dat)) [1, 4, 5, 2] >>> list(uniq(x for x in dat)) [1, 4, 5, 2] >>> list(uniq([[1], [2, 1], [1]])) [[1], [2, 1]] """ try: n = len(seq) except TypeError: n = None def check(): # check that size of seq did not change during iteration; # if n == None the object won't support size changing, e.g. # an iterator can't be changed if n is not None and len(seq) != n: raise RuntimeError('sequence changed size during iteration') try: seen = set() result = result or [] for i, s in enumerate(seq): if not (s in seen or seen.add(s)): yield s check() except TypeError: if s not in result: yield s check() result.append(s) if hasattr(seq, '__getitem__'): yield from uniq(seq[i + 1:], result) else: yield from uniq(seq, result) def generate_bell(n): """Return permutations of [0, 1, ..., n - 1] such that each permutation differs from the last by the exchange of a single pair of neighbors. The ``n!`` permutations are returned as an iterator. In order to obtain the next permutation from a random starting permutation, use the ``next_trotterjohnson`` method of the Permutation class (which generates the same sequence in a different manner). Examples ======== >>> from itertools import permutations >>> from sympy.utilities.iterables import generate_bell >>> from sympy import zeros, Matrix This is the sort of permutation used in the ringing of physical bells, and does not produce permutations in lexicographical order. 
Rather, the permutations differ from each other by exactly one inversion, and the position at which the swapping occurs varies periodically in a simple fashion. Consider the first few permutations of 4 elements generated by ``permutations`` and ``generate_bell``: >>> list(permutations(range(4)))[:5] [(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)] >>> list(generate_bell(4))[:5] [(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)] Notice how the 2nd and 3rd lexicographical permutations have 3 elements out of place whereas each "bell" permutation always has only two elements out of place relative to the previous permutation (and so the signature (+/-1) of a permutation is opposite of the signature of the previous permutation). How the position of inversion varies across the elements can be seen by tracing out where the largest number appears in the permutations: >>> m = zeros(4, 24) >>> for i, p in enumerate(generate_bell(4)): ... m[:, i] = Matrix([j - 3 for j in list(p)]) # make largest zero >>> m.print_nonzero('X') [XXX XXXXXX XXXXXX XXX] [XX XX XXXX XX XXXX XX XX] [X XXXX XX XXXX XX XXXX X] [ XXXXXX XXXXXX XXXXXX ] See Also ======== sympy.combinatorics.permutations.Permutation.next_trotterjohnson References ========== .. [1] https://en.wikipedia.org/wiki/Method_ringing .. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018 .. [3] http://programminggeeks.com/bell-algorithm-for-permutation/ .. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm .. [5] Generating involutions, derangements, and relatives by ECO Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010 """ n = as_int(n) if n < 1: raise ValueError('n must be a positive integer') if n == 1: yield (0,) elif n == 2: yield (0, 1) yield (1, 0) elif n == 3: yield from [(0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)] else: m = n - 1 op = [0] + [-1]*m l = list(range(n)) while True: yield tuple(l) # find biggest element with op big = None, -1 # idx, value for i in range(n): if op[i] and l[i] > big[1]: big = i, l[i] i, _ = big if i is None: break # there are no ops left # swap it with neighbor in the indicated direction j = i + op[i] l[i], l[j] = l[j], l[i] op[i], op[j] = op[j], op[i] # if it landed at the end or if the neighbor in the same # direction is bigger then turn off op if j == 0 or j == m or l[j + op[j]] > l[j]: op[j] = 0 # any element bigger to the left gets +1 op for i in range(j): if l[i] > l[j]: op[i] = 1 # any element bigger to the right gets -1 op for i in range(j + 1, n): if l[i] > l[j]: op[i] = -1 def generate_involutions(n): """ Generates involutions. An involution is a permutation that when multiplied by itself equals the identity permutation. In this implementation the involutions are generated using Fixed Points. Alternatively, an involution can be considered as a permutation that does not contain any cycles with a length that is greater than two. Examples ======== >>> from sympy.utilities.iterables import generate_involutions >>> list(generate_involutions(3)) [(0, 1, 2), (0, 2, 1), (1, 0, 2), (2, 1, 0)] >>> len(list(generate_involutions(4))) 10 References ========== .. [1] http://mathworld.wolfram.com/PermutationInvolution.html """ idx = list(range(n)) for p in permutations(idx): for i in idx: if p[p[i]] != i: break else: yield p def generate_derangements(perm): """ Routine to generate unique derangements. TODO: This will be rewritten to use the ECO operator approach once the permutations branch is in master. 
Examples ======== >>> from sympy.utilities.iterables import generate_derangements >>> list(generate_derangements([0, 1, 2])) [[1, 2, 0], [2, 0, 1]] >>> list(generate_derangements([0, 1, 2, 3])) [[1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1], \ [2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], \ [3, 2, 1, 0]] >>> list(generate_derangements([0, 1, 1])) [] See Also ======== sympy.functions.combinatorial.factorials.subfactorial """ for p in multiset_permutations(perm): if not any(i == j for i, j in zip(perm, p)): yield p def necklaces(n, k, free=False): """ A routine to generate necklaces that may (free=True) or may not (free=False) be turned over to be viewed. The "necklaces" returned are comprised of ``n`` integers (beads) with ``k`` different values (colors). Only unique necklaces are returned. Examples ======== >>> from sympy.utilities.iterables import necklaces, bracelets >>> def show(s, i): ... return ''.join(s[j] for j in i) The "unrestricted necklace" is sometimes also referred to as a "bracelet" (an object that can be turned over, a sequence that can be reversed) and the term "necklace" is used to imply a sequence that cannot be reversed. So ACB == ABC for a bracelet (rotate and reverse) while the two are different for a necklace since rotation alone cannot make the two sequences the same. (mnemonic: Bracelets can be viewed Backwards, but Not Necklaces.) >>> B = [show('ABC', i) for i in bracelets(3, 3)] >>> N = [show('ABC', i) for i in necklaces(3, 3)] >>> set(N) - set(B) {'ACB'} >>> list(necklaces(4, 2)) [(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 1), (0, 1, 0, 1), (0, 1, 1, 1), (1, 1, 1, 1)] >>> [show('.o', i) for i in bracelets(4, 2)] ['....', '...o', '..oo', '.o.o', '.ooo', 'oooo'] References ========== .. [1] http://mathworld.wolfram.com/Necklace.html """ return uniq(minlex(i, directed=not free) for i in variations(list(range(k)), n, repetition=True)) def bracelets(n, k): """Wrapper to necklaces to return a free (unrestricted) necklace.""" return necklaces(n, k, free=True) def generate_oriented_forest(n): """ This algorithm generates oriented forests. An oriented graph is a directed graph having no symmetric pair of directed edges. A forest is an acyclic graph, i.e., it has no cycles. A forest can also be described as a disjoint union of trees, which are graphs in which any two vertices are connected by exactly one simple path. Examples ======== >>> from sympy.utilities.iterables import generate_oriented_forest >>> list(generate_oriented_forest(4)) [[0, 1, 2, 3], [0, 1, 2, 2], [0, 1, 2, 1], [0, 1, 2, 0], \ [0, 1, 1, 1], [0, 1, 1, 0], [0, 1, 0, 1], [0, 1, 0, 0], [0, 0, 0, 0]] References ========== .. [1] T. Beyer and S.M. Hedetniemi: constant time generation of rooted trees, SIAM J. Computing Vol. 9, No. 4, November 1980 .. [2] https://stackoverflow.com/questions/1633833/oriented-forest-taocp-algorithm-in-python """ P = list(range(-1, n)) while True: yield P[1:] if P[n] > 0: P[n] = P[P[n]] else: for p in range(n - 1, 0, -1): if P[p] != 0: target = P[p] - 1 for q in range(p - 1, 0, -1): if P[q] == target: break offset = p - q for i in range(p, n + 1): P[i] = P[i - offset] break else: break def minlex(seq, directed=True, key=None): """ Return the rotation of the sequence in which the lexically smallest elements appear first, e.g. `cba ->acb`. The sequence returned is a tuple, unless the input sequence is a string in which case a string is returned. If ``directed`` is False then the smaller of the sequence and the reversed sequence is returned, e.g. `cba -> abc`. 
If ``key`` is not None then it is used to extract a comparison key from each element in iterable. Examples ======== >>> from sympy.combinatorics.polyhedron import minlex >>> minlex((1, 2, 0)) (0, 1, 2) >>> minlex((1, 0, 2)) (0, 2, 1) >>> minlex((1, 0, 2), directed=False) (0, 1, 2) >>> minlex('11010011000', directed=True) '00011010011' >>> minlex('11010011000', directed=False) '00011001011' >>> minlex(('bb', 'aaa', 'c', 'a')) ('a', 'bb', 'aaa', 'c') >>> minlex(('bb', 'aaa', 'c', 'a'), key=len) ('c', 'a', 'bb', 'aaa') """ if key is None: key = sympy.Id best = rotate_left(seq, least_rotation(seq, key=key)) if not directed: rseq = seq[::-1] rbest = rotate_left(rseq, least_rotation(rseq, key=key)) best = min(best, rbest, key=key) # Convert to tuple, unless we started with a string. return tuple(best) if not isinstance(seq, str) else best def runs(seq, op=gt): """Group the sequence into lists in which successive elements all compare the same with the comparison operator, ``op``: op(seq[i + 1], seq[i]) is True from all elements in a run. Examples ======== >>> from sympy.utilities.iterables import runs >>> from operator import ge >>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2]) [[0, 1, 2], [2], [1, 4], [3], [2], [2]] >>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2], op=ge) [[0, 1, 2, 2], [1, 4], [3], [2, 2]] """ cycles = [] seq = iter(seq) try: run = [next(seq)] except StopIteration: return [] while True: try: ei = next(seq) except StopIteration: break if op(ei, run[-1]): run.append(ei) continue else: cycles.append(run) run = [ei] if run: cycles.append(run) return cycles def kbins(l, k, ordered=None): """ Return sequence ``l`` partitioned into ``k`` bins. Examples ======== >>> from __future__ import print_function The default is to give the items in the same order, but grouped into k partitions without any reordering: >>> from sympy.utilities.iterables import kbins >>> for p in kbins(list(range(5)), 2): ... print(p) ... [[0], [1, 2, 3, 4]] [[0, 1], [2, 3, 4]] [[0, 1, 2], [3, 4]] [[0, 1, 2, 3], [4]] The ``ordered`` flag is either None (to give the simple partition of the elements) or is a 2 digit integer indicating whether the order of the bins and the order of the items in the bins matters. Given:: A = [[0], [1, 2]] B = [[1, 2], [0]] C = [[2, 1], [0]] D = [[0], [2, 1]] the following values for ``ordered`` have the shown meanings:: 00 means A == B == C == D 01 means A == B 10 means A == D 11 means A == A >>> for ordered_flag in [None, 0, 1, 10, 11]: ... print('ordered = %s' % ordered_flag) ... for p in kbins(list(range(3)), 2, ordered=ordered_flag): ... print(' %s' % p) ... 
ordered = None [[0], [1, 2]] [[0, 1], [2]] ordered = 0 [[0, 1], [2]] [[0, 2], [1]] [[0], [1, 2]] ordered = 1 [[0], [1, 2]] [[0], [2, 1]] [[1], [0, 2]] [[1], [2, 0]] [[2], [0, 1]] [[2], [1, 0]] ordered = 10 [[0, 1], [2]] [[2], [0, 1]] [[0, 2], [1]] [[1], [0, 2]] [[0], [1, 2]] [[1, 2], [0]] ordered = 11 [[0], [1, 2]] [[0, 1], [2]] [[0], [2, 1]] [[0, 2], [1]] [[1], [0, 2]] [[1, 0], [2]] [[1], [2, 0]] [[1, 2], [0]] [[2], [0, 1]] [[2, 0], [1]] [[2], [1, 0]] [[2, 1], [0]] See Also ======== partitions, multiset_partitions """ def partition(lista, bins): # EnricoGiampieri's partition generator from # https://stackoverflow.com/questions/13131491/ # partition-n-items-into-k-bins-in-python-lazily if len(lista) == 1 or bins == 1: yield [lista] elif len(lista) > 1 and bins > 1: for i in range(1, len(lista)): for part in partition(lista[i:], bins - 1): if len([lista[:i]] + part) == bins: yield [lista[:i]] + part if ordered is None: yield from partition(l, k) elif ordered == 11: for pl in multiset_permutations(l): pl = list(pl) yield from partition(pl, k) elif ordered == 00: yield from multiset_partitions(l, k) elif ordered == 10: for p in multiset_partitions(l, k): for perm in permutations(p): yield list(perm) elif ordered == 1: for kgot, p in partitions(len(l), k, size=True): if kgot != k: continue for li in multiset_permutations(l): rv = [] i = j = 0 li = list(li) for size, multiplicity in sorted(p.items()): for m in range(multiplicity): j = i + size rv.append(li[i: j]) i = j yield rv else: raise ValueError( 'ordered must be one of 00, 01, 10 or 11, not %s' % ordered) def permute_signs(t): """Return iterator in which the signs of non-zero elements of t are permuted. Examples ======== >>> from sympy.utilities.iterables import permute_signs >>> list(permute_signs((0, 1, 2))) [(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2)] """ for signs in cartes(*[(1, -1)]*(len(t) - t.count(0))): signs = list(signs) yield type(t)([i*signs.pop() if i else i for i in t]) def signed_permutations(t): """Return iterator in which the signs of non-zero elements of t and the order of the elements are permuted. Examples ======== >>> from sympy.utilities.iterables import signed_permutations >>> list(signed_permutations((0, 1, 2))) [(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2), (0, 2, 1), (0, -2, 1), (0, 2, -1), (0, -2, -1), (1, 0, 2), (-1, 0, 2), (1, 0, -2), (-1, 0, -2), (1, 2, 0), (-1, 2, 0), (1, -2, 0), (-1, -2, 0), (2, 0, 1), (-2, 0, 1), (2, 0, -1), (-2, 0, -1), (2, 1, 0), (-2, 1, 0), (2, -1, 0), (-2, -1, 0)] """ return (type(t)(i) for j in permutations(t) for i in permute_signs(j)) def rotations(s, dir=1): """Return a generator giving the items in s as list where each subsequent list has the items rotated to the left (default) or right (dir=-1) relative to the previous list. Examples ======== >>> from sympy.utilities.iterables import rotations >>> list(rotations([1,2,3])) [[1, 2, 3], [2, 3, 1], [3, 1, 2]] >>> list(rotations([1,2,3], -1)) [[1, 2, 3], [3, 1, 2], [2, 3, 1]] """ seq = list(s) for i in range(len(seq)): yield seq seq = rotate_left(seq, dir) def roundrobin(*iterables): """roundrobin recipe taken from itertools documentation: https://docs.python.org/2/library/itertools.html#recipes roundrobin('ABC', 'D', 'EF') --> A D E B F C Recipe credited to George Sakkis """ import itertools nexts = itertools.cycle(iter(it).__next__ for it in iterables) pending = len(iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = itertools.cycle(itertools.islice(nexts, pending))
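# ----------------------------------------------------------------------
# A minimal usage sketch (illustrative only, not part of the module's
# public API): it exercises ``roundrobin`` and ``sift`` from this module,
# assuming the file is executed directly as a script rather than imported.
if __name__ == "__main__":
    # roundrobin interleaves items from iterables of unequal length
    assert list(roundrobin('ABC', 'D', 'EF')) == ['A', 'D', 'E', 'B', 'F', 'C']
    # sift with binary=True returns the pair (True-list, False-list)
    evens, odds = sift(range(5), lambda x: x % 2 == 0, binary=True)
    assert evens == [0, 2, 4] and odds == [1, 3]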
"""Miscellaneous stuff that doesn't really fit anywhere else.""" from typing import List import sys import os import re as _re import struct from textwrap import fill, dedent from sympy.core.compatibility import as_int from sympy.core.decorators import deprecated class Undecidable(ValueError): # an error to be raised when a decision cannot be made definitively # where a definitive answer is needed pass def filldedent(s, w=70): """ Strips leading and trailing empty lines from a copy of `s`, then dedents, fills and returns it. Empty line stripping serves to deal with docstrings like this one that start with a newline after the initial triple quote, inserting an empty line at the beginning of the string. See Also ======== strlines, rawlines """ return '\n' + fill(dedent(str(s)).strip('\n'), width=w) def strlines(s, c=64, short=False): """Return a cut-and-pastable string that, when printed, is equivalent to the input. The lines will be surrounded by parentheses and no line will be longer than c (default 64) characters. If the line contains newlines characters, the `rawlines` result will be returned. If ``short`` is True (default is False) then if there is one line it will be returned without bounding parentheses. Examples ======== >>> from sympy.utilities.misc import strlines >>> q = 'this is a long string that should be broken into shorter lines' >>> print(strlines(q, 40)) ( 'this is a long string that should be b' 'roken into shorter lines' ) >>> q == ( ... 'this is a long string that should be b' ... 'roken into shorter lines' ... ) True See Also ======== filldedent, rawlines """ if type(s) is not str: raise ValueError('expecting string input') if '\n' in s: return rawlines(s) q = '"' if repr(s).startswith('"') else "'" q = (q,)*2 if '\\' in s: # use r-string m = '(\nr%s%%s%s\n)' % q j = '%s\nr%s' % q c -= 3 else: m = '(\n%s%%s%s\n)' % q j = '%s\n%s' % q c -= 2 out = [] while s: out.append(s[:c]) s=s[c:] if short and len(out) == 1: return (m % out[0]).splitlines()[1] # strip bounding (\n...\n) return m % j.join(out) def rawlines(s): """Return a cut-and-pastable string that, when printed, is equivalent to the input. Use this when there is more than one line in the string. The string returned is formatted so it can be indented nicely within tests; in some cases it is wrapped in the dedent function which has to be imported from textwrap. Examples ======== Note: because there are characters in the examples below that need to be escaped because they are themselves within a triple quoted docstring, expressions below look more complicated than they would be if they were printed in an interpreter window. >>> from sympy.utilities.misc import rawlines >>> from sympy import TableForm >>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee']))) >>> print(rawlines(s)) ( 'a bee\\n' '-----\\n' '1 10 ' ) >>> print(rawlines('''this ... that''')) dedent('''\\ this that''') >>> print(rawlines('''this ... that ... ''')) dedent('''\\ this that ''') >>> s = \"\"\"this ... is a triple ''' ... \"\"\" >>> print(rawlines(s)) dedent(\"\"\"\\ this is a triple ''' \"\"\") >>> print(rawlines('''this ... that ... 
''')) ( 'this\\n' 'that\\n' ' ' ) See Also ======== filldedent, strlines """ lines = s.split('\n') if len(lines) == 1: return repr(lines[0]) triple = ["'''" in s, '"""' in s] if any(li.endswith(' ') for li in lines) or '\\' in s or all(triple): rv = [] # add on the newlines trailing = s.endswith('\n') last = len(lines) - 1 for i, li in enumerate(lines): if i != last or trailing: rv.append(repr(li + '\n')) else: rv.append(repr(li)) return '(\n %s\n)' % '\n '.join(rv) else: rv = '\n '.join(lines) if triple[0]: return 'dedent("""\\\n %s""")' % rv else: return "dedent('''\\\n %s''')" % rv ARCH = str(struct.calcsize('P') * 8) + "-bit" # XXX: PyPy doesn't support hash randomization HASH_RANDOMIZATION = getattr(sys.flags, 'hash_randomization', False) _debug_tmp = [] # type: List[str] _debug_iter = 0 def debug_decorator(func): """If SYMPY_DEBUG is True, it will print a nice execution tree with arguments and results of all decorated functions, else do nothing. """ from sympy import SYMPY_DEBUG if not SYMPY_DEBUG: return func def maketree(f, *args, **kw): global _debug_tmp global _debug_iter oldtmp = _debug_tmp _debug_tmp = [] _debug_iter += 1 def tree(subtrees): def indent(s, type=1): x = s.split("\n") r = "+-%s\n" % x[0] for a in x[1:]: if a == "": continue if type == 1: r += "| %s\n" % a else: r += " %s\n" % a return r if len(subtrees) == 0: return "" f = [] for a in subtrees[:-1]: f.append(indent(a)) f.append(indent(subtrees[-1], 2)) return ''.join(f) # If there is a bug and the algorithm enters an infinite loop, enable the # following lines. It will print the names and parameters of all major functions # that are called, *before* they are called #from functools import reduce #print("%s%s %s%s" % (_debug_iter, reduce(lambda x, y: x + y, \ # map(lambda x: '-', range(1, 2 + _debug_iter))), f.__name__, args)) r = f(*args, **kw) _debug_iter -= 1 s = "%s%s = %s\n" % (f.__name__, args, r) if _debug_tmp != []: s += tree(_debug_tmp) _debug_tmp = oldtmp _debug_tmp.append(s) if _debug_iter == 0: print(_debug_tmp[0]) _debug_tmp = [] return r def decorated(*args, **kwargs): return maketree(func, *args, **kwargs) return decorated def debug(*args): """ Print ``*args`` if SYMPY_DEBUG is True, else do nothing. """ from sympy import SYMPY_DEBUG if SYMPY_DEBUG: print(*args, file=sys.stderr) @deprecated( useinstead="the builtin ``shutil.which`` function", issue=19634, deprecated_since_version="1.7") def find_executable(executable, path=None): """Try to find 'executable' in the directories listed in 'path' (a string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']). Returns the complete filename or None if not found """ if path is None: path = os.environ['PATH'] paths = path.split(os.pathsep) extlist = [''] if os.name == 'os2': (base, ext) = os.path.splitext(executable) # executable files on OS/2 can have an arbitrary extension, but # .exe is automatically appended if no dot is present in the name if not ext: executable = executable + ".exe" elif sys.platform == 'win32': pathext = os.environ['PATHEXT'].lower().split(os.pathsep) (base, ext) = os.path.splitext(executable) if ext.lower() not in pathext: extlist = pathext for ext in extlist: execname = executable + ext if os.path.isfile(execname): return execname else: for p in paths: f = os.path.join(p, execname) if os.path.isfile(f): return f return None def func_name(x, short=False): """Return function name of `x` (if defined) else the `type(x)`. If short is True and there is a shorter alias for the result, return the alias. 
Examples ======== >>> from sympy.utilities.misc import func_name >>> from sympy import Matrix >>> from sympy.abc import x >>> func_name(Matrix.eye(3)) 'MutableDenseMatrix' >>> func_name(x < 1) 'StrictLessThan' >>> func_name(x < 1, short=True) 'Lt' """ alias = { 'GreaterThan': 'Ge', 'StrictGreaterThan': 'Gt', 'LessThan': 'Le', 'StrictLessThan': 'Lt', 'Equality': 'Eq', 'Unequality': 'Ne', } typ = type(x) if str(typ).startswith("<type '"): typ = str(typ).split("'")[1].split("'")[0] elif str(typ).startswith("<class '"): typ = str(typ).split("'")[1].split("'")[0] rv = getattr(getattr(x, 'func', x), '__name__', typ) if '.' in rv: rv = rv.split('.')[-1] if short: rv = alias.get(rv, rv) return rv def _replace(reps): """Return a function that can make the replacements, given in ``reps``, on a string. The replacements should be given as mapping. Examples ======== >>> from sympy.utilities.misc import _replace >>> f = _replace(dict(foo='bar', d='t')) >>> f('food') 'bart' >>> f = _replace({}) >>> f('food') 'food' """ if not reps: return lambda x: x D = lambda match: reps[match.group(0)] pattern = _re.compile("|".join( [_re.escape(k) for k, v in reps.items()]), _re.M) return lambda string: pattern.sub(D, string) def replace(string, *reps): """Return ``string`` with all keys in ``reps`` replaced with their corresponding values, longer strings first, irrespective of the order they are given. ``reps`` may be passed as tuples or a single mapping. Examples ======== >>> from sympy.utilities.misc import replace >>> replace('foo', {'oo': 'ar', 'f': 'b'}) 'bar' >>> replace("spamham sha", ("spam", "eggs"), ("sha","md5")) 'eggsham md5' There is no guarantee that a unique answer will be obtained if keys in a mapping overlap (i.e. are the same length and have some identical sequence at the beginning/end): >>> reps = [ ... ('ab', 'x'), ... ('bc', 'y')] >>> replace('abc', *reps) in ('xc', 'ay') True References ========== .. [1] https://stackoverflow.com/questions/6116978/python-replace-multiple-strings """ if len(reps) == 1: kv = reps[0] if type(kv) is dict: reps = kv else: return string.replace(*kv) else: reps = dict(reps) return _replace(reps)(string) def translate(s, a, b=None, c=None): """Return ``s`` where characters have been replaced or deleted. SYNTAX ====== translate(s, None, deletechars): all characters in ``deletechars`` are deleted translate(s, map [,deletechars]): all characters in ``deletechars`` (if provided) are deleted then the replacements defined by map are made; if the keys of map are strings then the longer ones are handled first. Multicharacter deletions should have a value of ''. 
translate(s, oldchars, newchars, deletechars) all characters in ``deletechars`` are deleted then each character in ``oldchars`` is replaced with the corresponding character in ``newchars`` Examples ======== >>> from sympy.utilities.misc import translate >>> abc = 'abc' >>> translate(abc, None, 'a') 'bc' >>> translate(abc, {'a': 'x'}, 'c') 'xb' >>> translate(abc, {'abc': 'x', 'a': 'y'}) 'x' >>> translate('abcd', 'ac', 'AC', 'd') 'AbC' There is no guarantee that a unique answer will be obtained if keys in a mapping overlap are the same length and have some identical sequences at the beginning/end: >>> translate(abc, {'ab': 'x', 'bc': 'y'}) in ('xc', 'ay') True """ mr = {} if a is None: if c is not None: raise ValueError('c should be None when a=None is passed, instead got %s' % c) if b is None: return s c = b a = b = '' else: if type(a) is dict: short = {} for k in list(a.keys()): if len(k) == 1 and len(a[k]) == 1: short[k] = a.pop(k) mr = a c = b if short: a, b = [''.join(i) for i in list(zip(*short.items()))] else: a = b = '' elif len(a) != len(b): raise ValueError('oldchars and newchars have different lengths') if c: val = str.maketrans('', '', c) s = s.translate(val) s = replace(s, mr) n = str.maketrans(a, b) return s.translate(n) def ordinal(num): """Return ordinal number string of num, e.g. 1 becomes 1st. """ # modified from https://codereview.stackexchange.com/questions/41298/producing-ordinal-numbers n = as_int(num) k = abs(n) % 100 if 11 <= k <= 13: suffix = 'th' elif k % 10 == 1: suffix = 'st' elif k % 10 == 2: suffix = 'nd' elif k % 10 == 3: suffix = 'rd' else: suffix = 'th' return str(n) + suffix
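# ----------------------------------------------------------------------
# A minimal usage sketch (illustrative only, not part of the module's
# public API), assuming the file is run directly as a script: it shows
# the ``ordinal`` suffix rules and a multi-character ``translate``.
if __name__ == "__main__":
    # 11-13 always take 'th'; otherwise the last digit decides the suffix
    assert [ordinal(n) for n in (1, 2, 3, 11, 21)] == \
        ['1st', '2nd', '3rd', '11th', '21st']
    # delete 'd', then apply the multi-character replacement 'ab' -> 'x'
    assert translate('abcd', {'ab': 'x'}, 'd') == 'xc'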
""" A Printer for generating readable representation of most sympy classes. """ from typing import Any, Dict from sympy.core import S, Rational, Pow, Basic, Mul, Number from sympy.core.mul import _keep_coeff from .printer import Printer, print_function from sympy.printing.precedence import precedence, PRECEDENCE from mpmath.libmp import prec_to_dps, to_str as mlib_to_str from sympy.utilities import default_sort_key class StrPrinter(Printer): printmethod = "_sympystr" _default_settings = { "order": None, "full_prec": "auto", "sympy_integers": False, "abbrev": False, "perm_cyclic": True, "min": None, "max": None, } # type: Dict[str, Any] _relationals = dict() # type: Dict[str, str] def parenthesize(self, item, level, strict=False): if (precedence(item) < level) or ((not strict) and precedence(item) <= level): return "(%s)" % self._print(item) else: return self._print(item) def stringify(self, args, sep, level=0): return sep.join([self.parenthesize(item, level) for item in args]) def emptyPrinter(self, expr): if isinstance(expr, str): return expr elif isinstance(expr, Basic): return repr(expr) else: return str(expr) def _print_Add(self, expr, order=None): terms = self._as_ordered_terms(expr, order=order) PREC = precedence(expr) l = [] for term in terms: t = self._print(term) if t.startswith('-'): sign = "-" t = t[1:] else: sign = "+" if precedence(term) < PREC: l.extend([sign, "(%s)" % t]) else: l.extend([sign, t]) sign = l.pop(0) if sign == '+': sign = "" return sign + ' '.join(l) def _print_BooleanTrue(self, expr): return "True" def _print_BooleanFalse(self, expr): return "False" def _print_Not(self, expr): return '~%s' %(self.parenthesize(expr.args[0],PRECEDENCE["Not"])) def _print_And(self, expr): return self.stringify(expr.args, " & ", PRECEDENCE["BitwiseAnd"]) def _print_Or(self, expr): return self.stringify(expr.args, " | ", PRECEDENCE["BitwiseOr"]) def _print_Xor(self, expr): return self.stringify(expr.args, " ^ ", PRECEDENCE["BitwiseXor"]) def _print_AppliedPredicate(self, expr): return '%s(%s)' % (self._print(expr.func), self._print(expr.arg)) def _print_Basic(self, expr): l = [self._print(o) for o in expr.args] return expr.__class__.__name__ + "(%s)" % ", ".join(l) def _print_BlockMatrix(self, B): if B.blocks.shape == (1, 1): self._print(B.blocks[0, 0]) return self._print(B.blocks) def _print_Catalan(self, expr): return 'Catalan' def _print_ComplexInfinity(self, expr): return 'zoo' def _print_ConditionSet(self, s): args = tuple([self._print(i) for i in (s.sym, s.condition)]) if s.base_set is S.UniversalSet: return 'ConditionSet(%s, %s)' % args args += (self._print(s.base_set),) return 'ConditionSet(%s, %s, %s)' % args def _print_Derivative(self, expr): dexpr = expr.expr dvars = [i[0] if i[1] == 1 else i for i in expr.variable_count] return 'Derivative(%s)' % ", ".join(map(lambda arg: self._print(arg), [dexpr] + dvars)) def _print_dict(self, d): keys = sorted(d.keys(), key=default_sort_key) items = [] for key in keys: item = "%s: %s" % (self._print(key), self._print(d[key])) items.append(item) return "{%s}" % ", ".join(items) def _print_Dict(self, expr): return self._print_dict(expr) def _print_RandomDomain(self, d): if hasattr(d, 'as_boolean'): return 'Domain: ' + self._print(d.as_boolean()) elif hasattr(d, 'set'): return ('Domain: ' + self._print(d.symbols) + ' in ' + self._print(d.set)) else: return 'Domain on ' + self._print(d.symbols) def _print_Dummy(self, expr): return '_' + expr.name def _print_EulerGamma(self, expr): return 'EulerGamma' def _print_Exp1(self, expr): return 
'E' def _print_ExprCondPair(self, expr): return '(%s, %s)' % (self._print(expr.expr), self._print(expr.cond)) def _print_Function(self, expr): return expr.func.__name__ + "(%s)" % self.stringify(expr.args, ", ") def _print_GoldenRatio(self, expr): return 'GoldenRatio' def _print_TribonacciConstant(self, expr): return 'TribonacciConstant' def _print_ImaginaryUnit(self, expr): return 'I' def _print_Infinity(self, expr): return 'oo' def _print_Integral(self, expr): def _xab_tostr(xab): if len(xab) == 1: return self._print(xab[0]) else: return self._print((xab[0],) + tuple(xab[1:])) L = ', '.join([_xab_tostr(l) for l in expr.limits]) return 'Integral(%s, %s)' % (self._print(expr.function), L) def _print_Interval(self, i): fin = 'Interval{m}({a}, {b})' a, b, l, r = i.args if a.is_infinite and b.is_infinite: m = '' elif a.is_infinite and not r: m = '' elif b.is_infinite and not l: m = '' elif not l and not r: m = '' elif l and r: m = '.open' elif l: m = '.Lopen' else: m = '.Ropen' return fin.format(**{'a': a, 'b': b, 'm': m}) def _print_AccumulationBounds(self, i): return "AccumBounds(%s, %s)" % (self._print(i.min), self._print(i.max)) def _print_Inverse(self, I): return "%s**(-1)" % self.parenthesize(I.arg, PRECEDENCE["Pow"]) def _print_Lambda(self, obj): expr = obj.expr sig = obj.signature if len(sig) == 1 and sig[0].is_symbol: sig = sig[0] return "Lambda(%s, %s)" % (self._print(sig), self._print(expr)) def _print_LatticeOp(self, expr): args = sorted(expr.args, key=default_sort_key) return expr.func.__name__ + "(%s)" % ", ".join(self._print(arg) for arg in args) def _print_Limit(self, expr): e, z, z0, dir = expr.args if str(dir) == "+": return "Limit(%s, %s, %s)" % tuple(map(self._print, (e, z, z0))) else: return "Limit(%s, %s, %s, dir='%s')" % tuple(map(self._print, (e, z, z0, dir))) def _print_list(self, expr): return "[%s]" % self.stringify(expr, ", ") def _print_MatrixBase(self, expr): return expr._format_str(self) def _print_MatrixElement(self, expr): return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) \ + '[%s, %s]' % (self._print(expr.i), self._print(expr.j)) def _print_MatrixSlice(self, expr): def strslice(x, dim): x = list(x) if x[2] == 1: del x[2] if x[0] == 0: x[0] = '' if x[1] == dim: x[1] = '' return ':'.join(map(lambda arg: self._print(arg), x)) return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + '[' + strslice(expr.rowslice, expr.parent.rows) + ', ' + strslice(expr.colslice, expr.parent.cols) + ']') def _print_DeferredVector(self, expr): return expr.name def _print_Mul(self, expr): prec = precedence(expr) # Check for unevaluated Mul. In this case we need to make sure the # identities are visible, multiple Rational factors are not combined # etc so we display in a straight-forward form that fully preserves all # args and their order. 
args = expr.args if args[0] is S.One or any(isinstance(arg, Number) for arg in args[1:]): factors = [self.parenthesize(a, prec, strict=False) for a in args] return '*'.join(factors) c, e = expr.as_coeff_Mul() if c < 0: expr = _keep_coeff(-c, e) sign = "-" else: sign = "" a = [] # items in the numerator b = [] # items that are in the denominator (if any) pow_paren = [] # Will collect all pow with more than one base element and exp = -1 if self.order not in ('old', 'none'): args = expr.as_ordered_factors() else: # use make_args in case expr was something like -x -> x args = Mul.make_args(expr) # Gather args for numerator/denominator for item in args: if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative: if item.exp != -1: b.append(Pow(item.base, -item.exp, evaluate=False)) else: if len(item.args[0].args) != 1 and isinstance(item.base, Mul): # To avoid situations like #14160 pow_paren.append(item) b.append(Pow(item.base, -item.exp)) elif item.is_Rational and item is not S.Infinity: if item.p != 1: a.append(Rational(item.p)) if item.q != 1: b.append(Rational(item.q)) else: a.append(item) a = a or [S.One] a_str = [self.parenthesize(x, prec, strict=False) for x in a] b_str = [self.parenthesize(x, prec, strict=False) for x in b] # To parenthesize Pow with exp = -1 and having more than one Symbol for item in pow_paren: if item.base in b: b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)] if not b: return sign + '*'.join(a_str) elif len(b) == 1: return sign + '*'.join(a_str) + "/" + b_str[0] else: return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str) def _print_MatMul(self, expr): c, m = expr.as_coeff_mmul() sign = "" if c.is_number: re, im = c.as_real_imag() if im.is_zero and re.is_negative: expr = _keep_coeff(-c, m) sign = "-" elif re.is_zero and im.is_negative: expr = _keep_coeff(-c, m) sign = "-" return sign + '*'.join( [self.parenthesize(arg, precedence(expr)) for arg in expr.args] ) def _print_ElementwiseApplyFunction(self, expr): return "{}.({})".format( expr.function, self._print(expr.expr), ) def _print_NaN(self, expr): return 'nan' def _print_NegativeInfinity(self, expr): return '-oo' def _print_Order(self, expr): if not expr.variables or all(p is S.Zero for p in expr.point): if len(expr.variables) <= 1: return 'O(%s)' % self._print(expr.expr) else: return 'O(%s)' % self.stringify((expr.expr,) + expr.variables, ', ', 0) else: return 'O(%s)' % self.stringify(expr.args, ', ', 0) def _print_Ordinal(self, expr): return expr.__str__() def _print_Cycle(self, expr): return expr.__str__() def _print_Permutation(self, expr): from sympy.combinatorics.permutations import Permutation, Cycle from sympy.utilities.exceptions import SymPyDeprecationWarning perm_cyclic = Permutation.print_cyclic if perm_cyclic is not None: SymPyDeprecationWarning( feature="Permutation.print_cyclic = {}".format(perm_cyclic), useinstead="init_printing(perm_cyclic={})" .format(perm_cyclic), issue=15201, deprecated_since_version="1.6").warn() else: perm_cyclic = self._settings.get("perm_cyclic", True) if perm_cyclic: if not expr.size: return '()' # before taking Cycle notation, see if the last element is # a singleton and move it to the head of the string s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):] last = s.rfind('(') if not last == 0 and ',' not in s[last:]: s = s[last:] + s[:last] s = s.replace(',', '') return s else: s = expr.support() if not s: if expr.size < 5: return 'Permutation(%s)' % self._print(expr.array_form) return 'Permutation([], size=%s)' % 
self._print(expr.size) trim = self._print(expr.array_form[:s[-1] + 1]) + ', size=%s' % self._print(expr.size) use = full = self._print(expr.array_form) if len(trim) < len(full): use = trim return 'Permutation(%s)' % use def _print_Subs(self, obj): expr, old, new = obj.args if len(obj.point) == 1: old = old[0] new = new[0] return "Subs(%s, %s, %s)" % ( self._print(expr), self._print(old), self._print(new)) def _print_TensorIndex(self, expr): return expr._print() def _print_TensorHead(self, expr): return expr._print() def _print_Tensor(self, expr): return expr._print() def _print_TensMul(self, expr): # prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)" sign, args = expr._get_args_for_traditional_printer() return sign + "*".join( [self.parenthesize(arg, precedence(expr)) for arg in args] ) def _print_TensAdd(self, expr): return expr._print() def _print_PermutationGroup(self, expr): p = [' %s' % self._print(a) for a in expr.args] return 'PermutationGroup([\n%s])' % ',\n'.join(p) def _print_Pi(self, expr): return 'pi' def _print_PolyRing(self, ring): return "Polynomial ring in %s over %s with %s order" % \ (", ".join(map(lambda rs: self._print(rs), ring.symbols)), self._print(ring.domain), self._print(ring.order)) def _print_FracField(self, field): return "Rational function field in %s over %s with %s order" % \ (", ".join(map(lambda fs: self._print(fs), field.symbols)), self._print(field.domain), self._print(field.order)) def _print_FreeGroupElement(self, elm): return elm.__str__() def _print_GaussianElement(self, poly): return "(%s + %s*I)" % (poly.x, poly.y) def _print_PolyElement(self, poly): return poly.str(self, PRECEDENCE, "%s**%s", "*") def _print_FracElement(self, frac): if frac.denom == 1: return self._print(frac.numer) else: numer = self.parenthesize(frac.numer, PRECEDENCE["Mul"], strict=True) denom = self.parenthesize(frac.denom, PRECEDENCE["Atom"], strict=True) return numer + "/" + denom def _print_Poly(self, expr): ATOM_PREC = PRECEDENCE["Atom"] - 1 terms, gens = [], [ self.parenthesize(s, ATOM_PREC) for s in expr.gens ] for monom, coeff in expr.terms(): s_monom = [] for i, exp in enumerate(monom): if exp > 0: if exp == 1: s_monom.append(gens[i]) else: s_monom.append(gens[i] + "**%d" % exp) s_monom = "*".join(s_monom) if coeff.is_Add: if s_monom: s_coeff = "(" + self._print(coeff) + ")" else: s_coeff = self._print(coeff) else: if s_monom: if coeff is S.One: terms.extend(['+', s_monom]) continue if coeff is S.NegativeOne: terms.extend(['-', s_monom]) continue s_coeff = self._print(coeff) if not s_monom: s_term = s_coeff else: s_term = s_coeff + "*" + s_monom if s_term.startswith('-'): terms.extend(['-', s_term[1:]]) else: terms.extend(['+', s_term]) if terms[0] in ['-', '+']: modifier = terms.pop(0) if modifier == '-': terms[0] = '-' + terms[0] format = expr.__class__.__name__ + "(%s, %s" from sympy.polys.polyerrors import PolynomialError try: format += ", modulus=%s" % expr.get_modulus() except PolynomialError: format += ", domain='%s'" % expr.get_domain() format += ")" for index, item in enumerate(gens): if len(item) > 2 and (item[:1] == "(" and item[len(item) - 1:] == ")"): gens[index] = item[1:len(item) - 1] return format % (' '.join(terms), ', '.join(gens)) def _print_UniversalSet(self, p): return 'UniversalSet' def _print_AlgebraicNumber(self, expr): if expr.is_aliased: return self._print(expr.as_poly().as_expr()) else: return self._print(expr.as_expr()) def _print_Pow(self, expr, rational=False): """Printing helper function for ``Pow`` Parameters ========== rational : 
bool, optional If ``True``, it will not attempt printing ``sqrt(x)`` or ``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)`` instead. See examples for additional details Examples ======== >>> from sympy.functions import sqrt >>> from sympy.printing.str import StrPrinter >>> from sympy.abc import x How ``rational`` keyword works with ``sqrt``: >>> printer = StrPrinter() >>> printer._print_Pow(sqrt(x), rational=True) 'x**(1/2)' >>> printer._print_Pow(sqrt(x), rational=False) 'sqrt(x)' >>> printer._print_Pow(1/sqrt(x), rational=True) 'x**(-1/2)' >>> printer._print_Pow(1/sqrt(x), rational=False) '1/sqrt(x)' Notes ===== ``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy, so there is no need of defining a separate printer for ``sqrt``. Instead, it should be handled here as well. """ PREC = precedence(expr) if expr.exp is S.Half and not rational: return "sqrt(%s)" % self._print(expr.base) if expr.is_commutative: if -expr.exp is S.Half and not rational: # Note: Don't test "expr.exp == -S.Half" here, because that will # match -0.5, which we don't want. return "%s/sqrt(%s)" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base))) if expr.exp is -S.One: # Similarly to the S.Half case, don't test with "==" here. return '%s/%s' % (self._print(S.One), self.parenthesize(expr.base, PREC, strict=False)) e = self.parenthesize(expr.exp, PREC, strict=False) if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1: # the parenthesized exp should be '(Rational(a, b))' so strip parens, # but just check to be sure. if e.startswith('(Rational'): return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1]) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e) def _print_UnevaluatedExpr(self, expr): return self._print(expr.args[0]) def _print_MatPow(self, expr): PREC = precedence(expr) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), self.parenthesize(expr.exp, PREC, strict=False)) def _print_Integer(self, expr): if self._settings.get("sympy_integers", False): return "S(%s)" % (expr) return str(expr.p) def _print_Integers(self, expr): return 'Integers' def _print_Naturals(self, expr): return 'Naturals' def _print_Naturals0(self, expr): return 'Naturals0' def _print_Rationals(self, expr): return 'Rationals' def _print_Reals(self, expr): return 'Reals' def _print_Complexes(self, expr): return 'Complexes' def _print_EmptySet(self, expr): return 'EmptySet' def _print_EmptySequence(self, expr): return 'EmptySequence' def _print_int(self, expr): return str(expr) def _print_mpz(self, expr): return str(expr) def _print_Rational(self, expr): if expr.q == 1: return str(expr.p) else: if self._settings.get("sympy_integers", False): return "S(%s)/%s" % (expr.p, expr.q) return "%s/%s" % (expr.p, expr.q) def _print_PythonRational(self, expr): if expr.q == 1: return str(expr.p) else: return "%d/%d" % (expr.p, expr.q) def _print_Fraction(self, expr): if expr.denominator == 1: return str(expr.numerator) else: return "%s/%s" % (expr.numerator, expr.denominator) def _print_mpq(self, expr): if expr.denominator == 1: return str(expr.numerator) else: return "%s/%s" % (expr.numerator, expr.denominator) def _print_Float(self, expr): prec = expr._prec if prec < 5: dps = 0 else: dps = prec_to_dps(expr._prec) if self._settings["full_prec"] is True: strip = False elif self._settings["full_prec"] is False: strip = True elif self._settings["full_prec"] == "auto": strip = self._print_level > 1 low = self._settings["min"] if "min" in self._settings else 
None high = self._settings["max"] if "max" in self._settings else None rv = mlib_to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high) if rv.startswith('-.0'): rv = '-0.' + rv[3:] elif rv.startswith('.0'): rv = '0.' + rv[2:] if rv.startswith('+'): # e.g., +inf -> inf rv = rv[1:] return rv def _print_Relational(self, expr): charmap = { "==": "Eq", "!=": "Ne", ":=": "Assignment", '+=': "AddAugmentedAssignment", "-=": "SubAugmentedAssignment", "*=": "MulAugmentedAssignment", "/=": "DivAugmentedAssignment", "%=": "ModAugmentedAssignment", } if expr.rel_op in charmap: return '%s(%s, %s)' % (charmap[expr.rel_op], self._print(expr.lhs), self._print(expr.rhs)) return '%s %s %s' % (self.parenthesize(expr.lhs, precedence(expr)), self._relationals.get(expr.rel_op) or expr.rel_op, self.parenthesize(expr.rhs, precedence(expr))) def _print_ComplexRootOf(self, expr): return "CRootOf(%s, %d)" % (self._print_Add(expr.expr, order='lex'), expr.index) def _print_RootSum(self, expr): args = [self._print_Add(expr.expr, order='lex')] if expr.fun is not S.IdentityFunction: args.append(self._print(expr.fun)) return "RootSum(%s)" % ", ".join(args) def _print_GroebnerBasis(self, basis): cls = basis.__class__.__name__ exprs = [self._print_Add(arg, order=basis.order) for arg in basis.exprs] exprs = "[%s]" % ", ".join(exprs) gens = [ self._print(gen) for gen in basis.gens ] domain = "domain='%s'" % self._print(basis.domain) order = "order='%s'" % self._print(basis.order) args = [exprs] + gens + [domain, order] return "%s(%s)" % (cls, ", ".join(args)) def _print_set(self, s): items = sorted(s, key=default_sort_key) args = ', '.join(self._print(item) for item in items) if not args: return "set()" return '{%s}' % args def _print_frozenset(self, s): if not s: return "frozenset()" return "frozenset(%s)" % self._print_set(s) def _print_Sum(self, expr): def _xab_tostr(xab): if len(xab) == 1: return self._print(xab[0]) else: return self._print((xab[0],) + tuple(xab[1:])) L = ', '.join([_xab_tostr(l) for l in expr.limits]) return 'Sum(%s, %s)' % (self._print(expr.function), L) def _print_Symbol(self, expr): return expr.name _print_MatrixSymbol = _print_Symbol _print_RandomSymbol = _print_Symbol def _print_Identity(self, expr): return "I" def _print_ZeroMatrix(self, expr): return "0" def _print_OneMatrix(self, expr): return "1" def _print_Predicate(self, expr): return "Q.%s" % expr.name def _print_str(self, expr): return str(expr) def _print_tuple(self, expr): if len(expr) == 1: return "(%s,)" % self._print(expr[0]) else: return "(%s)" % self.stringify(expr, ", ") def _print_Tuple(self, expr): return self._print_tuple(expr) def _print_Transpose(self, T): return "%s.T" % self.parenthesize(T.arg, PRECEDENCE["Pow"]) def _print_Uniform(self, expr): return "Uniform(%s, %s)" % (self._print(expr.a), self._print(expr.b)) def _print_Quantity(self, expr): if self._settings.get("abbrev", False): return "%s" % expr.abbrev return "%s" % expr.name def _print_Quaternion(self, expr): s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True) for i in expr.args] a = [s[0]] + [i+"*"+j for i, j in zip(s[1:], "ijk")] return " + ".join(a) def _print_Dimension(self, expr): return str(expr) def _print_Wild(self, expr): return expr.name + '_' def _print_WildFunction(self, expr): return expr.name + '_' def _print_WildDot(self, expr): return expr.name + '_' def _print_WildPlus(self, expr): return expr.name + '__' def _print_WildStar(self, expr): return expr.name + '___' def _print_Zero(self, expr): if 
self._settings.get("sympy_integers", False): return "S(0)" return "0" def _print_DMP(self, p): from sympy.core.sympify import SympifyError try: if p.ring is not None: # TODO incorporate order return self._print(p.ring.to_sympy(p)) except SympifyError: pass cls = p.__class__.__name__ rep = self._print(p.rep) dom = self._print(p.dom) ring = self._print(p.ring) return "%s(%s, %s, %s)" % (cls, rep, dom, ring) def _print_DMF(self, expr): return self._print_DMP(expr) def _print_Object(self, obj): return 'Object("%s")' % obj.name def _print_IdentityMorphism(self, morphism): return 'IdentityMorphism(%s)' % morphism.domain def _print_NamedMorphism(self, morphism): return 'NamedMorphism(%s, %s, "%s")' % \ (morphism.domain, morphism.codomain, morphism.name) def _print_Category(self, category): return 'Category("%s")' % category.name def _print_Manifold(self, manifold): return manifold.name.name def _print_Patch(self, patch): return patch.name.name def _print_CoordSystem(self, coords): return coords.name.name def _print_BaseScalarField(self, field): return field._coord_sys.symbols[field._index].name def _print_BaseVectorField(self, field): return 'e_%s' % field._coord_sys.symbols[field._index].name def _print_Differential(self, diff): field = diff._form_field if hasattr(field, '_coord_sys'): return 'd%s' % field._coord_sys.symbols[field._index].name else: return 'd(%s)' % self._print(field) def _print_Tr(self, expr): #TODO : Handle indices return "%s(%s)" % ("Tr", self._print(expr.args[0])) def _print_Str(self, s): return self._print(s.name) @print_function(StrPrinter) def sstr(expr, **settings): """Returns the expression as a string. For large expressions where speed is a concern, use the setting order='none'. If abbrev=True setting is used then units are printed in abbreviated form. Examples ======== >>> from sympy import symbols, Eq, sstr >>> a, b = symbols('a b') >>> sstr(Eq(a + b, 0)) 'Eq(a + b, 0)' """ p = StrPrinter(settings) s = p.doprint(expr) return s class StrReprPrinter(StrPrinter): """(internal) -- see sstrrepr""" def _print_str(self, s): return repr(s) def _print_Str(self, s): # Str does not to be printed same as str here return "%s(%s)" % (s.__class__.__name__, self._print(s.name)) @print_function(StrReprPrinter) def sstrrepr(expr, **settings): """return expr in mixed str/repr form i.e. strings are returned in repr form with quotes, and everything else is returned in str form. This function could be useful for hooking into sys.displayhook """ p = StrReprPrinter(settings) s = p.doprint(expr) return s
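# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the module above).  It
# shows how two of the StrPrinter settings handled above, ``sympy_integers``
# and ``full_prec``, change the printed form.  The symbol ``x`` is introduced
# only for this demonstration; exact float output may vary slightly between
# SymPy versions.
from sympy import Float, Rational, Symbol, sstr

x = Symbol('x')
print(sstr(x + Rational(1, 2)))                       # -> 'x + 1/2'
print(sstr(x + Rational(1, 2), sympy_integers=True))  # -> 'x + S(1)/2'
print(sstr(Float(0.5)*x))                             # nested float trimmed: '0.5*x'
print(sstr(Float(0.5)*x, full_prec=True))             # all digits kept: '0.500000000000000*x'
# ---------------------------------------------------------------------------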
""" C++ code printer """ from itertools import chain from sympy.codegen.ast import Type, none from .c import C89CodePrinter, C99CodePrinter # These are defined in the other file so we can avoid importing sympy.codegen # from the top-level 'import sympy'. Export them here as well. from sympy.printing.codeprinter import cxxcode # noqa:F401 # from http://en.cppreference.com/w/cpp/keyword reserved = { 'C++98': [ 'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor', 'bool', 'break', 'case', 'catch,', 'char', 'class', 'compl', 'const', 'const_cast', 'continue', 'default', 'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if', 'inline', 'int', 'long', 'mutable', 'namespace', 'new', 'not', 'not_eq', 'operator', 'or', 'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast', 'return', 'short', 'signed', 'sizeof', 'static', 'static_cast', 'struct', 'switch', 'template', 'this', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq' ] } reserved['C++11'] = reserved['C++98'][:] + [ 'alignas', 'alignof', 'char16_t', 'char32_t', 'constexpr', 'decltype', 'noexcept', 'nullptr', 'static_assert', 'thread_local' ] reserved['C++17'] = reserved['C++11'][:] reserved['C++17'].remove('register') # TM TS: atomic_cancel, atomic_commit, atomic_noexcept, synchronized # concepts TS: concept, requires # module TS: import, module _math_functions = { 'C++98': { 'Mod': 'fmod', 'ceiling': 'ceil', }, 'C++11': { 'gamma': 'tgamma', }, 'C++17': { 'beta': 'beta', 'Ei': 'expint', 'zeta': 'riemann_zeta', } } # from http://en.cppreference.com/w/cpp/header/cmath for k in ('Abs', 'exp', 'log', 'log10', 'sqrt', 'sin', 'cos', 'tan', # 'Pow' 'asin', 'acos', 'atan', 'atan2', 'sinh', 'cosh', 'tanh', 'floor'): _math_functions['C++98'][k] = k.lower() for k in ('asinh', 'acosh', 'atanh', 'erf', 'erfc'): _math_functions['C++11'][k] = k.lower() def _attach_print_method(cls, sympy_name, func_name): meth_name = '_print_%s' % sympy_name if hasattr(cls, meth_name): raise ValueError("Edit method (or subclass) instead of overwriting.") def _print_method(self, expr): return '{}{}({})'.format(self._ns, func_name, ', '.join(map(self._print, expr.args))) _print_method.__doc__ = "Prints code for %s" % k setattr(cls, meth_name, _print_method) def _attach_print_methods(cls, cont): for sympy_name, cxx_name in cont[cls.standard].items(): _attach_print_method(cls, sympy_name, cxx_name) class _CXXCodePrinterBase: printmethod = "_cxxcode" language = 'C++' _ns = 'std::' # namespace def __init__(self, settings=None): super().__init__(settings or {}) def _print_Max(self, expr): from sympy import Max if len(expr.args) == 1: return self._print(expr.args[0]) return "%smax(%s, %s)" % (self._ns, self._print(expr.args[0]), self._print(Max(*expr.args[1:]))) def _print_Min(self, expr): from sympy import Min if len(expr.args) == 1: return self._print(expr.args[0]) return "%smin(%s, %s)" % (self._ns, self._print(expr.args[0]), self._print(Min(*expr.args[1:]))) def _print_using(self, expr): if expr.alias == none: return 'using %s' % expr.type else: raise ValueError("C++98 does not support type aliases") class CXX98CodePrinter(_CXXCodePrinterBase, C89CodePrinter): standard = 'C++98' reserved_words = set(reserved['C++98']) # _attach_print_methods(CXX98CodePrinter, _math_functions) class CXX11CodePrinter(_CXXCodePrinterBase, C99CodePrinter): standard = 'C++11' reserved_words = 
set(reserved['C++11']) type_mappings = dict(chain( CXX98CodePrinter.type_mappings.items(), { Type('int8'): ('int8_t', {'cstdint'}), Type('int16'): ('int16_t', {'cstdint'}), Type('int32'): ('int32_t', {'cstdint'}), Type('int64'): ('int64_t', {'cstdint'}), Type('uint8'): ('uint8_t', {'cstdint'}), Type('uint16'): ('uint16_t', {'cstdint'}), Type('uint32'): ('uint32_t', {'cstdint'}), Type('uint64'): ('uint64_t', {'cstdint'}), Type('complex64'): ('std::complex<float>', {'complex'}), Type('complex128'): ('std::complex<double>', {'complex'}), Type('bool'): ('bool', None), }.items() )) def _print_using(self, expr): if expr.alias == none: return super()._print_using(expr) else: return 'using %(alias)s = %(type)s' % expr.kwargs(apply=self._print) # _attach_print_methods(CXX11CodePrinter, _math_functions) class CXX17CodePrinter(_CXXCodePrinterBase, C99CodePrinter): standard = 'C++17' reserved_words = set(reserved['C++17']) _kf = dict(C99CodePrinter._kf, **_math_functions['C++17']) def _print_beta(self, expr): return self._print_math_func(expr) def _print_Ei(self, expr): return self._print_math_func(expr) def _print_zeta(self, expr): return self._print_math_func(expr) # _attach_print_methods(CXX17CodePrinter, _math_functions) cxx_code_printers = { 'c++98': CXX98CodePrinter, 'c++11': CXX11CodePrinter, 'c++17': CXX17CodePrinter }
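# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the module above).  The
# printers registered in ``cxx_code_printers`` emit the namespaced calls
# defined in ``_CXXCodePrinterBase``.  The import path and the symbols
# ``x``/``y`` are assumptions made for this demonstration (the path matches
# recent SymPy releases).
from sympy import Max, Min, symbols
from sympy.printing.cxx import cxx_code_printers

x, y = symbols('x y')
print(cxx_code_printers['c++11']().doprint(Max(x, y)))  # -> 'std::max(x, y)'
print(cxx_code_printers['c++98']().doprint(Min(x, y)))  # -> 'std::min(x, y)'
# ---------------------------------------------------------------------------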
""" A few practical conventions common to all printers. """ import re from collections.abc import Iterable from sympy import Derivative _name_with_digits_p = re.compile(r'^([a-zA-Z]+)([0-9]+)$') def split_super_sub(text): """Split a symbol name into a name, superscripts and subscripts The first part of the symbol name is considered to be its actual 'name', followed by super- and subscripts. Each superscript is preceded with a "^" character or by "__". Each subscript is preceded by a "_" character. The three return values are the actual name, a list with superscripts and a list with subscripts. Examples ======== >>> from sympy.printing.conventions import split_super_sub >>> split_super_sub('a_x^1') ('a', ['1'], ['x']) >>> split_super_sub('var_sub1__sup_sub2') ('var', ['sup'], ['sub1', 'sub2']) """ if not text: return text, [], [] pos = 0 name = None supers = [] subs = [] while pos < len(text): start = pos + 1 if text[pos:pos + 2] == "__": start += 1 pos_hat = text.find("^", start) if pos_hat < 0: pos_hat = len(text) pos_usc = text.find("_", start) if pos_usc < 0: pos_usc = len(text) pos_next = min(pos_hat, pos_usc) part = text[pos:pos_next] pos = pos_next if name is None: name = part elif part.startswith("^"): supers.append(part[1:]) elif part.startswith("__"): supers.append(part[2:]) elif part.startswith("_"): subs.append(part[1:]) else: raise RuntimeError("This should never happen.") # make a little exception when a name ends with digits, i.e. treat them # as a subscript too. m = _name_with_digits_p.match(name) if m: name, sub = m.groups() subs.insert(0, sub) return name, supers, subs def requires_partial(expr): """Return whether a partial derivative symbol is required for printing This requires checking how many free variables there are, filtering out the ones that are integers. Some expressions don't have free variables. In that case, check its variable list explicitly to get the context of the expression. """ if isinstance(expr, Derivative): return requires_partial(expr.expr) if not isinstance(expr.free_symbols, Iterable): return len(set(expr.variables)) > 1 return sum(not s.is_integer for s in expr.free_symbols) > 1
"""Printing subsystem driver SymPy's printing system works the following way: Any expression can be passed to a designated Printer who then is responsible to return an adequate representation of that expression. **The basic concept is the following:** 1. Let the object print itself if it knows how. 2. Take the best fitting method defined in the printer. 3. As fall-back use the emptyPrinter method for the printer. Which Method is Responsible for Printing? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The whole printing process is started by calling ``.doprint(expr)`` on the printer which you want to use. This method looks for an appropriate method which can print the given expression in the given style that the printer defines. While looking for the method, it follows these steps: 1. **Let the object print itself if it knows how.** The printer looks for a specific method in every object. The name of that method depends on the specific printer and is defined under ``Printer.printmethod``. For example, StrPrinter calls ``_sympystr`` and LatexPrinter calls ``_latex``. Look at the documentation of the printer that you want to use. The name of the method is specified there. This was the original way of doing printing in sympy. Every class had its own latex, mathml, str and repr methods, but it turned out that it is hard to produce a high quality printer, if all the methods are spread out that far. Therefore all printing code was combined into the different printers, which works great for built-in sympy objects, but not that good for user defined classes where it is inconvenient to patch the printers. 2. **Take the best fitting method defined in the printer.** The printer loops through expr classes (class + its bases), and tries to dispatch the work to ``_print_<EXPR_CLASS>`` e.g., suppose we have the following class hierarchy:: Basic | Atom | Number | Rational then, for ``expr=Rational(...)``, the Printer will try to call printer methods in the order as shown in the figure below:: p._print(expr) | |-- p._print_Rational(expr) | |-- p._print_Number(expr) | |-- p._print_Atom(expr) | `-- p._print_Basic(expr) if ``._print_Rational`` method exists in the printer, then it is called, and the result is returned back. Otherwise, the printer tries to call ``._print_Number`` and so on. 3. **As a fall-back use the emptyPrinter method for the printer.** As fall-back ``self.emptyPrinter`` will be called with the expression. If not defined in the Printer subclass this will be the same as ``str(expr)``. .. _printer_example: Example of Custom Printer ^^^^^^^^^^^^^^^^^^^^^^^^^ In the example below, we have a printer which prints the derivative of a function in a shorter form. .. code-block:: python from sympy import Symbol from sympy.printing.latex import LatexPrinter, print_latex from sympy.core.function import UndefinedFunction, Function class MyLatexPrinter(LatexPrinter): \"\"\"Print derivative of a function of symbols in a shorter form. \"\"\" def _print_Derivative(self, expr): function, *vars = expr.args if not isinstance(type(function), UndefinedFunction) or \\ not all(isinstance(i, Symbol) for i in vars): return super()._print_Derivative(expr) # If you want the printer to work correctly for nested # expressions then use self._print() instead of str() or latex(). # See the example of nested modulo below in the custom printing # method section. 
return "{}_{{{}}}".format( self._print(Symbol(function.func.__name__)), ''.join(self._print(i) for i in vars)) def print_my_latex(expr): \"\"\" Most of the printers define their own wrappers for print(). These wrappers usually take printer settings. Our printer does not have any settings. \"\"\" print(MyLatexPrinter().doprint(expr)) y = Symbol("y") x = Symbol("x") f = Function("f") expr = f(x, y).diff(x, y) # Print the expression using the normal latex printer and our custom # printer. print_latex(expr) print_my_latex(expr) The output of the code above is:: \\frac{\\partial^{2}}{\\partial x\\partial y} f{\\left(x,y \\right)} f_{xy} .. _printer_method_example: Example of Custom Printing Method ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the example below, the latex printing of the modulo operator is modified. This is done by overriding the method ``_latex`` of ``Mod``. >>> from sympy import Symbol, Mod, Integer >>> from sympy.printing.latex import print_latex >>> # Always use printer._print() >>> class ModOp(Mod): ... def _latex(self, printer): ... a, b = [printer._print(i) for i in self.args] ... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b) Comparing the output of our custom operator to the builtin one: >>> x = Symbol('x') >>> m = Symbol('m') >>> print_latex(Mod(x, m)) x\\bmod{m} >>> print_latex(ModOp(x, m)) \\operatorname{Mod}{\\left( x,m \\right)} Common mistakes ~~~~~~~~~~~~~~~ It's important to always use ``self._print(obj)`` to print subcomponents of an expression when customizing a printer. Mistakes include: 1. Using ``self.doprint(obj)`` instead: >>> # This example does not work properly, as only the outermost call may use >>> # doprint. >>> class ModOpModeWrong(Mod): ... def _latex(self, printer): ... a, b = [printer.doprint(i) for i in self.args] ... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b) This fails when the `mode` argument is passed to the printer: >>> print_latex(ModOp(x, m), mode='inline') # ok $\\operatorname{Mod}{\\left( x,m \\right)}$ >>> print_latex(ModOpModeWrong(x, m), mode='inline') # bad $\\operatorname{Mod}{\\left( $x$,$m$ \\right)}$ 2. Using ``str(obj)`` instead: >>> class ModOpNestedWrong(Mod): ... def _latex(self, printer): ... a, b = [str(i) for i in self.args] ... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b) This fails on nested objects: >>> # Nested modulo. >>> print_latex(ModOp(ModOp(x, m), Integer(7))) # ok \\operatorname{Mod}{\\left( \\operatorname{Mod}{\\left( x,m \\right)},7 \\right)} >>> print_latex(ModOpNestedWrong(ModOpNestedWrong(x, m), Integer(7))) # bad \\operatorname{Mod}{\\left( ModOpNestedWrong(x, m),7 \\right)} 3. Using ``LatexPrinter()._print(obj)`` instead. >>> from sympy.printing.latex import LatexPrinter >>> class ModOpSettingsWrong(Mod): ... def _latex(self, printer): ... a, b = [LatexPrinter()._print(i) for i in self.args] ... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b) This causes all the settings to be discarded in the subobjects. 
As an example, the ``full_prec`` setting which shows floats to full precision is ignored: >>> from sympy import Float >>> print_latex(ModOp(Float(1) * x, m), full_prec=True) # ok \\operatorname{Mod}{\\left( 1.00000000000000 x,m \\right)} >>> print_latex(ModOpSettingsWrong(Float(1) * x, m), full_prec=True) # bad \\operatorname{Mod}{\\left( 1.0 x,m \\right)} """ from typing import Any, Dict, Type import inspect from contextlib import contextmanager from functools import cmp_to_key, update_wrapper from sympy import Basic, Add from sympy.core.core import BasicMeta from sympy.core.function import AppliedUndef, UndefinedFunction, Function @contextmanager def printer_context(printer, **kwargs): original = printer._context.copy() try: printer._context.update(kwargs) yield finally: printer._context = original class Printer: """ Generic printer Its job is to provide infrastructure for implementing new printers easily. If you want to define your custom Printer or your custom printing method for your custom class then see the example above: printer_example_ . """ _global_settings = {} # type: Dict[str, Any] _default_settings = {} # type: Dict[str, Any] printmethod = None # type: str @classmethod def _get_initial_settings(cls): settings = cls._default_settings.copy() for key, val in cls._global_settings.items(): if key in cls._default_settings: settings[key] = val return settings def __init__(self, settings=None): self._str = str self._settings = self._get_initial_settings() self._context = dict() # mutable during printing if settings is not None: self._settings.update(settings) if len(self._settings) > len(self._default_settings): for key in self._settings: if key not in self._default_settings: raise TypeError("Unknown setting '%s'." % key) # _print_level is the number of times self._print() was recursively # called. See StrPrinter._print_Float() for an example of usage self._print_level = 0 @classmethod def set_global_settings(cls, **settings): """Set system-wide printing settings. """ for key, val in settings.items(): if val is not None: cls._global_settings[key] = val @property def order(self): if 'order' in self._settings: return self._settings['order'] else: raise AttributeError("No order defined.") def doprint(self, expr): """Returns printer's representation for expr (as a string)""" return self._str(self._print(expr)) def _print(self, expr, **kwargs): """Internal dispatcher Tries the following concepts to print an expression: 1. Let the object print itself if it knows how. 2. Take the best fitting method defined in the printer. 3. As fall-back use the emptyPrinter method for the printer. """ self._print_level += 1 try: # If the printer defines a name for a printing method # (Printer.printmethod) and the object knows for itself how it # should be printed, use that method. 
if (self.printmethod and hasattr(expr, self.printmethod) and not isinstance(expr, BasicMeta)): return getattr(expr, self.printmethod)(self, **kwargs) # See if the class of expr is known, or if one of its super # classes is known, and use that print function # Exception: ignore the subclasses of Undefined, so that, e.g., # Function('gamma') does not get dispatched to _print_gamma classes = type(expr).__mro__ if AppliedUndef in classes: classes = classes[classes.index(AppliedUndef):] if UndefinedFunction in classes: classes = classes[classes.index(UndefinedFunction):] # Another exception: if someone subclasses a known function, e.g., # gamma, and changes the name, then ignore _print_gamma if Function in classes: i = classes.index(Function) classes = tuple(c for c in classes[:i] if \ c.__name__ == classes[0].__name__ or \ c.__name__.endswith("Base")) + classes[i:] for cls in classes: printmethod = '_print_' + cls.__name__ if hasattr(self, printmethod): return getattr(self, printmethod)(expr, **kwargs) # Unknown object, fall back to the emptyPrinter. return self.emptyPrinter(expr) finally: self._print_level -= 1 def emptyPrinter(self, expr): return str(expr) def _as_ordered_terms(self, expr, order=None): """A compatibility function for ordering terms in Add. """ order = order or self.order if order == 'old': return sorted(Add.make_args(expr), key=cmp_to_key(Basic._compare_pretty)) elif order == 'none': return list(expr.args) else: return expr.as_ordered_terms(order=order) class _PrintFunction: """ Function wrapper to replace ``**settings`` in the signature with printer defaults """ def __init__(self, f, print_cls: Type[Printer]): # find all the non-setting arguments params = list(inspect.signature(f).parameters.values()) assert params.pop(-1).kind == inspect.Parameter.VAR_KEYWORD self.__other_params = params self.__print_cls = print_cls update_wrapper(self, f) def __reduce__(self): # Since this is used as a decorator, it replaces the original function. # The default pickling will try to pickle self.__wrapped__ and fail # because the wrapped function can't be retrieved by name. return self.__wrapped__.__qualname__ def __repr__(self) -> str: return repr(self.__wrapped__) # type:ignore def __call__(self, *args, **kwargs): return self.__wrapped__(*args, **kwargs) @property def __signature__(self) -> inspect.Signature: settings = self.__print_cls._get_initial_settings() return inspect.Signature( parameters=self.__other_params + [ inspect.Parameter(k, inspect.Parameter.KEYWORD_ONLY, default=v) for k, v in settings.items() ], return_annotation=self.__wrapped__.__annotations__.get('return', inspect.Signature.empty) # type:ignore ) def print_function(print_cls): """ A decorator to replace kwargs with the printer settings in __signature__ """ def decorator(f): return _PrintFunction(f, print_cls) return decorator
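# ---------------------------------------------------------------------------
# Minimal sketch (illustrative addition, not part of the module above).  It
# exercises the dispatch order described in the module docstring: ``_print``
# looks for ``_print_<Class>`` methods along the expression's MRO and falls
# back to ``emptyPrinter``.  ``TinyPrinter`` and its output format are made
# up for this demonstration.
from sympy import Integer, Symbol
from sympy.printing.printer import Printer

class TinyPrinter(Printer):
    printmethod = "_tinystr"   # objects could supply this hook themselves

    def _print_Symbol(self, expr):
        return "sym:" + expr.name

    def _print_Integer(self, expr):
        return "int:" + str(expr.p)

p = TinyPrinter()
print(p.doprint(Symbol('x')))      # -> 'sym:x'  (step 2: _print_Symbol)
print(p.doprint(Integer(3)))       # -> 'int:3'  (step 2: _print_Integer)
print(p.doprint(Symbol('x') + 1))  # -> 'x + 1'  (step 3: emptyPrinter fallback)
# ---------------------------------------------------------------------------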
from distutils.version import LooseVersion as V from collections.abc import Iterable from sympy import Mul, S from sympy.codegen.cfunctions import Sqrt from sympy.external import import_module from sympy.printing.precedence import PRECEDENCE from sympy.printing.pycode import AbstractPythonCodePrinter import sympy tensorflow = import_module('tensorflow') class TensorflowPrinter(AbstractPythonCodePrinter): """ Tensorflow printer which handles vectorized piecewise functions, logical operators, max/min, and relational operators. """ printmethod = "_tensorflowcode" mapping = { sympy.Abs: "tensorflow.math.abs", sympy.sign: "tensorflow.math.sign", # XXX May raise error for ints. sympy.ceiling: "tensorflow.math.ceil", sympy.floor: "tensorflow.math.floor", sympy.log: "tensorflow.math.log", sympy.exp: "tensorflow.math.exp", Sqrt: "tensorflow.math.sqrt", sympy.cos: "tensorflow.math.cos", sympy.acos: "tensorflow.math.acos", sympy.sin: "tensorflow.math.sin", sympy.asin: "tensorflow.math.asin", sympy.tan: "tensorflow.math.tan", sympy.atan: "tensorflow.math.atan", sympy.atan2: "tensorflow.math.atan2", # XXX Also may give NaN for complex results. sympy.cosh: "tensorflow.math.cosh", sympy.acosh: "tensorflow.math.acosh", sympy.sinh: "tensorflow.math.sinh", sympy.asinh: "tensorflow.math.asinh", sympy.tanh: "tensorflow.math.tanh", sympy.atanh: "tensorflow.math.atanh", sympy.re: "tensorflow.math.real", sympy.im: "tensorflow.math.imag", sympy.arg: "tensorflow.math.angle", # XXX May raise error for ints and complexes sympy.erf: "tensorflow.math.erf", sympy.loggamma: "tensorflow.math.lgamma", sympy.Eq: "tensorflow.math.equal", sympy.Ne: "tensorflow.math.not_equal", sympy.StrictGreaterThan: "tensorflow.math.greater", sympy.StrictLessThan: "tensorflow.math.less", sympy.LessThan: "tensorflow.math.less_equal", sympy.GreaterThan: "tensorflow.math.greater_equal", sympy.And: "tensorflow.math.logical_and", sympy.Or: "tensorflow.math.logical_or", sympy.Not: "tensorflow.math.logical_not", sympy.Max: "tensorflow.math.maximum", sympy.Min: "tensorflow.math.minimum", # Matrices sympy.MatAdd: "tensorflow.math.add", sympy.HadamardProduct: "tensorflow.math.multiply", sympy.Trace: "tensorflow.linalg.trace", # XXX May raise error for integer matrices. 
sympy.Determinant : "tensorflow.linalg.det", } _default_settings = dict( AbstractPythonCodePrinter._default_settings, tensorflow_version=None ) def __init__(self, settings=None): super().__init__(settings) version = self._settings['tensorflow_version'] if version is None and tensorflow: version = tensorflow.__version__ self.tensorflow_version = version def _print_Function(self, expr): op = self.mapping.get(type(expr), None) if op is None: return super()._print_Basic(expr) children = [self._print(arg) for arg in expr.args] if len(children) == 1: return "%s(%s)" % ( self._module_format(op), children[0] ) else: return self._expand_fold_binary_op(op, children) _print_Expr = _print_Function _print_Application = _print_Function _print_MatrixExpr = _print_Function # TODO: a better class structure would avoid this mess: _print_Relational = _print_Function _print_Not = _print_Function _print_And = _print_Function _print_Or = _print_Function _print_HadamardProduct = _print_Function _print_Trace = _print_Function _print_Determinant = _print_Function def _print_Inverse(self, expr): op = self._module_format('tensorflow.linalg.inv') return "{}({})".format(op, self._print(expr.arg)) def _print_Transpose(self, expr): version = self.tensorflow_version if version and V(version) < V('1.14'): op = self._module_format('tensorflow.matrix_transpose') else: op = self._module_format('tensorflow.linalg.matrix_transpose') return "{}({})".format(op, self._print(expr.arg)) def _print_Derivative(self, expr): variables = expr.variables if any(isinstance(i, Iterable) for i in variables): raise NotImplementedError("derivation by multiple variables is not supported") def unfold(expr, args): if not args: return self._print(expr) return "%s(%s, %s)[0]" % ( self._module_format("tensorflow.gradients"), unfold(expr, args[:-1]), self._print(args[-1]), ) return unfold(expr.expr, variables) def _print_Piecewise(self, expr): version = self.tensorflow_version if version and V(version) < V('1.0'): tensorflow_piecewise = "tensorflow.select" else: tensorflow_piecewise = "tensorflow.where" from sympy import Piecewise e, cond = expr.args[0].args if len(expr.args) == 1: return '{}({}, {}, {})'.format( self._module_format(tensorflow_piecewise), self._print(cond), self._print(e), 0) return '{}({}, {}, {})'.format( self._module_format(tensorflow_piecewise), self._print(cond), self._print(e), self._print(Piecewise(*expr.args[1:]))) def _print_Pow(self, expr): # XXX May raise error for # int**float or int**complex or float**complex base, exp = expr.args if expr.exp == S.Half: return "{}({})".format( self._module_format("tensorflow.math.sqrt"), self._print(base)) return "{}({}, {})".format( self._module_format("tensorflow.math.pow"), self._print(base), self._print(exp)) def _print_MatrixBase(self, expr): tensorflow_f = "tensorflow.Variable" if expr.free_symbols else "tensorflow.constant" data = "["+", ".join(["["+", ".join([self._print(j) for j in i])+"]" for i in expr.tolist()])+"]" return "%s(%s)" % ( self._module_format(tensorflow_f), data, ) def _print_MatMul(self, expr): from sympy.matrices.expressions import MatrixExpr mat_args = [arg for arg in expr.args if isinstance(arg, MatrixExpr)] args = [arg for arg in expr.args if arg not in mat_args] if args: return "%s*%s" % ( self.parenthesize(Mul.fromiter(args), PRECEDENCE["Mul"]), self._expand_fold_binary_op( "tensorflow.linalg.matmul", mat_args) ) else: return self._expand_fold_binary_op( "tensorflow.linalg.matmul", mat_args) def _print_MatPow(self, expr): return self._expand_fold_binary_op( 
"tensorflow.linalg.matmul", [expr.base]*expr.exp) def _print_Assignment(self, expr): # TODO: is this necessary? return "%s = %s" % ( self._print(expr.lhs), self._print(expr.rhs), ) def _print_CodeBlock(self, expr): # TODO: is this necessary? ret = [] for subexpr in expr.args: ret.append(self._print(subexpr)) return "\n".join(ret) def _get_letter_generator_for_einsum(self): for i in range(97, 123): yield chr(i) for i in range(65, 91): yield chr(i) raise ValueError("out of letters") def _print_CodegenArrayTensorProduct(self, expr): letters = self._get_letter_generator_for_einsum() contraction_string = ",".join(["".join([next(letters) for j in range(i)]) for i in expr.subranks]) return '%s("%s", %s)' % ( self._module_format('tensorflow.linalg.einsum'), contraction_string, ", ".join([self._print(arg) for arg in expr.args]) ) def _print_CodegenArrayContraction(self, expr): from sympy.codegen.array_utils import CodegenArrayTensorProduct base = expr.expr contraction_indices = expr.contraction_indices contraction_string, letters_free, letters_dum = self._get_einsum_string(base.subranks, contraction_indices) if not contraction_indices: return self._print(base) if isinstance(base, CodegenArrayTensorProduct): elems = ["%s" % (self._print(arg)) for arg in base.args] return "%s(\"%s\", %s)" % ( self._module_format("tensorflow.linalg.einsum"), contraction_string, ", ".join(elems) ) raise NotImplementedError() def _print_CodegenArrayDiagonal(self, expr): from sympy.codegen.array_utils import CodegenArrayTensorProduct diagonal_indices = list(expr.diagonal_indices) if len(diagonal_indices) > 1: # TODO: this should be handled in sympy.codegen.array_utils, # possibly by creating the possibility of unfolding the # CodegenArrayDiagonal object into nested ones. Same reasoning for # the array contraction. raise NotImplementedError if len(diagonal_indices[0]) != 2: raise NotImplementedError if isinstance(expr.expr, CodegenArrayTensorProduct): subranks = expr.expr.subranks elems = expr.expr.args else: subranks = expr.subranks elems = [expr.expr] diagonal_string, letters_free, letters_dum = self._get_einsum_string(subranks, diagonal_indices) elems = [self._print(i) for i in elems] return '%s("%s", %s)' % ( self._module_format("tensorflow.linalg.einsum"), "{}->{}{}".format(diagonal_string, "".join(letters_free), "".join(letters_dum)), ", ".join(elems) ) def _print_CodegenArrayPermuteDims(self, expr): return "%s(%s, %s)" % ( self._module_format("tensorflow.transpose"), self._print(expr.expr), self._print(expr.permutation.array_form), ) def _print_CodegenArrayElementwiseAdd(self, expr): return self._expand_fold_binary_op('tensorflow.math.add', expr.args) def tensorflow_code(expr, **settings): printer = TensorflowPrinter(settings) return printer.doprint(expr)
"""Integration method that emulates by-hand techniques. This module also provides functionality to get the steps used to evaluate a particular integral, in the ``integral_steps`` function. This will return nested namedtuples representing the integration rules used. The ``manualintegrate`` function computes the integral using those steps given an integrand; given the steps, ``_manualintegrate`` will evaluate them. The integrator can be extended with new heuristics and evaluation techniques. To do so, write a function that accepts an ``IntegralInfo`` object and returns either a namedtuple representing a rule or ``None``. Then, write another function that accepts the namedtuple's fields and returns the antiderivative, and decorate it with ``@evaluates(namedtuple_type)``. If the new technique requires a new match, add the key and call to the antiderivative function to integral_steps. To enable simple substitutions, add the match to find_substitutions. """ from typing import Dict as tDict, Optional from collections import namedtuple, defaultdict from collections.abc import Mapping from functools import reduce import sympy from sympy.core.compatibility import iterable from sympy.core.containers import Dict from sympy.core.expr import Expr from sympy.core.logic import fuzzy_not from sympy.functions.elementary.trigonometric import TrigonometricFunction from sympy.functions.special.polynomials import OrthogonalPolynomial from sympy.functions.elementary.piecewise import Piecewise from sympy.strategies.core import switch, do_one, null_safe, condition from sympy.core.relational import Eq, Ne from sympy.polys.polytools import degree from sympy.ntheory.factor_ import divisors from sympy.utilities.misc import debug ZERO = sympy.S.Zero def Rule(name, props=""): # GOTCHA: namedtuple class name not considered! 
def __eq__(self, other): return self.__class__ == other.__class__ and tuple.__eq__(self, other) __neq__ = lambda self, other: not __eq__(self, other) cls = namedtuple(name, props + " context symbol") cls.__eq__ = __eq__ cls.__ne__ = __neq__ return cls ConstantRule = Rule("ConstantRule", "constant") ConstantTimesRule = Rule("ConstantTimesRule", "constant other substep") PowerRule = Rule("PowerRule", "base exp") AddRule = Rule("AddRule", "substeps") URule = Rule("URule", "u_var u_func constant substep") PartsRule = Rule("PartsRule", "u dv v_step second_step") CyclicPartsRule = Rule("CyclicPartsRule", "parts_rules coefficient") TrigRule = Rule("TrigRule", "func arg") ExpRule = Rule("ExpRule", "base exp") ReciprocalRule = Rule("ReciprocalRule", "func") ArcsinRule = Rule("ArcsinRule") InverseHyperbolicRule = Rule("InverseHyperbolicRule", "func") AlternativeRule = Rule("AlternativeRule", "alternatives") DontKnowRule = Rule("DontKnowRule") DerivativeRule = Rule("DerivativeRule") RewriteRule = Rule("RewriteRule", "rewritten substep") PiecewiseRule = Rule("PiecewiseRule", "subfunctions") HeavisideRule = Rule("HeavisideRule", "harg ibnd substep") TrigSubstitutionRule = Rule("TrigSubstitutionRule", "theta func rewritten substep restriction") ArctanRule = Rule("ArctanRule", "a b c") ArccothRule = Rule("ArccothRule", "a b c") ArctanhRule = Rule("ArctanhRule", "a b c") JacobiRule = Rule("JacobiRule", "n a b") GegenbauerRule = Rule("GegenbauerRule", "n a") ChebyshevTRule = Rule("ChebyshevTRule", "n") ChebyshevURule = Rule("ChebyshevURule", "n") LegendreRule = Rule("LegendreRule", "n") HermiteRule = Rule("HermiteRule", "n") LaguerreRule = Rule("LaguerreRule", "n") AssocLaguerreRule = Rule("AssocLaguerreRule", "n a") CiRule = Rule("CiRule", "a b") ChiRule = Rule("ChiRule", "a b") EiRule = Rule("EiRule", "a b") SiRule = Rule("SiRule", "a b") ShiRule = Rule("ShiRule", "a b") ErfRule = Rule("ErfRule", "a b c") FresnelCRule = Rule("FresnelCRule", "a b c") FresnelSRule = Rule("FresnelSRule", "a b c") LiRule = Rule("LiRule", "a b") PolylogRule = Rule("PolylogRule", "a b") UpperGammaRule = Rule("UpperGammaRule", "a e") EllipticFRule = Rule("EllipticFRule", "a d") EllipticERule = Rule("EllipticERule", "a d") IntegralInfo = namedtuple('IntegralInfo', 'integrand symbol') evaluators = {} def evaluates(rule): def _evaluates(func): func.rule = rule evaluators[rule] = func return func return _evaluates def contains_dont_know(rule): if isinstance(rule, DontKnowRule): return True else: for val in rule: if isinstance(val, tuple): if contains_dont_know(val): return True elif isinstance(val, list): if any(contains_dont_know(i) for i in val): return True return False def manual_diff(f, symbol): """Derivative of f in form expected by find_substitutions SymPy's derivatives for some trig functions (like cot) aren't in a form that works well with finding substitutions; this replaces the derivatives for those particular forms with something that works better. 
""" if f.args: arg = f.args[0] if isinstance(f, sympy.tan): return arg.diff(symbol) * sympy.sec(arg)**2 elif isinstance(f, sympy.cot): return -arg.diff(symbol) * sympy.csc(arg)**2 elif isinstance(f, sympy.sec): return arg.diff(symbol) * sympy.sec(arg) * sympy.tan(arg) elif isinstance(f, sympy.csc): return -arg.diff(symbol) * sympy.csc(arg) * sympy.cot(arg) elif isinstance(f, sympy.Add): return sum([manual_diff(arg, symbol) for arg in f.args]) elif isinstance(f, sympy.Mul): if len(f.args) == 2 and isinstance(f.args[0], sympy.Number): return f.args[0] * manual_diff(f.args[1], symbol) return f.diff(symbol) def manual_subs(expr, *args): """ A wrapper for `expr.subs(*args)` with additional logic for substitution of invertible functions. """ if len(args) == 1: sequence = args[0] if isinstance(sequence, (Dict, Mapping)): sequence = sequence.items() elif not iterable(sequence): raise ValueError("Expected an iterable of (old, new) pairs") elif len(args) == 2: sequence = [args] else: raise ValueError("subs accepts either 1 or 2 arguments") new_subs = [] for old, new in sequence: if isinstance(old, sympy.log): # If log(x) = y, then exp(a*log(x)) = exp(a*y) # that is, x**a = exp(a*y). Replace nontrivial powers of x # before subs turns them into `exp(y)**a`, but # do not replace x itself yet, to avoid `log(exp(y))`. x0 = old.args[0] expr = expr.replace(lambda x: x.is_Pow and x.base == x0, lambda x: sympy.exp(x.exp*new)) new_subs.append((x0, sympy.exp(new))) return expr.subs(list(sequence) + new_subs) # Method based on that on SIN, described in "Symbolic Integration: The # Stormy Decade" inverse_trig_functions = (sympy.atan, sympy.asin, sympy.acos, sympy.acot, sympy.acsc, sympy.asec) def find_substitutions(integrand, symbol, u_var): results = [] def test_subterm(u, u_diff): if u_diff == 0: return False substituted = integrand / u_diff if symbol not in substituted.free_symbols: # replaced everything already return False debug("substituted: {}, u: {}, u_var: {}".format(substituted, u, u_var)) substituted = manual_subs(substituted, u, u_var).cancel() if symbol not in substituted.free_symbols: # avoid increasing the degree of a rational function if integrand.is_rational_function(symbol) and substituted.is_rational_function(u_var): deg_before = max([degree(t, symbol) for t in integrand.as_numer_denom()]) deg_after = max([degree(t, u_var) for t in substituted.as_numer_denom()]) if deg_after > deg_before: return False return substituted.as_independent(u_var, as_Add=False) # special treatment for substitutions u = (a*x+b)**(1/n) if (isinstance(u, sympy.Pow) and (1/u.exp).is_Integer and sympy.Abs(u.exp) < 1): a = sympy.Wild('a', exclude=[symbol]) b = sympy.Wild('b', exclude=[symbol]) match = u.base.match(a*symbol + b) if match: a, b = [match.get(i, ZERO) for i in (a, b)] if a != 0 and b != 0: substituted = substituted.subs(symbol, (u_var**(1/u.exp) - b)/a) return substituted.as_independent(u_var, as_Add=False) return False def possible_subterms(term): if isinstance(term, (TrigonometricFunction, *inverse_trig_functions, sympy.exp, sympy.log, sympy.Heaviside)): return [term.args[0]] elif isinstance(term, (sympy.chebyshevt, sympy.chebyshevu, sympy.legendre, sympy.hermite, sympy.laguerre)): return [term.args[1]] elif isinstance(term, (sympy.gegenbauer, sympy.assoc_laguerre)): return [term.args[2]] elif isinstance(term, sympy.jacobi): return [term.args[3]] elif isinstance(term, sympy.Mul): r = [] for u in term.args: r.append(u) r.extend(possible_subterms(u)) return r elif isinstance(term, sympy.Pow): r = [] if 
term.args[1].is_constant(symbol): r.append(term.args[0]) elif term.args[0].is_constant(symbol): r.append(term.args[1]) if term.args[1].is_Integer: r.extend([term.args[0]**d for d in divisors(term.args[1]) if 1 < d < abs(term.args[1])]) if term.args[0].is_Add: r.extend([t for t in possible_subterms(term.args[0]) if t.is_Pow]) return r elif isinstance(term, sympy.Add): r = [] for arg in term.args: r.append(arg) r.extend(possible_subterms(arg)) return r return [] for u in possible_subterms(integrand): if u == symbol: continue u_diff = manual_diff(u, symbol) new_integrand = test_subterm(u, u_diff) if new_integrand is not False: constant, new_integrand = new_integrand if new_integrand == integrand.subs(symbol, u_var): continue substitution = (u, constant, new_integrand) if substitution not in results: results.append(substitution) return results def rewriter(condition, rewrite): """Strategy that rewrites an integrand.""" def _rewriter(integral): integrand, symbol = integral debug("Integral: {} is rewritten with {} on symbol: {}".format(integrand, rewrite, symbol)) if condition(*integral): rewritten = rewrite(*integral) if rewritten != integrand: substep = integral_steps(rewritten, symbol) if not isinstance(substep, DontKnowRule) and substep: return RewriteRule( rewritten, substep, integrand, symbol) return _rewriter def proxy_rewriter(condition, rewrite): """Strategy that rewrites an integrand based on some other criteria.""" def _proxy_rewriter(criteria): criteria, integral = criteria integrand, symbol = integral debug("Integral: {} is rewritten with {} on symbol: {} and criteria: {}".format(integrand, rewrite, symbol, criteria)) args = criteria + list(integral) if condition(*args): rewritten = rewrite(*args) if rewritten != integrand: return RewriteRule( rewritten, integral_steps(rewritten, symbol), integrand, symbol) return _proxy_rewriter def multiplexer(conditions): """Apply the rule that matches the condition, else None""" def multiplexer_rl(expr): for key, rule in conditions.items(): if key(expr): return rule(expr) return multiplexer_rl def alternatives(*rules): """Strategy that makes an AlternativeRule out of multiple possible results.""" def _alternatives(integral): alts = [] count = 0 debug("List of Alternative Rules") for rule in rules: count = count + 1 debug("Rule {}: {}".format(count, rule)) result = rule(integral) if (result and not isinstance(result, DontKnowRule) and result != integral and result not in alts): alts.append(result) if len(alts) == 1: return alts[0] elif alts: doable = [rule for rule in alts if not contains_dont_know(rule)] if doable: return AlternativeRule(doable, *integral) else: return AlternativeRule(alts, *integral) return _alternatives def constant_rule(integral): integrand, symbol = integral return ConstantRule(integral.integrand, *integral) def power_rule(integral): integrand, symbol = integral base, exp = integrand.as_base_exp() if symbol not in exp.free_symbols and isinstance(base, sympy.Symbol): if sympy.simplify(exp + 1) == 0: return ReciprocalRule(base, integrand, symbol) return PowerRule(base, exp, integrand, symbol) elif symbol not in base.free_symbols and isinstance(exp, sympy.Symbol): rule = ExpRule(base, exp, integrand, symbol) if fuzzy_not(sympy.log(base).is_zero): return rule elif sympy.log(base).is_zero: return ConstantRule(1, 1, symbol) return PiecewiseRule([ (rule, sympy.Ne(sympy.log(base), 0)), (ConstantRule(1, 1, symbol), True) ], integrand, symbol) def exp_rule(integral): integrand, symbol = integral if isinstance(integrand.args[0], 
sympy.Symbol): return ExpRule(sympy.E, integrand.args[0], integrand, symbol) def orthogonal_poly_rule(integral): orthogonal_poly_classes = { sympy.jacobi: JacobiRule, sympy.gegenbauer: GegenbauerRule, sympy.chebyshevt: ChebyshevTRule, sympy.chebyshevu: ChebyshevURule, sympy.legendre: LegendreRule, sympy.hermite: HermiteRule, sympy.laguerre: LaguerreRule, sympy.assoc_laguerre: AssocLaguerreRule } orthogonal_poly_var_index = { sympy.jacobi: 3, sympy.gegenbauer: 2, sympy.assoc_laguerre: 2 } integrand, symbol = integral for klass in orthogonal_poly_classes: if isinstance(integrand, klass): var_index = orthogonal_poly_var_index.get(klass, 1) if (integrand.args[var_index] is symbol and not any(v.has(symbol) for v in integrand.args[:var_index])): args = integrand.args[:var_index] + (integrand, symbol) return orthogonal_poly_classes[klass](*args) def special_function_rule(integral): integrand, symbol = integral a = sympy.Wild('a', exclude=[symbol], properties=[lambda x: not x.is_zero]) b = sympy.Wild('b', exclude=[symbol]) c = sympy.Wild('c', exclude=[symbol]) d = sympy.Wild('d', exclude=[symbol], properties=[lambda x: not x.is_zero]) e = sympy.Wild('e', exclude=[symbol], properties=[ lambda x: not (x.is_nonnegative and x.is_integer)]) wilds = (a, b, c, d, e) # patterns consist of a SymPy class, a wildcard expr, an optional # condition coded as a lambda (when Wild properties are not enough), # followed by an applicable rule patterns = ( (sympy.Mul, sympy.exp(a*symbol + b)/symbol, None, EiRule), (sympy.Mul, sympy.cos(a*symbol + b)/symbol, None, CiRule), (sympy.Mul, sympy.cosh(a*symbol + b)/symbol, None, ChiRule), (sympy.Mul, sympy.sin(a*symbol + b)/symbol, None, SiRule), (sympy.Mul, sympy.sinh(a*symbol + b)/symbol, None, ShiRule), (sympy.Pow, 1/sympy.log(a*symbol + b), None, LiRule), (sympy.exp, sympy.exp(a*symbol**2 + b*symbol + c), None, ErfRule), (sympy.sin, sympy.sin(a*symbol**2 + b*symbol + c), None, FresnelSRule), (sympy.cos, sympy.cos(a*symbol**2 + b*symbol + c), None, FresnelCRule), (sympy.Mul, symbol**e*sympy.exp(a*symbol), None, UpperGammaRule), (sympy.Mul, sympy.polylog(b, a*symbol)/symbol, None, PolylogRule), (sympy.Pow, 1/sympy.sqrt(a - d*sympy.sin(symbol)**2), lambda a, d: a != d, EllipticFRule), (sympy.Pow, sympy.sqrt(a - d*sympy.sin(symbol)**2), lambda a, d: a != d, EllipticERule), ) for p in patterns: if isinstance(integrand, p[0]): match = integrand.match(p[1]) if match: wild_vals = tuple(match.get(w) for w in wilds if match.get(w) is not None) if p[2] is None or p[2](*wild_vals): args = wild_vals + (integrand, symbol) return p[3](*args) def inverse_trig_rule(integral): integrand, symbol = integral base, exp = integrand.as_base_exp() a = sympy.Wild('a', exclude=[symbol]) b = sympy.Wild('b', exclude=[symbol]) match = base.match(a + b*symbol**2) if not match: return def negative(x): return x.is_negative or x.could_extract_minus_sign() def ArcsinhRule(integrand, symbol): return InverseHyperbolicRule(sympy.asinh, integrand, symbol) def ArccoshRule(integrand, symbol): return InverseHyperbolicRule(sympy.acosh, integrand, symbol) def make_inverse_trig(RuleClass, base_exp, a, sign_a, b, sign_b): u_var = sympy.Dummy("u") current_base = base current_symbol = symbol constant = u_func = u_constant = substep = None factored = integrand if a != 1: constant = a**base_exp current_base = sign_a + sign_b * (b/a) * current_symbol**2 factored = current_base ** base_exp if (b/a) != 1: u_func = sympy.sqrt(b/a) * symbol u_constant = sympy.sqrt(a/b) current_symbol = u_var current_base = sign_a + sign_b * 
current_symbol**2 substep = RuleClass(current_base ** base_exp, current_symbol) if u_func is not None: if u_constant != 1 and substep is not None: substep = ConstantTimesRule( u_constant, current_base ** base_exp, substep, u_constant * current_base ** base_exp, symbol) substep = URule(u_var, u_func, u_constant, substep, factored, symbol) if constant is not None and substep is not None: substep = ConstantTimesRule(constant, factored, substep, integrand, symbol) return substep a, b = [match.get(i, ZERO) for i in (a, b)] # list of (rule, base_exp, a, sign_a, b, sign_b, condition) possibilities = [] if sympy.simplify(2*exp + 1) == 0: possibilities.append((ArcsinRule, exp, a, 1, -b, -1, sympy.And(a > 0, b < 0))) possibilities.append((ArcsinhRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0))) possibilities.append((ArccoshRule, exp, -a, -1, b, 1, sympy.And(a < 0, b > 0))) possibilities = [p for p in possibilities if p[-1] is not sympy.false] if a.is_number and b.is_number: possibility = [p for p in possibilities if p[-1] is sympy.true] if len(possibility) == 1: return make_inverse_trig(*possibility[0][:-1]) elif possibilities: return PiecewiseRule( [(make_inverse_trig(*p[:-1]), p[-1]) for p in possibilities], integrand, symbol) def add_rule(integral): integrand, symbol = integral results = [integral_steps(g, symbol) for g in integrand.as_ordered_terms()] return None if None in results else AddRule(results, integrand, symbol) def mul_rule(integral): integrand, symbol = integral # Constant times function case coeff, f = integrand.as_independent(symbol) next_step = integral_steps(f, symbol) if coeff != 1 and next_step is not None: return ConstantTimesRule( coeff, f, next_step, integrand, symbol) def _parts_rule(integrand, symbol): # LIATE rule: # log, inverse trig, algebraic, trigonometric, exponential def pull_out_algebraic(integrand): integrand = integrand.cancel().together() # iterating over Piecewise args would not work here algebraic = ([] if isinstance(integrand, sympy.Piecewise) else [arg for arg in integrand.args if arg.is_algebraic_expr(symbol)]) if algebraic: u = sympy.Mul(*algebraic) dv = (integrand / u).cancel() return u, dv def pull_out_u(*functions): def pull_out_u_rl(integrand): if any([integrand.has(f) for f in functions]): args = [arg for arg in integrand.args if any(isinstance(arg, cls) for cls in functions)] if args: u = reduce(lambda a,b: a*b, args) dv = integrand / u return u, dv return pull_out_u_rl liate_rules = [pull_out_u(sympy.log), pull_out_u(*inverse_trig_functions), pull_out_algebraic, pull_out_u(sympy.sin, sympy.cos), pull_out_u(sympy.exp)] dummy = sympy.Dummy("temporary") # we can integrate log(x) and atan(x) by setting dv = 1 if isinstance(integrand, (sympy.log, *inverse_trig_functions)): integrand = dummy * integrand for index, rule in enumerate(liate_rules): result = rule(integrand) if result: u, dv = result # Don't pick u to be a constant if possible if symbol not in u.free_symbols and not u.has(dummy): return u = u.subs(dummy, 1) dv = dv.subs(dummy, 1) # Don't pick a non-polynomial algebraic to be differentiated if rule == pull_out_algebraic and not u.is_polynomial(symbol): return # Don't trade one logarithm for another if isinstance(u, sympy.log): rec_dv = 1/dv if (rec_dv.is_polynomial(symbol) and degree(rec_dv, symbol) == 1): return # Can integrate a polynomial times OrthogonalPolynomial if rule == pull_out_algebraic and isinstance(dv, OrthogonalPolynomial): v_step = integral_steps(dv, symbol) if contains_dont_know(v_step): return else: du = u.diff(symbol) v = 
_manualintegrate(v_step) return u, dv, v, du, v_step # make sure dv is amenable to integration accept = False if index < 2: # log and inverse trig are usually worth trying accept = True elif (rule == pull_out_algebraic and dv.args and all(isinstance(a, (sympy.sin, sympy.cos, sympy.exp)) for a in dv.args)): accept = True else: for rule in liate_rules[index + 1:]: r = rule(integrand) if r and r[0].subs(dummy, 1).equals(dv): accept = True break if accept: du = u.diff(symbol) v_step = integral_steps(sympy.simplify(dv), symbol) if not contains_dont_know(v_step): v = _manualintegrate(v_step) return u, dv, v, du, v_step def parts_rule(integral): integrand, symbol = integral constant, integrand = integrand.as_coeff_Mul() result = _parts_rule(integrand, symbol) steps = [] if result: u, dv, v, du, v_step = result debug("u : {}, dv : {}, v : {}, du : {}, v_step: {}".format(u, dv, v, du, v_step)) steps.append(result) if isinstance(v, sympy.Integral): return # Set a limit on the number of times u can be used if isinstance(u, (sympy.sin, sympy.cos, sympy.exp, sympy.sinh, sympy.cosh)): cachekey = u.xreplace({symbol: _cache_dummy}) if _parts_u_cache[cachekey] > 2: return _parts_u_cache[cachekey] += 1 # Try cyclic integration by parts a few times for _ in range(4): debug("Cyclic integration {} with v: {}, du: {}, integrand: {}".format(_, v, du, integrand)) coefficient = ((v * du) / integrand).cancel() if coefficient == 1: break if symbol not in coefficient.free_symbols: rule = CyclicPartsRule( [PartsRule(u, dv, v_step, None, None, None) for (u, dv, v, du, v_step) in steps], (-1) ** len(steps) * coefficient, integrand, symbol ) if (constant != 1) and rule: rule = ConstantTimesRule(constant, integrand, rule, constant * integrand, symbol) return rule # _parts_rule is sensitive to constants, factor it out next_constant, next_integrand = (v * du).as_coeff_Mul() result = _parts_rule(next_integrand, symbol) if result: u, dv, v, du, v_step = result u *= next_constant du *= next_constant steps.append((u, dv, v, du, v_step)) else: break def make_second_step(steps, integrand): if steps: u, dv, v, du, v_step = steps[0] return PartsRule(u, dv, v_step, make_second_step(steps[1:], v * du), integrand, symbol) else: steps = integral_steps(integrand, symbol) if steps: return steps else: return DontKnowRule(integrand, symbol) if steps: u, dv, v, du, v_step = steps[0] rule = PartsRule(u, dv, v_step, make_second_step(steps[1:], v * du), integrand, symbol) if (constant != 1) and rule: rule = ConstantTimesRule(constant, integrand, rule, constant * integrand, symbol) return rule def trig_rule(integral): integrand, symbol = integral if isinstance(integrand, sympy.sin) or isinstance(integrand, sympy.cos): arg = integrand.args[0] if not isinstance(arg, sympy.Symbol): return # perhaps a substitution can deal with it if isinstance(integrand, sympy.sin): func = 'sin' else: func = 'cos' return TrigRule(func, arg, integrand, symbol) if integrand == sympy.sec(symbol)**2: return TrigRule('sec**2', symbol, integrand, symbol) elif integrand == sympy.csc(symbol)**2: return TrigRule('csc**2', symbol, integrand, symbol) if isinstance(integrand, sympy.tan): rewritten = sympy.sin(*integrand.args) / sympy.cos(*integrand.args) elif isinstance(integrand, sympy.cot): rewritten = sympy.cos(*integrand.args) / sympy.sin(*integrand.args) elif isinstance(integrand, sympy.sec): arg = integrand.args[0] rewritten = ((sympy.sec(arg)**2 + sympy.tan(arg) * sympy.sec(arg)) / (sympy.sec(arg) + sympy.tan(arg))) elif isinstance(integrand, sympy.csc): arg = 
integrand.args[0] rewritten = ((sympy.csc(arg)**2 + sympy.cot(arg) * sympy.csc(arg)) / (sympy.csc(arg) + sympy.cot(arg))) else: return return RewriteRule( rewritten, integral_steps(rewritten, symbol), integrand, symbol ) def trig_product_rule(integral): integrand, symbol = integral sectan = sympy.sec(symbol) * sympy.tan(symbol) q = integrand / sectan if symbol not in q.free_symbols: rule = TrigRule('sec*tan', symbol, sectan, symbol) if q != 1 and rule: rule = ConstantTimesRule(q, sectan, rule, integrand, symbol) return rule csccot = -sympy.csc(symbol) * sympy.cot(symbol) q = integrand / csccot if symbol not in q.free_symbols: rule = TrigRule('csc*cot', symbol, csccot, symbol) if q != 1 and rule: rule = ConstantTimesRule(q, csccot, rule, integrand, symbol) return rule def quadratic_denom_rule(integral): integrand, symbol = integral a = sympy.Wild('a', exclude=[symbol]) b = sympy.Wild('b', exclude=[symbol]) c = sympy.Wild('c', exclude=[symbol]) match = integrand.match(a / (b * symbol ** 2 + c)) if match: a, b, c = match[a], match[b], match[c] if b.is_extended_real and c.is_extended_real: return PiecewiseRule([(ArctanRule(a, b, c, integrand, symbol), sympy.Gt(c / b, 0)), (ArccothRule(a, b, c, integrand, symbol), sympy.And(sympy.Gt(symbol ** 2, -c / b), sympy.Lt(c / b, 0))), (ArctanhRule(a, b, c, integrand, symbol), sympy.And(sympy.Lt(symbol ** 2, -c / b), sympy.Lt(c / b, 0))), ], integrand, symbol) else: return ArctanRule(a, b, c, integrand, symbol) d = sympy.Wild('d', exclude=[symbol]) match2 = integrand.match(a / (b * symbol ** 2 + c * symbol + d)) if match2: b, c = match2[b], match2[c] if b.is_zero: return u = sympy.Dummy('u') u_func = symbol + c/(2*b) integrand2 = integrand.subs(symbol, u - c / (2*b)) next_step = integral_steps(integrand2, u) if next_step: return URule(u, u_func, None, next_step, integrand2, symbol) else: return e = sympy.Wild('e', exclude=[symbol]) match3 = integrand.match((a* symbol + b) / (c * symbol ** 2 + d * symbol + e)) if match3: a, b, c, d, e = match3[a], match3[b], match3[c], match3[d], match3[e] if c.is_zero: return denominator = c * symbol**2 + d * symbol + e const = a/(2*c) numer1 = (2*c*symbol+d) numer2 = - const*d + b u = sympy.Dummy('u') step1 = URule(u, denominator, const, integral_steps(u**(-1), u), integrand, symbol) if const != 1: step1 = ConstantTimesRule(const, numer1/denominator, step1, const*numer1/denominator, symbol) if numer2.is_zero: return step1 step2 = integral_steps(numer2/denominator, symbol) substeps = AddRule([step1, step2], integrand, symbol) rewriten = const*numer1/denominator+numer2/denominator return RewriteRule(rewriten, substeps, integrand, symbol) return def root_mul_rule(integral): integrand, symbol = integral a = sympy.Wild('a', exclude=[symbol]) b = sympy.Wild('b', exclude=[symbol]) c = sympy.Wild('c') match = integrand.match(sympy.sqrt(a * symbol + b) * c) if not match: return a, b, c = match[a], match[b], match[c] d = sympy.Wild('d', exclude=[symbol]) e = sympy.Wild('e', exclude=[symbol]) f = sympy.Wild('f') recursion_test = c.match(sympy.sqrt(d * symbol + e) * f) if recursion_test: return u = sympy.Dummy('u') u_func = sympy.sqrt(a * symbol + b) integrand = integrand.subs(u_func, u) integrand = integrand.subs(symbol, (u**2 - b) / a) integrand = integrand * 2 * u / a next_step = integral_steps(integrand, u) if next_step: return URule(u, u_func, None, next_step, integrand, symbol) @sympy.cacheit def make_wilds(symbol): a = sympy.Wild('a', exclude=[symbol]) b = sympy.Wild('b', exclude=[symbol]) m = sympy.Wild('m', 
exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)]) n = sympy.Wild('n', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)]) return a, b, m, n @sympy.cacheit def sincos_pattern(symbol): a, b, m, n = make_wilds(symbol) pattern = sympy.sin(a*symbol)**m * sympy.cos(b*symbol)**n return pattern, a, b, m, n @sympy.cacheit def tansec_pattern(symbol): a, b, m, n = make_wilds(symbol) pattern = sympy.tan(a*symbol)**m * sympy.sec(b*symbol)**n return pattern, a, b, m, n @sympy.cacheit def cotcsc_pattern(symbol): a, b, m, n = make_wilds(symbol) pattern = sympy.cot(a*symbol)**m * sympy.csc(b*symbol)**n return pattern, a, b, m, n @sympy.cacheit def heaviside_pattern(symbol): m = sympy.Wild('m', exclude=[symbol]) b = sympy.Wild('b', exclude=[symbol]) g = sympy.Wild('g') pattern = sympy.Heaviside(m*symbol + b) * g return pattern, m, b, g def uncurry(func): def uncurry_rl(args): return func(*args) return uncurry_rl def trig_rewriter(rewrite): def trig_rewriter_rl(args): a, b, m, n, integrand, symbol = args rewritten = rewrite(a, b, m, n, integrand, symbol) if rewritten != integrand: return RewriteRule( rewritten, integral_steps(rewritten, symbol), integrand, symbol) return trig_rewriter_rl sincos_botheven_condition = uncurry( lambda a, b, m, n, i, s: m.is_even and n.is_even and m.is_nonnegative and n.is_nonnegative) sincos_botheven = trig_rewriter( lambda a, b, m, n, i, symbol: ( (((1 - sympy.cos(2*a*symbol)) / 2) ** (m / 2)) * (((1 + sympy.cos(2*b*symbol)) / 2) ** (n / 2)) )) sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3) sincos_sinodd = trig_rewriter( lambda a, b, m, n, i, symbol: ( (1 - sympy.cos(a*symbol)**2)**((m - 1) / 2) * sympy.sin(a*symbol) * sympy.cos(b*symbol) ** n)) sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3) sincos_cosodd = trig_rewriter( lambda a, b, m, n, i, symbol: ( (1 - sympy.sin(b*symbol)**2)**((n - 1) / 2) * sympy.cos(b*symbol) * sympy.sin(a*symbol) ** m)) tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4) tansec_seceven = trig_rewriter( lambda a, b, m, n, i, symbol: ( (1 + sympy.tan(b*symbol)**2) ** (n/2 - 1) * sympy.sec(b*symbol)**2 * sympy.tan(a*symbol) ** m )) tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd) tansec_tanodd = trig_rewriter( lambda a, b, m, n, i, symbol: ( (sympy.sec(a*symbol)**2 - 1) ** ((m - 1) / 2) * sympy.tan(a*symbol) * sympy.sec(b*symbol) ** n )) tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0) tan_tansquared = trig_rewriter( lambda a, b, m, n, i, symbol: ( sympy.sec(a*symbol)**2 - 1)) cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4) cotcsc_csceven = trig_rewriter( lambda a, b, m, n, i, symbol: ( (1 + sympy.cot(b*symbol)**2) ** (n/2 - 1) * sympy.csc(b*symbol)**2 * sympy.cot(a*symbol) ** m )) cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd) cotcsc_cotodd = trig_rewriter( lambda a, b, m, n, i, symbol: ( (sympy.csc(a*symbol)**2 - 1) ** ((m - 1) / 2) * sympy.cot(a*symbol) * sympy.csc(b*symbol) ** n )) def trig_sincos_rule(integral): integrand, symbol = integral if any(integrand.has(f) for f in (sympy.sin, sympy.cos)): pattern, a, b, m, n = sincos_pattern(symbol) match = integrand.match(pattern) if not match: return return multiplexer({ sincos_botheven_condition: sincos_botheven, sincos_sinodd_condition: sincos_sinodd, sincos_cosodd_condition: sincos_cosodd })(tuple( [match.get(i, ZERO) for i in (a, b, m, n)] + [integrand, symbol])) 
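# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the rule machinery above): the sin-odd
# rewrite handled by ``sincos_sinodd`` replaces sin(a*x)**m with
# (1 - cos(a*x)**2)**((m - 1)/2) * sin(a*x), so that a u = cos(x)
# substitution can finish the integral.  The helper below is hypothetical
# and only checks that the rewrite is an identity for a small example; it
# is not part of the module's public interface.
def _sincos_sinodd_rewrite_demo():
    from sympy import sin, cos, simplify
    from sympy.abc import x
    original = sin(x)**3 * cos(x)**2
    rewritten = (1 - cos(x)**2) * sin(x) * cos(x)**2
    # Expected to simplify to 0, confirming the rewrite preserves the integrand.
    return simplify(rewritten - original)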
def trig_tansec_rule(integral): integrand, symbol = integral integrand = integrand.subs({ 1 / sympy.cos(symbol): sympy.sec(symbol) }) if any(integrand.has(f) for f in (sympy.tan, sympy.sec)): pattern, a, b, m, n = tansec_pattern(symbol) match = integrand.match(pattern) if not match: return return multiplexer({ tansec_tanodd_condition: tansec_tanodd, tansec_seceven_condition: tansec_seceven, tan_tansquared_condition: tan_tansquared })(tuple( [match.get(i, ZERO) for i in (a, b, m, n)] + [integrand, symbol])) def trig_cotcsc_rule(integral): integrand, symbol = integral integrand = integrand.subs({ 1 / sympy.sin(symbol): sympy.csc(symbol), 1 / sympy.tan(symbol): sympy.cot(symbol), sympy.cos(symbol) / sympy.tan(symbol): sympy.cot(symbol) }) if any(integrand.has(f) for f in (sympy.cot, sympy.csc)): pattern, a, b, m, n = cotcsc_pattern(symbol) match = integrand.match(pattern) if not match: return return multiplexer({ cotcsc_cotodd_condition: cotcsc_cotodd, cotcsc_csceven_condition: cotcsc_csceven })(tuple( [match.get(i, ZERO) for i in (a, b, m, n)] + [integrand, symbol])) def trig_sindouble_rule(integral): integrand, symbol = integral a = sympy.Wild('a', exclude=[sympy.sin(2*symbol)]) match = integrand.match(sympy.sin(2*symbol)*a) if match: sin_double = 2*sympy.sin(symbol)*sympy.cos(symbol)/sympy.sin(2*symbol) return integral_steps(integrand * sin_double, symbol) def trig_powers_products_rule(integral): return do_one(null_safe(trig_sincos_rule), null_safe(trig_tansec_rule), null_safe(trig_cotcsc_rule), null_safe(trig_sindouble_rule))(integral) def trig_substitution_rule(integral): integrand, symbol = integral A = sympy.Wild('a', exclude=[0, symbol]) B = sympy.Wild('b', exclude=[0, symbol]) theta = sympy.Dummy("theta") target_pattern = A + B*symbol**2 matches = integrand.find(target_pattern) for expr in matches: match = expr.match(target_pattern) a = match.get(A, ZERO) b = match.get(B, ZERO) a_positive = ((a.is_number and a > 0) or a.is_positive) b_positive = ((b.is_number and b > 0) or b.is_positive) a_negative = ((a.is_number and a < 0) or a.is_negative) b_negative = ((b.is_number and b < 0) or b.is_negative) x_func = None if a_positive and b_positive: # a**2 + b*x**2. Assume sec(theta) > 0, -pi/2 < theta < pi/2 x_func = (sympy.sqrt(a)/sympy.sqrt(b)) * sympy.tan(theta) # Do not restrict the domain: tan(theta) takes on any real # value on the interval -pi/2 < theta < pi/2 so x takes on # any value restriction = True elif a_positive and b_negative: # a**2 - b*x**2. Assume cos(theta) > 0, -pi/2 < theta < pi/2 constant = sympy.sqrt(a)/sympy.sqrt(-b) x_func = constant * sympy.sin(theta) restriction = sympy.And(symbol > -constant, symbol < constant) elif a_negative and b_positive: # b*x**2 - a**2. 
Assume sin(theta) > 0, 0 < theta < pi constant = sympy.sqrt(-a)/sympy.sqrt(b) x_func = constant * sympy.sec(theta) restriction = sympy.And(symbol > -constant, symbol < constant) if x_func: # Manually simplify sqrt(trig(theta)**2) to trig(theta) # Valid due to assumed domain restriction substitutions = {} for f in [sympy.sin, sympy.cos, sympy.tan, sympy.sec, sympy.csc, sympy.cot]: substitutions[sympy.sqrt(f(theta)**2)] = f(theta) substitutions[sympy.sqrt(f(theta)**(-2))] = 1/f(theta) replaced = integrand.subs(symbol, x_func).trigsimp() replaced = manual_subs(replaced, substitutions) if not replaced.has(symbol): replaced *= manual_diff(x_func, theta) replaced = replaced.trigsimp() secants = replaced.find(1/sympy.cos(theta)) if secants: replaced = replaced.xreplace({ 1/sympy.cos(theta): sympy.sec(theta) }) substep = integral_steps(replaced, theta) if not contains_dont_know(substep): return TrigSubstitutionRule( theta, x_func, replaced, substep, restriction, integrand, symbol) def heaviside_rule(integral): integrand, symbol = integral pattern, m, b, g = heaviside_pattern(symbol) match = integrand.match(pattern) if match and 0 != match[g]: # f = Heaviside(m*x + b)*g v_step = integral_steps(match[g], symbol) result = _manualintegrate(v_step) m, b = match[m], match[b] return HeavisideRule(m*symbol + b, -b/m, result, integrand, symbol) def substitution_rule(integral): integrand, symbol = integral u_var = sympy.Dummy("u") substitutions = find_substitutions(integrand, symbol, u_var) count = 0 if substitutions: debug("List of Substitution Rules") ways = [] for u_func, c, substituted in substitutions: subrule = integral_steps(substituted, u_var) count = count + 1 debug("Rule {}: {}".format(count, subrule)) if contains_dont_know(subrule): continue if sympy.simplify(c - 1) != 0: _, denom = c.as_numer_denom() if subrule: subrule = ConstantTimesRule(c, substituted, subrule, substituted, u_var) if denom.free_symbols: piecewise = [] could_be_zero = [] if isinstance(denom, sympy.Mul): could_be_zero = denom.args else: could_be_zero.append(denom) for expr in could_be_zero: if not fuzzy_not(expr.is_zero): substep = integral_steps(manual_subs(integrand, expr, 0), symbol) if substep: piecewise.append(( substep, sympy.Eq(expr, 0) )) piecewise.append((subrule, True)) subrule = PiecewiseRule(piecewise, substituted, symbol) ways.append(URule(u_var, u_func, c, subrule, integrand, symbol)) if len(ways) > 1: return AlternativeRule(ways, integrand, symbol) elif ways: return ways[0] elif integrand.has(sympy.exp): u_func = sympy.exp(symbol) c = 1 substituted = integrand / u_func.diff(symbol) substituted = substituted.subs(u_func, u_var) if symbol not in substituted.free_symbols: return URule(u_var, u_func, c, integral_steps(substituted, u_var), integrand, symbol) partial_fractions_rule = rewriter( lambda integrand, symbol: integrand.is_rational_function(), lambda integrand, symbol: integrand.apart(symbol)) cancel_rule = rewriter( # lambda integrand, symbol: integrand.is_algebraic_expr(), # lambda integrand, symbol: isinstance(integrand, sympy.Mul), lambda integrand, symbol: True, lambda integrand, symbol: integrand.cancel()) distribute_expand_rule = rewriter( lambda integrand, symbol: ( all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args) or isinstance(integrand, sympy.Pow) or isinstance(integrand, sympy.Mul)), lambda integrand, symbol: integrand.expand()) trig_expand_rule = rewriter( # If there are trig functions with different arguments, expand them lambda integrand, symbol: ( len({a.args[0] for a in 
integrand.atoms(TrigonometricFunction)}) > 1), lambda integrand, symbol: integrand.expand(trig=True)) def derivative_rule(integral): integrand = integral[0] diff_variables = integrand.variables undifferentiated_function = integrand.expr integrand_variables = undifferentiated_function.free_symbols if integral.symbol in integrand_variables: if integral.symbol in diff_variables: return DerivativeRule(*integral) else: return DontKnowRule(integrand, integral.symbol) else: return ConstantRule(integral.integrand, *integral) def rewrites_rule(integral): integrand, symbol = integral if integrand.match(1/sympy.cos(symbol)): rewritten = integrand.subs(1/sympy.cos(symbol), sympy.sec(symbol)) return RewriteRule(rewritten, integral_steps(rewritten, symbol), integrand, symbol) def fallback_rule(integral): return DontKnowRule(*integral) # Cache is used to break cyclic integrals. # Need to use the same dummy variable in cached expressions for them to match. # Also record "u" of integration by parts, to avoid infinite repetition. _integral_cache = {} # type: tDict[Expr, Optional[Expr]] _parts_u_cache = defaultdict(int) # type: tDict[Expr, int] _cache_dummy = sympy.Dummy("z") def integral_steps(integrand, symbol, **options): """Returns the steps needed to compute an integral. Explanation =========== This function attempts to mirror what a student would do by hand as closely as possible. SymPy Gamma uses this to provide a step-by-step explanation of an integral. The code it uses to format the results of this function can be found at https://github.com/sympy/sympy_gamma/blob/master/app/logic/intsteps.py. Examples ======== >>> from sympy import exp, sin >>> from sympy.integrals.manualintegrate import integral_steps >>> from sympy.abc import x >>> print(repr(integral_steps(exp(x) / (1 + exp(2 * x)), x))) \ # doctest: +NORMALIZE_WHITESPACE URule(u_var=_u, u_func=exp(x), constant=1, substep=PiecewiseRule(subfunctions=[(ArctanRule(a=1, b=1, c=1, context=1/(_u**2 + 1), symbol=_u), True), (ArccothRule(a=1, b=1, c=1, context=1/(_u**2 + 1), symbol=_u), False), (ArctanhRule(a=1, b=1, c=1, context=1/(_u**2 + 1), symbol=_u), False)], context=1/(_u**2 + 1), symbol=_u), context=exp(x)/(exp(2*x) + 1), symbol=x) >>> print(repr(integral_steps(sin(x), x))) \ # doctest: +NORMALIZE_WHITESPACE TrigRule(func='sin', arg=x, context=sin(x), symbol=x) >>> print(repr(integral_steps((x**2 + 3)**2 , x))) \ # doctest: +NORMALIZE_WHITESPACE RewriteRule(rewritten=x**4 + 6*x**2 + 9, substep=AddRule(substeps=[PowerRule(base=x, exp=4, context=x**4, symbol=x), ConstantTimesRule(constant=6, other=x**2, substep=PowerRule(base=x, exp=2, context=x**2, symbol=x), context=6*x**2, symbol=x), ConstantRule(constant=9, context=9, symbol=x)], context=x**4 + 6*x**2 + 9, symbol=x), context=(x**2 + 3)**2, symbol=x) Returns ======= rule : namedtuple The first step; most rules have substeps that must also be considered. These substeps can be evaluated using ``manualintegrate`` to obtain a result. 
""" cachekey = integrand.xreplace({symbol: _cache_dummy}) if cachekey in _integral_cache: if _integral_cache[cachekey] is None: # Stop this attempt, because it leads around in a loop return DontKnowRule(integrand, symbol) else: # TODO: This is for future development, as currently # _integral_cache gets no values other than None return (_integral_cache[cachekey].xreplace(_cache_dummy, symbol), symbol) else: _integral_cache[cachekey] = None integral = IntegralInfo(integrand, symbol) def key(integral): integrand = integral.integrand if isinstance(integrand, TrigonometricFunction): return TrigonometricFunction elif isinstance(integrand, sympy.Derivative): return sympy.Derivative elif symbol not in integrand.free_symbols: return sympy.Number else: for cls in (sympy.Pow, sympy.Symbol, sympy.exp, sympy.log, sympy.Add, sympy.Mul, *inverse_trig_functions, sympy.Heaviside, OrthogonalPolynomial): if isinstance(integrand, cls): return cls def integral_is_subclass(*klasses): def _integral_is_subclass(integral): k = key(integral) return k and issubclass(k, klasses) return _integral_is_subclass result = do_one( null_safe(special_function_rule), null_safe(switch(key, { sympy.Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule), \ null_safe(quadratic_denom_rule)), sympy.Symbol: power_rule, sympy.exp: exp_rule, sympy.Add: add_rule, sympy.Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule), \ null_safe(heaviside_rule), null_safe(quadratic_denom_rule), \ null_safe(root_mul_rule)), sympy.Derivative: derivative_rule, TrigonometricFunction: trig_rule, sympy.Heaviside: heaviside_rule, OrthogonalPolynomial: orthogonal_poly_rule, sympy.Number: constant_rule })), do_one( null_safe(trig_rule), null_safe(alternatives( rewrites_rule, substitution_rule, condition( integral_is_subclass(sympy.Mul, sympy.Pow), partial_fractions_rule), condition( integral_is_subclass(sympy.Mul, sympy.Pow), cancel_rule), condition( integral_is_subclass(sympy.Mul, sympy.log, *inverse_trig_functions), parts_rule), condition( integral_is_subclass(sympy.Mul, sympy.Pow), distribute_expand_rule), trig_powers_products_rule, trig_expand_rule )), null_safe(trig_substitution_rule) ), fallback_rule)(integral) del _integral_cache[cachekey] return result @evaluates(ConstantRule) def eval_constant(constant, integrand, symbol): return constant * symbol @evaluates(ConstantTimesRule) def eval_constanttimes(constant, other, substep, integrand, symbol): return constant * _manualintegrate(substep) @evaluates(PowerRule) def eval_power(base, exp, integrand, symbol): return sympy.Piecewise( ((base**(exp + 1))/(exp + 1), sympy.Ne(exp, -1)), (sympy.log(base), True), ) @evaluates(ExpRule) def eval_exp(base, exp, integrand, symbol): return integrand / sympy.ln(base) @evaluates(AddRule) def eval_add(substeps, integrand, symbol): return sum(map(_manualintegrate, substeps)) @evaluates(URule) def eval_u(u_var, u_func, constant, substep, integrand, symbol): result = _manualintegrate(substep) if u_func.is_Pow and u_func.exp == -1: # avoid needless -log(1/x) from substitution result = result.subs(sympy.log(u_var), -sympy.log(u_func.base)) return result.subs(u_var, u_func) @evaluates(PartsRule) def eval_parts(u, dv, v_step, second_step, integrand, symbol): v = _manualintegrate(v_step) return u * v - _manualintegrate(second_step) @evaluates(CyclicPartsRule) def eval_cyclicparts(parts_rules, coefficient, integrand, symbol): coefficient = 1 - coefficient result = [] sign = 1 for rule in parts_rules: result.append(sign * rule.u * 
_manualintegrate(rule.v_step)) sign *= -1 return sympy.Add(*result) / coefficient @evaluates(TrigRule) def eval_trig(func, arg, integrand, symbol): if func == 'sin': return -sympy.cos(arg) elif func == 'cos': return sympy.sin(arg) elif func == 'sec*tan': return sympy.sec(arg) elif func == 'csc*cot': return sympy.csc(arg) elif func == 'sec**2': return sympy.tan(arg) elif func == 'csc**2': return -sympy.cot(arg) @evaluates(ArctanRule) def eval_arctan(a, b, c, integrand, symbol): return a / b * 1 / sympy.sqrt(c / b) * sympy.atan(symbol / sympy.sqrt(c / b)) @evaluates(ArccothRule) def eval_arccoth(a, b, c, integrand, symbol): return - a / b * 1 / sympy.sqrt(-c / b) * sympy.acoth(symbol / sympy.sqrt(-c / b)) @evaluates(ArctanhRule) def eval_arctanh(a, b, c, integrand, symbol): return - a / b * 1 / sympy.sqrt(-c / b) * sympy.atanh(symbol / sympy.sqrt(-c / b)) @evaluates(ReciprocalRule) def eval_reciprocal(func, integrand, symbol): return sympy.ln(func) @evaluates(ArcsinRule) def eval_arcsin(integrand, symbol): return sympy.asin(symbol) @evaluates(InverseHyperbolicRule) def eval_inversehyperbolic(func, integrand, symbol): return func(symbol) @evaluates(AlternativeRule) def eval_alternative(alternatives, integrand, symbol): return _manualintegrate(alternatives[0]) @evaluates(RewriteRule) def eval_rewrite(rewritten, substep, integrand, symbol): return _manualintegrate(substep) @evaluates(PiecewiseRule) def eval_piecewise(substeps, integrand, symbol): return sympy.Piecewise(*[(_manualintegrate(substep), cond) for substep, cond in substeps]) @evaluates(TrigSubstitutionRule) def eval_trigsubstitution(theta, func, rewritten, substep, restriction, integrand, symbol): func = func.subs(sympy.sec(theta), 1/sympy.cos(theta)) func = func.subs(sympy.csc(theta), 1/sympy.sin(theta)) func = func.subs(sympy.cot(theta), 1/sympy.tan(theta)) trig_function = list(func.find(TrigonometricFunction)) assert len(trig_function) == 1 trig_function = trig_function[0] relation = sympy.solve(symbol - func, trig_function) assert len(relation) == 1 numer, denom = sympy.fraction(relation[0]) if isinstance(trig_function, sympy.sin): opposite = numer hypotenuse = denom adjacent = sympy.sqrt(denom**2 - numer**2) inverse = sympy.asin(relation[0]) elif isinstance(trig_function, sympy.cos): adjacent = numer hypotenuse = denom opposite = sympy.sqrt(denom**2 - numer**2) inverse = sympy.acos(relation[0]) elif isinstance(trig_function, sympy.tan): opposite = numer adjacent = denom hypotenuse = sympy.sqrt(denom**2 + numer**2) inverse = sympy.atan(relation[0]) substitution = [ (sympy.sin(theta), opposite/hypotenuse), (sympy.cos(theta), adjacent/hypotenuse), (sympy.tan(theta), opposite/adjacent), (theta, inverse) ] return sympy.Piecewise( (_manualintegrate(substep).subs(substitution).trigsimp(), restriction) ) @evaluates(DerivativeRule) def eval_derivativerule(integrand, symbol): # isinstance(integrand, Derivative) should be True variable_count = list(integrand.variable_count) for i, (var, count) in enumerate(variable_count): if var == symbol: variable_count[i] = (var, count-1) break return sympy.Derivative(integrand.expr, *variable_count) @evaluates(HeavisideRule) def eval_heaviside(harg, ibnd, substep, integrand, symbol): # If we are integrating over x and the integrand has the form # Heaviside(m*x+b)*g(x) == Heaviside(harg)*g(symbol) # then there needs to be continuity at -b/m == ibnd, # so we subtract the appropriate term. 
return sympy.Heaviside(harg)*(substep - substep.subs(symbol, ibnd)) @evaluates(JacobiRule) def eval_jacobi(n, a, b, integrand, symbol): return Piecewise( (2*sympy.jacobi(n + 1, a - 1, b - 1, symbol)/(n + a + b), Ne(n + a + b, 0)), (symbol, Eq(n, 0)), ((a + b + 2)*symbol**2/4 + (a - b)*symbol/2, Eq(n, 1))) @evaluates(GegenbauerRule) def eval_gegenbauer(n, a, integrand, symbol): return Piecewise( (sympy.gegenbauer(n + 1, a - 1, symbol)/(2*(a - 1)), Ne(a, 1)), (sympy.chebyshevt(n + 1, symbol)/(n + 1), Ne(n, -1)), (sympy.S.Zero, True)) @evaluates(ChebyshevTRule) def eval_chebyshevt(n, integrand, symbol): return Piecewise(((sympy.chebyshevt(n + 1, symbol)/(n + 1) - sympy.chebyshevt(n - 1, symbol)/(n - 1))/2, Ne(sympy.Abs(n), 1)), (symbol**2/2, True)) @evaluates(ChebyshevURule) def eval_chebyshevu(n, integrand, symbol): return Piecewise( (sympy.chebyshevt(n + 1, symbol)/(n + 1), Ne(n, -1)), (sympy.S.Zero, True)) @evaluates(LegendreRule) def eval_legendre(n, integrand, symbol): return (sympy.legendre(n + 1, symbol) - sympy.legendre(n - 1, symbol))/(2*n + 1) @evaluates(HermiteRule) def eval_hermite(n, integrand, symbol): return sympy.hermite(n + 1, symbol)/(2*(n + 1)) @evaluates(LaguerreRule) def eval_laguerre(n, integrand, symbol): return sympy.laguerre(n, symbol) - sympy.laguerre(n + 1, symbol) @evaluates(AssocLaguerreRule) def eval_assoclaguerre(n, a, integrand, symbol): return -sympy.assoc_laguerre(n + 1, a - 1, symbol) @evaluates(CiRule) def eval_ci(a, b, integrand, symbol): return sympy.cos(b)*sympy.Ci(a*symbol) - sympy.sin(b)*sympy.Si(a*symbol) @evaluates(ChiRule) def eval_chi(a, b, integrand, symbol): return sympy.cosh(b)*sympy.Chi(a*symbol) + sympy.sinh(b)*sympy.Shi(a*symbol) @evaluates(EiRule) def eval_ei(a, b, integrand, symbol): return sympy.exp(b)*sympy.Ei(a*symbol) @evaluates(SiRule) def eval_si(a, b, integrand, symbol): return sympy.sin(b)*sympy.Ci(a*symbol) + sympy.cos(b)*sympy.Si(a*symbol) @evaluates(ShiRule) def eval_shi(a, b, integrand, symbol): return sympy.sinh(b)*sympy.Chi(a*symbol) + sympy.cosh(b)*sympy.Shi(a*symbol) @evaluates(ErfRule) def eval_erf(a, b, c, integrand, symbol): if a.is_extended_real: return Piecewise( (sympy.sqrt(sympy.pi/(-a))/2 * sympy.exp(c - b**2/(4*a)) * sympy.erf((-2*a*symbol - b)/(2*sympy.sqrt(-a))), a < 0), (sympy.sqrt(sympy.pi/a)/2 * sympy.exp(c - b**2/(4*a)) * sympy.erfi((2*a*symbol + b)/(2*sympy.sqrt(a))), True)) else: return sympy.sqrt(sympy.pi/a)/2 * sympy.exp(c - b**2/(4*a)) * \ sympy.erfi((2*a*symbol + b)/(2*sympy.sqrt(a))) @evaluates(FresnelCRule) def eval_fresnelc(a, b, c, integrand, symbol): return sympy.sqrt(sympy.pi/(2*a)) * ( sympy.cos(b**2/(4*a) - c)*sympy.fresnelc((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)) + sympy.sin(b**2/(4*a) - c)*sympy.fresnels((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi))) @evaluates(FresnelSRule) def eval_fresnels(a, b, c, integrand, symbol): return sympy.sqrt(sympy.pi/(2*a)) * ( sympy.cos(b**2/(4*a) - c)*sympy.fresnels((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)) - sympy.sin(b**2/(4*a) - c)*sympy.fresnelc((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi))) @evaluates(LiRule) def eval_li(a, b, integrand, symbol): return sympy.li(a*symbol + b)/a @evaluates(PolylogRule) def eval_polylog(a, b, integrand, symbol): return sympy.polylog(b + 1, a*symbol) @evaluates(UpperGammaRule) def eval_uppergamma(a, e, integrand, symbol): return symbol**e * (-a*symbol)**(-e) * sympy.uppergamma(e + 1, -a*symbol)/a @evaluates(EllipticFRule) def eval_elliptic_f(a, d, integrand, symbol): return sympy.elliptic_f(symbol, d/a)/sympy.sqrt(a) 
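# ---------------------------------------------------------------------------
# Hedged usage sketch for the special-function evaluators above.  The demo
# function is hypothetical (not part of the module); it runs a Gaussian
# integrand through the public entry point, which should dispatch to
# ErfRule via special_function_rule and evaluate through eval_erf.
def _erf_rule_demo():
    from sympy import exp
    from sympy.abc import x
    # For a = -1, b = c = 0 the ErfRule evaluator gives sqrt(pi)*erf(x)/2.
    return manualintegrate(exp(-x**2), x)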
@evaluates(EllipticERule) def eval_elliptic_e(a, d, integrand, symbol): return sympy.elliptic_e(symbol, d/a)*sympy.sqrt(a) @evaluates(DontKnowRule) def eval_dontknowrule(integrand, symbol): return sympy.Integral(integrand, symbol) def _manualintegrate(rule): evaluator = evaluators.get(rule.__class__) if not evaluator: raise ValueError("Cannot evaluate rule %s" % repr(rule)) return evaluator(*rule) def manualintegrate(f, var): """manualintegrate(f, var) Explanation =========== Compute indefinite integral of a single variable using an algorithm that resembles what a student would do by hand. Unlike :func:`~.integrate`, var can only be a single symbol. Examples ======== >>> from sympy import sin, cos, tan, exp, log, integrate >>> from sympy.integrals.manualintegrate import manualintegrate >>> from sympy.abc import x >>> manualintegrate(1 / x, x) log(x) >>> integrate(1/x) log(x) >>> manualintegrate(log(x), x) x*log(x) - x >>> integrate(log(x)) x*log(x) - x >>> manualintegrate(exp(x) / (1 + exp(2 * x)), x) atan(exp(x)) >>> integrate(exp(x) / (1 + exp(2 * x))) RootSum(4*_z**2 + 1, Lambda(_i, _i*log(2*_i + exp(x)))) >>> manualintegrate(cos(x)**4 * sin(x), x) -cos(x)**5/5 >>> integrate(cos(x)**4 * sin(x), x) -cos(x)**5/5 >>> manualintegrate(cos(x)**4 * sin(x)**3, x) cos(x)**7/7 - cos(x)**5/5 >>> integrate(cos(x)**4 * sin(x)**3, x) cos(x)**7/7 - cos(x)**5/5 >>> manualintegrate(tan(x), x) -log(cos(x)) >>> integrate(tan(x), x) -log(cos(x)) See Also ======== sympy.integrals.integrals.integrate sympy.integrals.integrals.Integral.doit sympy.integrals.integrals.Integral """ result = _manualintegrate(integral_steps(f, var)) # Clear the cache of u-parts _parts_u_cache.clear() # If we got Piecewise with two parts, put generic first if isinstance(result, Piecewise) and len(result.args) == 2: cond = result.args[0][1] if isinstance(cond, Eq) and result.args[1][1] == True: result = result.func( (result.args[1][0], sympy.Ne(*cond.args)), (result.args[0][0], True)) return result
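# ---------------------------------------------------------------------------
# Hedged end-to-end sketch (hypothetical helper, not exported): the rule tree
# returned by integral_steps can be evaluated with _manualintegrate, which is
# what manualintegrate does internally before post-processing the Piecewise
# ordering.
def _integration_by_parts_demo():
    from sympy import sin
    from sympy.abc import x
    # Typically a PartsRule with u = x, dv = sin(x).
    rule = integral_steps(x * sin(x), x)
    # Evaluating the rule tree should give -x*cos(x) + sin(x).
    return _manualintegrate(rule)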
5e95b574d802bb100d49db2ed7bab966f2cd99e44a26e786978cbe56a9d94136
""" Integral Transforms """ from functools import reduce from sympy.core import S from sympy.core.compatibility import iterable from sympy.core.function import Function from sympy.core.relational import _canonical, Ge, Gt from sympy.core.numbers import oo from sympy.core.symbol import Dummy from sympy.integrals import integrate, Integral from sympy.integrals.meijerint import _dummy from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And from sympy.simplify import simplify from sympy.utilities import default_sort_key from sympy.matrices.matrices import MatrixBase ########################################################################## # Helpers / Utilities ########################################################################## class IntegralTransformError(NotImplementedError): """ Exception raised in relation to problems computing transforms. Explanation =========== This class is mostly used internally; if integrals cannot be computed objects representing unevaluated transforms are usually returned. The hint ``needeval=True`` can be used to disable returning transform objects, and instead raise this exception if an integral cannot be computed. """ def __init__(self, transform, function, msg): super().__init__( "%s Transform could not be computed: %s." % (transform, msg)) self.function = function class IntegralTransform(Function): """ Base class for integral transforms. Explanation =========== This class represents unevaluated transforms. To implement a concrete transform, derive from this class and implement the ``_compute_transform(f, x, s, **hints)`` and ``_as_integral(f, x, s)`` functions. If the transform cannot be computed, raise :obj:`IntegralTransformError`. Also set ``cls._name``. For instance, >>> from sympy.integrals.transforms import LaplaceTransform >>> LaplaceTransform._name 'Laplace' Implement ``self._collapse_extra`` if your function returns more than just a number and possibly a convergence condition. """ @property def function(self): """ The function to be transformed. """ return self.args[0] @property def function_variable(self): """ The dependent variable of the function to be transformed. """ return self.args[1] @property def transform_variable(self): """ The independent transform variable. """ return self.args[2] @property def free_symbols(self): """ This method returns the symbols that will exist when the transform is evaluated. """ return self.function.free_symbols.union({self.transform_variable}) \ - {self.function_variable} def _compute_transform(self, f, x, s, **hints): raise NotImplementedError def _as_integral(self, f, x, s): raise NotImplementedError def _collapse_extra(self, extra): cond = And(*extra) if cond == False: raise IntegralTransformError(self.__class__.name, None, '') return cond def doit(self, **hints): """ Try to evaluate the transform in closed form. Explanation =========== This general function handles linearity, but apart from that leaves pretty much everything to _compute_transform. Standard hints are the following: - ``simplify``: whether or not to simplify the result - ``noconds``: if True, don't return convergence conditions - ``needeval``: if True, raise IntegralTransformError instead of returning IntegralTransform objects The default values of these hints depend on the concrete transform, usually the default is ``(simplify, noconds, needeval) = (True, False, False)``. 
""" from sympy import Add, expand_mul, Mul from sympy.core.function import AppliedUndef needeval = hints.pop('needeval', False) try_directly = not any(func.has(self.function_variable) for func in self.function.atoms(AppliedUndef)) if try_directly: try: return self._compute_transform(self.function, self.function_variable, self.transform_variable, **hints) except IntegralTransformError: pass fn = self.function if not fn.is_Add: fn = expand_mul(fn) if fn.is_Add: hints['needeval'] = needeval res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints) for x in fn.args] extra = [] ress = [] for x in res: if not isinstance(x, tuple): x = [x] ress.append(x[0]) if len(x) == 2: # only a condition extra.append(x[1]) elif len(x) > 2: # some region parameters and a condition (Mellin, Laplace) extra += [x[1:]] res = Add(*ress) if not extra: return res try: extra = self._collapse_extra(extra) if iterable(extra): return tuple([res]) + tuple(extra) else: return (res, extra) except IntegralTransformError: pass if needeval: raise IntegralTransformError( self.__class__._name, self.function, 'needeval') # TODO handle derivatives etc # pull out constant coefficients coeff, rest = fn.as_coeff_mul(self.function_variable) return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:]))) @property def as_integral(self): return self._as_integral(self.function, self.function_variable, self.transform_variable) def _eval_rewrite_as_Integral(self, *args, **kwargs): return self.as_integral from sympy.solvers.inequalities import _solve_inequality def _simplify(expr, doit): from sympy import powdenest, piecewise_fold if doit: return simplify(powdenest(piecewise_fold(expr), polar=True)) return expr def _noconds_(default): """ This is a decorator generator for dropping convergence conditions. Explanation =========== Suppose you define a function ``transform(*args)`` which returns a tuple of the form ``(result, cond1, cond2, ...)``. Decorating it ``@_noconds_(default)`` will add a new keyword argument ``noconds`` to it. If ``noconds=True``, the return value will be altered to be only ``result``, whereas if ``noconds=False`` the return value will not be altered. The default value of the ``noconds`` keyword will be ``default`` (i.e. the argument of this function). """ def make_wrapper(func): from sympy.core.decorators import wraps @wraps(func) def wrapper(*args, noconds=default, **kwargs): res = func(*args, **kwargs) if noconds: return res[0] return res return wrapper return make_wrapper _noconds = _noconds_(False) ########################################################################## # Mellin Transform ########################################################################## def _default_integrator(f, x): return integrate(f, (x, 0, oo)) @_noconds def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True): """ Backend function to compute Mellin transforms. """ from sympy import re, Max, Min, count_ops # We use a fresh dummy, because assumptions on s might drop conditions on # convergence of the integral. s = _dummy('s', 'mellin-transform', f) F = integrator(x**(s - 1) * f, x) if not F.has(Integral): return _simplify(F.subs(s, s_), simplify), (-oo, oo), S.true if not F.is_Piecewise: # XXX can this work if integration gives continuous result now? 
raise IntegralTransformError('Mellin', f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError( 'Mellin', f, 'integral in unexpected form') def process_conds(cond): """ Turn ``cond`` into a strip (a, b), and auxiliary conditions. """ a = -oo b = oo aux = S.true conds = conjuncts(to_cnf(cond)) t = Dummy('t', real=True) for c in conds: a_ = oo b_ = -oo aux_ = [] for d in disjuncts(c): d_ = d.replace( re, lambda x: x.as_real_imag()[0]).subs(re(s), t) if not d.is_Relational or \ d.rel_op in ('==', '!=') \ or d_.has(s) or not d_.has(t): aux_ += [d] continue soln = _solve_inequality(d_, t) if not soln.is_Relational or \ soln.rel_op in ('==', '!='): aux_ += [d] continue if soln.lts == t: b_ = Max(soln.gts, b_) else: a_ = Min(soln.lts, a_) if a_ != oo and a_ != b: a = Max(a_, a) elif b_ != -oo and b_ != a: b = Min(b_, b) else: aux = And(aux, Or(*aux_)) return a, b, aux conds = [process_conds(c) for c in disjuncts(cond)] conds = [x for x in conds if x[2] != False] conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2]))) if not conds: raise IntegralTransformError('Mellin', f, 'no convergence found') a, b, aux = conds[0] return _simplify(F.subs(s, s_), simplify), (a, b), aux class MellinTransform(IntegralTransform): """ Class representing unevaluated Mellin transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Mellin transforms, see the :func:`mellin_transform` docstring. """ _name = 'Mellin' def _compute_transform(self, f, x, s, **hints): return _mellin_transform(f, x, s, **hints) def _as_integral(self, f, x, s): return Integral(f*x**(s - 1), (x, 0, oo)) def _collapse_extra(self, extra): from sympy import Max, Min a = [] b = [] cond = [] for (sa, sb), c in extra: a += [sa] b += [sb] cond += [c] res = (Max(*a), Min(*b)), And(*cond) if (res[0][0] >= res[0][1]) == True or res[1] == False: raise IntegralTransformError( 'Mellin', None, 'no combined convergence.') return res def mellin_transform(f, x, s, **hints): r""" Compute the Mellin transform `F(s)` of `f(x)`, .. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x. For all "sensible" functions, this converges absolutely in a strip `a < \operatorname{Re}(s) < b`. Explanation =========== The Mellin transform is related via change of variables to the Fourier transform, and also to the (bilateral) Laplace transform. This function returns ``(F, (a, b), cond)`` where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip (as above), and ``cond`` are auxiliary convergence conditions. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`MellinTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``, then only `F` will be returned (i.e. not ``cond``, and also not the strip ``(a, b)``). Examples ======== >>> from sympy.integrals.transforms import mellin_transform >>> from sympy import exp >>> from sympy.abc import x, s >>> mellin_transform(exp(-x), x, s) (gamma(s), (0, oo), True) See Also ======== inverse_mellin_transform, laplace_transform, fourier_transform hankel_transform, inverse_hankel_transform """ return MellinTransform(f, x, s).doit(**hints) def _rewrite_sin(m_n, s, a, b): """ Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible with the strip (a, b). Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``. 
Examples ======== >>> from sympy.integrals.transforms import _rewrite_sin >>> from sympy import pi, S >>> from sympy.abc import s >>> _rewrite_sin((pi, 0), s, 0, 1) (gamma(s), gamma(1 - s), pi) >>> _rewrite_sin((pi, 0), s, 1, 0) (gamma(s - 1), gamma(2 - s), -pi) >>> _rewrite_sin((pi, 0), s, -1, 0) (gamma(s + 1), gamma(-s), -pi) >>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2) (gamma(s - 1/2), gamma(3/2 - s), -pi) >>> _rewrite_sin((pi, pi), s, 0, 1) (gamma(s), gamma(1 - s), -pi) >>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2) (gamma(2*s), gamma(1 - 2*s), pi) >>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1) (gamma(2*s - 1), gamma(2 - 2*s), -pi) """ # (This is a separate function because it is moderately complicated, # and I want to doctest it.) # We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x). # But there is one comlication: the gamma functions determine the # inegration contour in the definition of the G-function. Usually # it would not matter if this is slightly shifted, unless this way # we create an undefined function! # So we try to write this in such a way that the gammas are # eminently on the right side of the strip. from sympy import expand_mul, pi, ceiling, gamma m, n = m_n m = expand_mul(m/pi) n = expand_mul(n/pi) r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi class MellinTransformStripError(ValueError): """ Exception raised by _rewrite_gamma. Mainly for internal use. """ pass def _rewrite_gamma(f, s, a, b): """ Try to rewrite the product f(s) as a product of gamma functions, so that the inverse Mellin transform of f can be expressed as a meijer G function. Explanation =========== Return (an, ap), (bm, bq), arg, exp, fac such that G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s). Raises IntegralTransformError or MellinTransformStripError on failure. It is asserted that f has no poles in the fundamental strip designated by (a, b). One of a and b is allowed to be None. The fundamental strip is important, because it determines the inversion contour. This function can handle exponentials, linear factors, trigonometric functions. This is a helper function for inverse_mellin_transform that will not attempt any transformations on f. Examples ======== >>> from sympy.integrals.transforms import _rewrite_gamma >>> from sympy.abc import s >>> from sympy import oo >>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo) (([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1) >>> _rewrite_gamma((s-1)**2, s, -oo, oo) (([], [1, 1]), ([2, 2], []), 1, 1, 1) Importance of the fundamental strip: >>> _rewrite_gamma(1/s, s, 0, oo) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, None, oo) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, 0, None) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, -oo, 0) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(1/s, s, None, 0) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(1/s, s, -oo, None) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(2**(-s+3), s, -oo, oo) (([], []), ([], []), 1/2, 1, 8) """ from itertools import repeat from sympy import (Poly, gamma, Mul, re, CRootOf, exp as exp_, expand, roots, ilcm, pi, sin, cos, tan, cot, igcd, exp_polar) # Our strategy will be as follows: # 1) Guess a constant c such that the inversion integral should be # performed wrt s'=c*s (instead of plain s). Write s for s'. # 2) Process all factors, rewrite them independently as gamma functions in # argument s, or exponentials of s. 
# 3) Try to transform all gamma functions s.t. they have argument # a+s or a-s. # 4) Check that the resulting G function parameters are valid. # 5) Combine all the exponentials. a_, b_ = S([a, b]) def left(c, is_numer): """ Decide whether pole at c lies to the left of the fundamental strip. """ # heuristically, this is the best chance for us to solve the inequalities c = expand(re(c)) if a_ is None and b_ is oo: return True if a_ is None: return c < b_ if b_ is None: return c <= a_ if (c >= b_) == True: return False if (c <= a_) == True: return True if is_numer: return None if a_.free_symbols or b_.free_symbols or c.free_symbols: return None # XXX #raise IntegralTransformError('Inverse Mellin', f, # 'Could not determine position of singularity %s' # ' relative to fundamental strip' % c) raise MellinTransformStripError('Pole inside critical strip?') # 1) s_multipliers = [] for g in f.atoms(gamma): if not g.has(s): continue arg = g.args[0] if arg.is_Add: arg = arg.as_independent(s)[1] coeff, _ = arg.as_coeff_mul(s) s_multipliers += [coeff] for g in f.atoms(sin, cos, tan, cot): if not g.has(s): continue arg = g.args[0] if arg.is_Add: arg = arg.as_independent(s)[1] coeff, _ = arg.as_coeff_mul(s) s_multipliers += [coeff/pi] s_multipliers = [abs(x) if x.is_extended_real else x for x in s_multipliers] common_coefficient = S.One for x in s_multipliers: if not x.is_Rational: common_coefficient = x break s_multipliers = [x/common_coefficient for x in s_multipliers] if (any(not x.is_Rational for x in s_multipliers) or not common_coefficient.is_extended_real): raise IntegralTransformError("Gamma", None, "Nonrational multiplier") s_multiplier = common_coefficient/reduce(ilcm, [S(x.q) for x in s_multipliers], S.One) if s_multiplier == common_coefficient: if len(s_multipliers) == 0: s_multiplier = common_coefficient else: s_multiplier = common_coefficient \ *reduce(igcd, [S(x.p) for x in s_multipliers]) f = f.subs(s, s/s_multiplier) fac = S.One/s_multiplier exponent = S.One/s_multiplier if a_ is not None: a_ *= s_multiplier if b_ is not None: b_ *= s_multiplier # 2) numer, denom = f.as_numer_denom() numer = Mul.make_args(numer) denom = Mul.make_args(denom) args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False))) facs = [] dfacs = [] # *_gammas will contain pairs (a, c) representing Gamma(a*s + c) numer_gammas = [] denom_gammas = [] # exponentials will contain bases for exponentials of s exponentials = [] def exception(fact): return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact) while args: fact, is_numer = args.pop() if is_numer: ugammas, lgammas = numer_gammas, denom_gammas ufacs = facs else: ugammas, lgammas = denom_gammas, numer_gammas ufacs = dfacs def linear_arg(arg): """ Test if arg is of form a*s+b, raise exception if not. """ if not arg.is_polynomial(s): raise exception(fact) p = Poly(arg, s) if p.degree() != 1: raise exception(fact) return p.all_coeffs() # constants if not fact.has(s): ufacs += [fact] # exponentials elif fact.is_Pow or isinstance(fact, exp_): if fact.is_Pow: base = fact.base exp = fact.exp else: base = exp_polar(1) exp = fact.args[0] if exp.is_Integer: cond = is_numer if exp < 0: cond = not cond args += [(base, cond)]*abs(exp) continue elif not base.has(s): a, b = linear_arg(exp) if not is_numer: base = 1/base exponentials += [base**a] facs += [base**b] else: raise exception(fact) # linear factors elif fact.is_polynomial(s): p = Poly(fact, s) if p.degree() != 1: # We completely factor the poly. For this we need the roots. 
# Now roots() only works in some cases (low degree), and CRootOf # only works without parameters. So try both... coeff = p.LT()[1] rs = roots(p, s) if len(rs) != p.degree(): rs = CRootOf.all_roots(p) ufacs += [coeff] args += [(s - c, is_numer) for c in rs] continue a, c = p.all_coeffs() ufacs += [a] c /= -a # Now need to convert s - c if left(c, is_numer): ugammas += [(S.One, -c + 1)] lgammas += [(S.One, -c)] else: ufacs += [-1] ugammas += [(S.NegativeOne, c + 1)] lgammas += [(S.NegativeOne, c)] elif isinstance(fact, gamma): a, b = linear_arg(fact.args[0]) if is_numer: if (a > 0 and (left(-b/a, is_numer) == False)) or \ (a < 0 and (left(-b/a, is_numer) == True)): raise NotImplementedError( 'Gammas partially over the strip.') ugammas += [(a, b)] elif isinstance(fact, sin): # We try to re-write all trigs as gammas. This is not in # general the best strategy, since sometimes this is impossible, # but rewriting as exponentials would work. However trig functions # in inverse mellin transforms usually all come from simplifying # gamma terms, so this should work. a = fact.args[0] if is_numer: # No problem with the poles. gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi else: gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_) args += [(gamma1, not is_numer), (gamma2, not is_numer)] ufacs += [fac_] elif isinstance(fact, tan): a = fact.args[0] args += [(sin(a, evaluate=False), is_numer), (sin(pi/2 - a, evaluate=False), not is_numer)] elif isinstance(fact, cos): a = fact.args[0] args += [(sin(pi/2 - a, evaluate=False), is_numer)] elif isinstance(fact, cot): a = fact.args[0] args += [(sin(pi/2 - a, evaluate=False), is_numer), (sin(a, evaluate=False), not is_numer)] else: raise exception(fact) fac *= Mul(*facs)/Mul(*dfacs) # 3) an, ap, bm, bq = [], [], [], [] for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True), (denom_gammas, bq, ap, False)]: while gammas: a, c = gammas.pop() if a != -1 and a != +1: # We use the gamma function multiplication theorem. p = abs(S(a)) newa = a/p newc = c/p if not a.is_Integer: raise TypeError("a is not an integer") for k in range(p): gammas += [(newa, newc + k/p)] if is_numer: fac *= (2*pi)**((1 - p)/2) * p**(c - S.Half) exponentials += [p**a] else: fac /= (2*pi)**((1 - p)/2) * p**(c - S.Half) exponentials += [p**(-a)] continue if a == +1: plus.append(1 - c) else: minus.append(c) # 4) # TODO # 5) arg = Mul(*exponentials) # for testability, sort the arguments an.sort(key=default_sort_key) ap.sort(key=default_sort_key) bm.sort(key=default_sort_key) bq.sort(key=default_sort_key) return (an, ap), (bm, bq), arg, exponent, fac @_noconds_(True) def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False): """ A helper for the real inverse_mellin_transform function, this one here assumes x to be real and positive. """ from sympy import (expand, expand_mul, hyperexpand, meijerg, arg, pi, re, factor, Heaviside, gamma, Add) x = _dummy('t', 'inverse-mellin-transform', F, positive=True) # Actually, we won't try integration at all. Instead we use the definition # of the Meijer G function as a fairly general inverse mellin transform. 
F = F.rewrite(gamma) for g in [factor(F), expand_mul(F), expand(F)]: if g.is_Add: # do all terms separately ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg, noconds=False) for G in g.args] conds = [p[1] for p in ress] ress = [p[0] for p in ress] res = Add(*ress) if not as_meijerg: res = factor(res, gens=res.atoms(Heaviside)) return res.subs(x, x_), And(*conds) try: a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1]) except IntegralTransformError: continue try: G = meijerg(a, b, C/x**e) except ValueError: continue if as_meijerg: h = G else: try: h = hyperexpand(G) except NotImplementedError: raise IntegralTransformError( 'Inverse Mellin', F, 'Could not calculate integral') if h.is_Piecewise and len(h.args) == 3: # XXX we break modularity here! h = Heaviside(x - abs(C))*h.args[0].args[0] \ + Heaviside(abs(C) - x)*h.args[1].args[0] # We must ensure that the integral along the line we want converges, # and return that value. # See [L], 5.2 cond = [abs(arg(G.argument)) < G.delta*pi] # Note: we allow ">=" here, this corresponds to convergence if we let # limits go to oo symmetrically. ">" corresponds to absolute convergence. cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1), abs(arg(G.argument)) == G.delta*pi)] cond = Or(*cond) if cond == False: raise IntegralTransformError( 'Inverse Mellin', F, 'does not converge') return (h*fac).subs(x, x_), cond raise IntegralTransformError('Inverse Mellin', F, '') _allowed = None class InverseMellinTransform(IntegralTransform): """ Class representing unevaluated inverse Mellin transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Mellin transforms, see the :func:`inverse_mellin_transform` docstring. """ _name = 'Inverse Mellin' _none_sentinel = Dummy('None') _c = Dummy('c') def __new__(cls, F, s, x, a, b, **opts): if a is None: a = InverseMellinTransform._none_sentinel if b is None: b = InverseMellinTransform._none_sentinel return IntegralTransform.__new__(cls, F, s, x, a, b, **opts) @property def fundamental_strip(self): a, b = self.args[3], self.args[4] if a is InverseMellinTransform._none_sentinel: a = None if b is InverseMellinTransform._none_sentinel: b = None return a, b def _compute_transform(self, F, s, x, **hints): from sympy import postorder_traversal global _allowed if _allowed is None: from sympy import ( exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth, factorial, rf) _allowed = { exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth, factorial, rf} for f in postorder_traversal(F): if f.is_Function and f.has(s) and f.func not in _allowed: raise IntegralTransformError('Inverse Mellin', F, 'Component %s not recognised.' % f) strip = self.fundamental_strip return _inverse_mellin_transform(F, s, x, strip, **hints) def _as_integral(self, F, s, x): from sympy import I c = self.__class__._c return Integral(F*x**(-s), (s, c - I*oo, c + I*oo))/(2*S.Pi*S.ImaginaryUnit) def inverse_mellin_transform(F, s, x, strip, **hints): r""" Compute the inverse Mellin transform of `F(s)` over the fundamental strip given by ``strip=(a, b)``. Explanation =========== This can be defined as .. math:: f(x) = \frac{1}{2\pi i} \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s, for any `c` in the fundamental strip. Under certain regularity conditions on `F` and/or `f`, this recovers `f` from its Mellin transform `F` (and vice versa), for positive real `x`. One of `a` or `b` may be passed as ``None``; a suitable `c` will be inferred. 
If the integral cannot be computed in closed form, this function returns an unevaluated :class:`InverseMellinTransform` object. Note that this function will assume x to be positive and real, regardless of the sympy assumptions! For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Examples ======== >>> from sympy.integrals.transforms import inverse_mellin_transform >>> from sympy import oo, gamma >>> from sympy.abc import x, s >>> inverse_mellin_transform(gamma(s), s, x, (0, oo)) exp(-x) The fundamental strip matters: >>> f = 1/(s**2 - 1) >>> inverse_mellin_transform(f, s, x, (-oo, -1)) x*(1 - 1/x**2)*Heaviside(x - 1)/2 >>> inverse_mellin_transform(f, s, x, (-1, 1)) -x*Heaviside(1 - x)/2 - Heaviside(x - 1)/(2*x) >>> inverse_mellin_transform(f, s, x, (1, oo)) (1/2 - x**2/2)*Heaviside(1 - x)/x See Also ======== mellin_transform hankel_transform, inverse_hankel_transform """ return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints) ########################################################################## # Laplace Transform ########################################################################## def _simplifyconds(expr, s, a): r""" Naively simplify some conditions occurring in ``expr``, given that `\operatorname{Re}(s) > a`. Examples ======== >>> from sympy.integrals.transforms import _simplifyconds as simp >>> from sympy.abc import x >>> from sympy import sympify as S >>> simp(abs(x**2) < 1, x, 1) False >>> simp(abs(x**2) < 1, x, 2) False >>> simp(abs(x**2) < 1, x, 0) Abs(x**2) < 1 >>> simp(abs(1/x**2) < 1, x, 1) True >>> simp(S(1) < abs(x), x, 1) True >>> simp(S(1) < abs(1/x), x, 1) False >>> from sympy import Ne >>> simp(Ne(1, x**3), x, 1) True >>> simp(Ne(1, x**3), x, 2) True >>> simp(Ne(1, x**3), x, 0) Ne(1, x**3) """ from sympy.core.relational import ( StrictGreaterThan, StrictLessThan, Unequality ) from sympy import Abs def power(ex): if ex == s: return 1 if ex.is_Pow and ex.base == s: return ex.exp return None def bigger(ex1, ex2): """ Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|. Else return None. """ if ex1.has(s) and ex2.has(s): return None if isinstance(ex1, Abs): ex1 = ex1.args[0] if isinstance(ex2, Abs): ex2 = ex2.args[0] if ex1.has(s): return bigger(1/ex2, 1/ex1) n = power(ex2) if n is None: return None try: if n > 0 and (abs(ex1) <= abs(a)**n) == True: return False if n < 0 and (abs(ex1) >= abs(a)**n) == True: return True except TypeError: pass def replie(x, y): """ simplify x < y """ if not (x.is_positive or isinstance(x, Abs)) \ or not (y.is_positive or isinstance(y, Abs)): return (x < y) r = bigger(x, y) if r is not None: return not r return (x < y) def replue(x, y): b = bigger(x, y) if b == True or b == False: return True return Unequality(x, y) def repl(ex, *args): if ex == True or ex == False: return bool(ex) return ex.replace(*args) from sympy.simplify.radsimp import collect_abs expr = collect_abs(expr) expr = repl(expr, StrictLessThan, replie) expr = repl(expr, StrictGreaterThan, lambda x, y: replie(y, x)) expr = repl(expr, Unequality, replue) return S(expr) @_noconds def _laplace_transform(f, t, s_, simplify=True): """ The backend function for Laplace transforms. 
""" from sympy import (re, Max, exp, pi, Min, periodic_argument as arg_, arg, cos, Wild, symbols, polar_lift) s = Dummy('s') F = integrate(exp(-s*t) * f, (t, 0, oo)) if not F.has(Integral): return _simplify(F.subs(s, s_), simplify), -oo, S.true if not F.is_Piecewise: raise IntegralTransformError( 'Laplace', f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError( 'Laplace', f, 'integral in unexpected form') def process_conds(conds): """ Turn ``conds`` into a strip and auxiliary conditions. """ a = -oo aux = S.true conds = conjuncts(to_cnf(conds)) p, q, w1, w2, w3, w4, w5 = symbols( 'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s]) patterns = ( p*abs(arg((s + w3)*q)) < w2, p*abs(arg((s + w3)*q)) <= w2, abs(arg_((s + w3)**p*q, w1)) < w2, abs(arg_((s + w3)**p*q, w1)) <= w2, abs(arg_((polar_lift(s + w3))**p*q, w1)) < w2, abs(arg_((polar_lift(s + w3))**p*q, w1)) <= w2) for c in conds: a_ = oo aux_ = [] for d in disjuncts(c): if d.is_Relational and s in d.rhs.free_symbols: d = d.reversed if d.is_Relational and isinstance(d, (Ge, Gt)): d = d.reversedsign for pat in patterns: m = d.match(pat) if m: break if m: if m[q].is_positive and m[w2]/m[p] == pi/2: d = -re(s + m[w3]) < 0 m = d.match(p - cos(w1*abs(arg(s*w5))*w2)*abs(s**w3)**w4 < 0) if not m: m = d.match( cos(p - abs(arg_(s**w1*w5, q))*w2)*abs(s**w3)**w4 < 0) if not m: m = d.match( p - cos(abs(arg_(polar_lift(s)**w1*w5, q))*w2 )*abs(s**w3)**w4 < 0) if m and all(m[wild].is_positive for wild in [w1, w2, w3, w4, w5]): d = re(s) > m[p] d_ = d.replace( re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t) if not d.is_Relational or \ d.rel_op in ('==', '!=') \ or d_.has(s) or not d_.has(t): aux_ += [d] continue soln = _solve_inequality(d_, t) if not soln.is_Relational or \ soln.rel_op in ('==', '!='): aux_ += [d] continue if soln.lts == t: raise IntegralTransformError('Laplace', f, 'convergence not in half-plane?') else: a_ = Min(soln.lts, a_) if a_ != oo: a = Max(a_, a) else: aux = And(aux, Or(*aux_)) return a, aux conds = [process_conds(c) for c in disjuncts(cond)] conds2 = [x for x in conds if x[1] != False and x[0] != -oo] if not conds2: conds2 = [x for x in conds if x[1] != False] conds = conds2 def cnt(expr): if expr == True or expr == False: return 0 return expr.count_ops() conds.sort(key=lambda x: (-x[0], cnt(x[1]))) if not conds: raise IntegralTransformError('Laplace', f, 'no convergence found') a, aux = conds[0] def sbs(expr): return expr.subs(s, s_) if simplify: F = _simplifyconds(F, s, a) aux = _simplifyconds(aux, s, a) return _simplify(F.subs(s, s_), simplify), sbs(a), _canonical(sbs(aux)) class LaplaceTransform(IntegralTransform): """ Class representing unevaluated Laplace transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Laplace transforms, see the :func:`laplace_transform` docstring. """ _name = 'Laplace' def _compute_transform(self, f, t, s, **hints): return _laplace_transform(f, t, s, **hints) def _as_integral(self, f, t, s): from sympy import exp return Integral(f*exp(-s*t), (t, 0, oo)) def _collapse_extra(self, extra): from sympy import Max conds = [] planes = [] for plane, cond in extra: conds.append(cond) planes.append(plane) cond = And(*conds) plane = Max(*planes) if cond == False: raise IntegralTransformError( 'Laplace', None, 'No combined convergence.') return plane, cond def laplace_transform(f, t, s, **hints): r""" Compute the Laplace Transform `F(s)` of `f(t)`, .. math :: F(s) = \int_0^\infty e^{-st} f(t) \mathrm{d}t. 
Explanation =========== For all "sensible" functions, this converges absolutely in a half plane `a < \operatorname{Re}(s)`. This function returns ``(F, a, cond)`` where ``F`` is the Laplace transform of ``f``, `\operatorname{Re}(s) > a` is the half-plane of convergence, and ``cond`` are auxiliary convergence conditions. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`LaplaceTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``, only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``). Examples ======== >>> from sympy.integrals import laplace_transform >>> from sympy.abc import t, s, a >>> laplace_transform(t**a, t, s) (s**(-a)*gamma(a + 1)/s, 0, re(a) > -1) See Also ======== inverse_laplace_transform, mellin_transform, fourier_transform hankel_transform, inverse_hankel_transform """ if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'): return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints)) return LaplaceTransform(f, t, s).doit(**hints) @_noconds_(True) def _inverse_laplace_transform(F, s, t_, plane, simplify=True): """ The backend function for inverse Laplace transforms. """ from sympy import exp, Heaviside, log, expand_complex, Integral, Piecewise from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp # There are two strategies we can try: # 1) Use inverse mellin transforms - related by a simple change of variables. # 2) Use the inversion integral. t = Dummy('t', real=True) def pw_simp(*args): """ Simplify a piecewise expression from hyperexpand. """ # XXX we break modularity here! if len(args) != 3: return Piecewise(*args) arg = args[2].args[0].argument coeff, exponent = _get_coeff_exp(arg, t) e1 = args[0].args[0] e2 = args[1].args[0] return Heaviside(1/abs(coeff) - t**exponent)*e1 \ + Heaviside(t**exponent - 1/abs(coeff))*e2 try: f, cond = inverse_mellin_transform(F, s, exp(-t), (None, oo), needeval=True, noconds=False) except IntegralTransformError: f = None if f is None: f = meijerint_inversion(F, s, t) if f is None: raise IntegralTransformError('Inverse Laplace', f, '') if f.is_Piecewise: f, cond = f.args[0] if f.has(Integral): raise IntegralTransformError('Inverse Laplace', f, 'inversion integral of unrecognised form.') else: cond = S.true f = f.replace(Piecewise, pw_simp) if f.is_Piecewise: # many of the functions called below can't work with piecewise # (b/c it has a bool in args) return f.subs(t, t_), cond u = Dummy('u') def simp_heaviside(arg): a = arg.subs(exp(-t), u) if a.has(t): return Heaviside(arg) rel = _solve_inequality(a > 0, u) if rel.lts == u: k = log(rel.gts) return Heaviside(t + k) else: k = log(rel.lts) return Heaviside(-(t + k)) f = f.replace(Heaviside, simp_heaviside) def simp_exp(arg): return expand_complex(exp(arg)) f = f.replace(exp, simp_exp) # TODO it would be nice to fix cosh and sinh ... simplify messes these # exponentials up return _simplify(f.subs(t, t_), simplify), cond class InverseLaplaceTransform(IntegralTransform): """ Class representing unevaluated inverse Laplace transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Laplace transforms, see the :func:`inverse_laplace_transform` docstring. 
""" _name = 'Inverse Laplace' _none_sentinel = Dummy('None') _c = Dummy('c') def __new__(cls, F, s, x, plane, **opts): if plane is None: plane = InverseLaplaceTransform._none_sentinel return IntegralTransform.__new__(cls, F, s, x, plane, **opts) @property def fundamental_plane(self): plane = self.args[3] if plane is InverseLaplaceTransform._none_sentinel: plane = None return plane def _compute_transform(self, F, s, t, **hints): return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints) def _as_integral(self, F, s, t): from sympy import I, exp c = self.__class__._c return Integral(exp(s*t)*F, (s, c - I*oo, c + I*oo))/(2*S.Pi*S.ImaginaryUnit) def inverse_laplace_transform(F, s, t, plane=None, **hints): r""" Compute the inverse Laplace transform of `F(s)`, defined as .. math :: f(t) = \frac{1}{2\pi i} \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s, for `c` so large that `F(s)` has no singularites in the half-plane `\operatorname{Re}(s) > c-\epsilon`. Explanation =========== The plane can be specified by argument ``plane``, but will be inferred if passed as None. Under certain regularity conditions, this recovers `f(t)` from its Laplace Transform `F(s)`, for non-negative `t`, and vice versa. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`InverseLaplaceTransform` object. Note that this function will always assume `t` to be real, regardless of the sympy assumption on `t`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Examples ======== >>> from sympy.integrals.transforms import inverse_laplace_transform >>> from sympy import exp, Symbol >>> from sympy.abc import s, t >>> a = Symbol('a', positive=True) >>> inverse_laplace_transform(exp(-a*s)/s, s, t) Heaviside(-a + t) See Also ======== laplace_transform hankel_transform, inverse_hankel_transform """ if isinstance(F, MatrixBase) and hasattr(F, 'applyfunc'): return F.applyfunc(lambda Fij: inverse_laplace_transform(Fij, s, t, plane, **hints)) return InverseLaplaceTransform(F, s, t, plane).doit(**hints) ########################################################################## # Fourier Transform ########################################################################## @_noconds_(True) def _fourier_transform(f, x, k, a, b, name, simplify=True): r""" Compute a general Fourier-type transform .. math:: F(k) = a \int_{-\infty}^{\infty} e^{bixk} f(x)\, dx. For suitable choice of *a* and *b*, this reduces to the standard Fourier and inverse Fourier transforms. 
""" from sympy import exp, I F = integrate(a*f*exp(b*I*x*k), (x, -oo, oo)) if not F.has(Integral): return _simplify(F, simplify), S.true integral_f = integrate(f, (x, -oo, oo)) if integral_f in (-oo, oo, S.NaN) or integral_f.has(Integral): raise IntegralTransformError(name, f, 'function not integrable on real axis') if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class FourierTypeTransform(IntegralTransform): """ Base class for Fourier transforms.""" def a(self): raise NotImplementedError( "Class %s must implement a(self) but does not" % self.__class__) def b(self): raise NotImplementedError( "Class %s must implement b(self) but does not" % self.__class__) def _compute_transform(self, f, x, k, **hints): return _fourier_transform(f, x, k, self.a(), self.b(), self.__class__._name, **hints) def _as_integral(self, f, x, k): from sympy import exp, I a = self.a() b = self.b() return Integral(a*f*exp(b*I*x*k), (x, -oo, oo)) class FourierTransform(FourierTypeTransform): """ Class representing unevaluated Fourier transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Fourier transforms, see the :func:`fourier_transform` docstring. """ _name = 'Fourier' def a(self): return 1 def b(self): return -2*S.Pi def fourier_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency Fourier transform of ``f``, defined as .. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`FourierTransform` object. For other Fourier transform conventions, see the function :func:`sympy.integrals.transforms._fourier_transform`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import fourier_transform, exp >>> from sympy.abc import x, k >>> fourier_transform(exp(-x**2), x, k) sqrt(pi)*exp(-pi**2*k**2) >>> fourier_transform(exp(-x**2), x, k, noconds=False) (sqrt(pi)*exp(-pi**2*k**2), True) See Also ======== inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return FourierTransform(f, x, k).doit(**hints) class InverseFourierTransform(FourierTypeTransform): """ Class representing unevaluated inverse Fourier transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Fourier transforms, see the :func:`inverse_fourier_transform` docstring. """ _name = 'Inverse Fourier' def a(self): return 1 def b(self): return 2*S.Pi def inverse_fourier_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse Fourier transform of `F`, defined as .. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseFourierTransform` object. For other Fourier transform conventions, see the function :func:`sympy.integrals.transforms._fourier_transform`. 
For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import inverse_fourier_transform, exp, sqrt, pi >>> from sympy.abc import x, k >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x) exp(-x**2) >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False) (exp(-x**2), True) See Also ======== fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseFourierTransform(F, k, x).doit(**hints) ########################################################################## # Fourier Sine and Cosine Transform ########################################################################## from sympy import sin, cos, sqrt, pi @_noconds_(True) def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True): """ Compute a general sine or cosine-type transform F(k) = a int_0^oo b*sin(x*k) f(x) dx. F(k) = a int_0^oo b*cos(x*k) f(x) dx. For suitable choice of a and b, this reduces to the standard sine/cosine and inverse sine/cosine transforms. """ F = integrate(a*f*K(b*x*k), (x, 0, oo)) if not F.has(Integral): return _simplify(F, simplify), S.true if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class SineCosineTypeTransform(IntegralTransform): """ Base class for sine and cosine transforms. Specify cls._kern. """ def a(self): raise NotImplementedError( "Class %s must implement a(self) but does not" % self.__class__) def b(self): raise NotImplementedError( "Class %s must implement b(self) but does not" % self.__class__) def _compute_transform(self, f, x, k, **hints): return _sine_cosine_transform(f, x, k, self.a(), self.b(), self.__class__._kern, self.__class__._name, **hints) def _as_integral(self, f, x, k): a = self.a() b = self.b() K = self.__class__._kern return Integral(a*f*K(b*x*k), (x, 0, oo)) class SineTransform(SineCosineTypeTransform): """ Class representing unevaluated sine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute sine transforms, see the :func:`sine_transform` docstring. """ _name = 'Sine' _kern = sin def a(self): return sqrt(2)/sqrt(pi) def b(self): return 1 def sine_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency sine transform of `f`, defined as .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`SineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. 
Examples ======== >>> from sympy import sine_transform, exp >>> from sympy.abc import x, k, a >>> sine_transform(x*exp(-a*x**2), x, k) sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2)) >>> sine_transform(x**(-a), x, k) 2**(1/2 - a)*k**(a - 1)*gamma(1 - a/2)/gamma(a/2 + 1/2) See Also ======== fourier_transform, inverse_fourier_transform inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return SineTransform(f, x, k).doit(**hints) class InverseSineTransform(SineCosineTypeTransform): """ Class representing unevaluated inverse sine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse sine transforms, see the :func:`inverse_sine_transform` docstring. """ _name = 'Inverse Sine' _kern = sin def a(self): return sqrt(2)/sqrt(pi) def b(self): return 1 def inverse_sine_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse sine transform of `F`, defined as .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseSineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import inverse_sine_transform, exp, sqrt, gamma >>> from sympy.abc import x, k, a >>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)* ... gamma(-a/2 + 1)/gamma((a+1)/2), k, x) x**(-a) >>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x) x*exp(-a*x**2) See Also ======== fourier_transform, inverse_fourier_transform sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseSineTransform(F, k, x).doit(**hints) class CosineTransform(SineCosineTypeTransform): """ Class representing unevaluated cosine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute cosine transforms, see the :func:`cosine_transform` docstring. """ _name = 'Cosine' _kern = cos def a(self): return sqrt(2)/sqrt(pi) def b(self): return 1 def cosine_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency cosine transform of `f`, defined as .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`CosineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import cosine_transform, exp, sqrt, cos >>> from sympy.abc import x, k, a >>> cosine_transform(exp(-a*x), x, k) sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)) >>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k) a*exp(-a**2/(2*k))/(2*k**(3/2)) See Also ======== fourier_transform, inverse_fourier_transform, sine_transform, inverse_sine_transform inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return CosineTransform(f, x, k).doit(**hints) class InverseCosineTransform(SineCosineTypeTransform): """ Class representing unevaluated inverse cosine transforms. 
For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse cosine transforms, see the :func:`inverse_cosine_transform` docstring. """ _name = 'Inverse Cosine' _kern = cos def a(self): return sqrt(2)/sqrt(pi) def b(self): return 1 def inverse_cosine_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse cosine transform of `F`, defined as .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseCosineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import inverse_cosine_transform, sqrt, pi >>> from sympy.abc import x, k, a >>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x) exp(-a*x) >>> inverse_cosine_transform(1/sqrt(k), k, x) 1/sqrt(x) See Also ======== fourier_transform, inverse_fourier_transform, sine_transform, inverse_sine_transform cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseCosineTransform(F, k, x).doit(**hints) ########################################################################## # Hankel Transform ########################################################################## @_noconds_(True) def _hankel_transform(f, r, k, nu, name, simplify=True): r""" Compute a general Hankel transform .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. """ from sympy import besselj F = integrate(f*besselj(nu, k*r)*r, (r, 0, oo)) if not F.has(Integral): return _simplify(F, simplify), S.true if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class HankelTypeTransform(IntegralTransform): """ Base class for Hankel transforms. """ def doit(self, **hints): return self._compute_transform(self.function, self.function_variable, self.transform_variable, self.args[3], **hints) def _compute_transform(self, f, r, k, nu, **hints): return _hankel_transform(f, r, k, nu, self._name, **hints) def _as_integral(self, f, r, k, nu): from sympy import besselj return Integral(f*besselj(nu, k*r)*r, (r, 0, oo)) @property def as_integral(self): return self._as_integral(self.function, self.function_variable, self.transform_variable, self.args[3]) class HankelTransform(HankelTypeTransform): """ Class representing unevaluated Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Hankel transforms, see the :func:`hankel_transform` docstring. """ _name = 'Hankel' def hankel_transform(f, r, k, nu, **hints): r""" Compute the Hankel transform of `f`, defined as .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`HankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. 
Examples ======== >>> from sympy import hankel_transform, inverse_hankel_transform >>> from sympy import exp >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform inverse_hankel_transform mellin_transform, laplace_transform """ return HankelTransform(f, r, k, nu).doit(**hints) class InverseHankelTransform(HankelTypeTransform): """ Class representing unevaluated inverse Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Hankel transforms, see the :func:`inverse_hankel_transform` docstring. """ _name = 'Inverse Hankel' def inverse_hankel_transform(F, k, r, nu, **hints): r""" Compute the inverse Hankel transform of `F` defined as .. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseHankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import hankel_transform, inverse_hankel_transform >>> from sympy import exp >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform mellin_transform, laplace_transform """ return InverseHankelTransform(F, k, r, nu).doit(**hints)
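
# ---------------------------------------------------------------------------
# Illustrative sketch (added commentary, not part of the original module): a
# minimal, self-contained walk-through of the transform pairs defined above.
# The helper name ``_transform_examples`` is hypothetical and nothing in the
# module calls it; the expected values are those quoted in the docstrings.
def _transform_examples():
    from sympy import exp, gamma, oo
    from sympy.abc import x, s, t, k, a

    # Inverse Mellin transform of gamma(s) on the fundamental strip (0, oo):
    # expected result exp(-x).
    f = inverse_mellin_transform(gamma(s), s, x, (0, oo))

    # Laplace transform of t**a: expected (s**(-a)*gamma(a + 1)/s, 0, re(a) > -1),
    # i.e. the transform, the half-plane of convergence and an auxiliary condition.
    F, plane, cond = laplace_transform(t**a, t, s)

    # Unitary, ordinary-frequency Fourier transform of a Gaussian:
    # expected sqrt(pi)*exp(-pi**2*k**2).
    G = fourier_transform(exp(-x**2), x, k)

    return f, (F, plane, cond), G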
from typing import Dict, List from itertools import permutations from functools import reduce from sympy.core.add import Add from sympy.core.basic import Basic from sympy.core.mul import Mul from sympy.core.symbol import Wild, Dummy from sympy.core.basic import sympify from sympy.core.numbers import Rational, pi, I from sympy.core.relational import Eq, Ne from sympy.core.singleton import S from sympy.functions import exp, sin, cos, tan, cot, asin, atan from sympy.functions import log, sinh, cosh, tanh, coth, asinh, acosh from sympy.functions import sqrt, erf, erfi, li, Ei from sympy.functions import besselj, bessely, besseli, besselk from sympy.functions import hankel1, hankel2, jn, yn from sympy.functions.elementary.complexes import Abs, re, im, sign, arg from sympy.functions.elementary.exponential import LambertW from sympy.functions.elementary.integers import floor, ceiling from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.delta_functions import Heaviside, DiracDelta from sympy.simplify.radsimp import collect from sympy.logic.boolalg import And, Or from sympy.utilities.iterables import uniq from sympy.polys import quo, gcd, lcm, factor, cancel, PolynomialError from sympy.polys.monomials import itermonomials from sympy.polys.polyroots import root_factors from sympy.polys.rings import PolyRing from sympy.polys.solvers import solve_lin_sys from sympy.polys.constructor import construct_domain from sympy.core.compatibility import ordered from sympy.integrals.integrals import integrate def components(f, x): """ Returns a set of all functional components of the given expression which includes symbols, function applications and compositions and non-integer powers. Fractional powers are collected with minimal, positive exponents. Examples ======== >>> from sympy import cos, sin >>> from sympy.abc import x >>> from sympy.integrals.heurisch import components >>> components(sin(x)*cos(x)**2, x) {x, sin(x), cos(x)} See Also ======== heurisch """ result = set() if x in f.free_symbols: if f.is_symbol and f.is_commutative: result.add(f) elif f.is_Function or f.is_Derivative: for g in f.args: result |= components(g, x) result.add(f) elif f.is_Pow: result |= components(f.base, x) if not f.exp.is_Integer: if f.exp.is_Rational: result.add(f.base**Rational(1, f.exp.q)) else: result |= components(f.exp, x) | {f} else: for g in f.args: result |= components(g, x) return result # name -> [] of symbols _symbols_cache = {} # type: Dict[str, List[Dummy]] # NB @cacheit is not convenient here def _symbols(name, n): """get vector of symbols local to this module""" try: lsyms = _symbols_cache[name] except KeyError: lsyms = [] _symbols_cache[name] = lsyms while len(lsyms) < n: lsyms.append( Dummy('%s%i' % (name, len(lsyms))) ) return lsyms[:n] def heurisch_wrapper(f, x, rewrite=False, hints=None, mappings=None, retries=3, degree_offset=0, unnecessary_permutations=None, _try_heurisch=None): """ A wrapper around the heurisch integration algorithm. Explanation =========== This method takes the result from heurisch and checks for poles in the denominator. For each of these poles, the integral is reevaluated, and the final integration result is given in terms of a Piecewise. 
Examples ======== >>> from sympy.core import symbols >>> from sympy.functions import cos >>> from sympy.integrals.heurisch import heurisch, heurisch_wrapper >>> n, x = symbols('n x') >>> heurisch(cos(n*x), x) sin(n*x)/n >>> heurisch_wrapper(cos(n*x), x) Piecewise((sin(n*x)/n, Ne(n, 0)), (x, True)) See Also ======== heurisch """ from sympy.solvers.solvers import solve, denoms f = sympify(f) if x not in f.free_symbols: return f*x res = heurisch(f, x, rewrite, hints, mappings, retries, degree_offset, unnecessary_permutations, _try_heurisch) if not isinstance(res, Basic): return res # We consider each denominator in the expression, and try to find # cases where one or more symbolic denominator might be zero. The # conditions for these cases are stored in the list slns. slns = [] for d in denoms(res): try: slns += solve(d, dict=True, exclude=(x,)) except NotImplementedError: pass if not slns: return res slns = list(uniq(slns)) # Remove the solutions corresponding to poles in the original expression. slns0 = [] for d in denoms(f): try: slns0 += solve(d, dict=True, exclude=(x,)) except NotImplementedError: pass slns = [s for s in slns if s not in slns0] if not slns: return res if len(slns) > 1: eqs = [] for sub_dict in slns: eqs.extend([Eq(key, value) for key, value in sub_dict.items()]) slns = solve(eqs, dict=True, exclude=(x,)) + slns # For each case listed in the list slns, we reevaluate the integral. pairs = [] for sub_dict in slns: expr = heurisch(f.subs(sub_dict), x, rewrite, hints, mappings, retries, degree_offset, unnecessary_permutations, _try_heurisch) cond = And(*[Eq(key, value) for key, value in sub_dict.items()]) generic = Or(*[Ne(key, value) for key, value in sub_dict.items()]) if expr is None: expr = integrate(f.subs(sub_dict),x) pairs.append((expr, cond)) # If there is one condition, put the generic case first. Otherwise, # doing so may lead to longer Piecewise formulas if len(pairs) == 1: pairs = [(heurisch(f, x, rewrite, hints, mappings, retries, degree_offset, unnecessary_permutations, _try_heurisch), generic), (pairs[0][0], True)] else: pairs.append((heurisch(f, x, rewrite, hints, mappings, retries, degree_offset, unnecessary_permutations, _try_heurisch), True)) return Piecewise(*pairs) class BesselTable: """ Derivatives of Bessel functions of orders n and n-1 in terms of each other. See the docstring of DiffCache. """ def __init__(self): self.table = {} self.n = Dummy('n') self.z = Dummy('z') self._create_table() def _create_table(t): table, n, z = t.table, t.n, t.z for f in (besselj, bessely, hankel1, hankel2): table[f] = (f(n-1, z) - n*f(n, z)/z, (n-1)*f(n-1, z)/z - f(n, z)) f = besseli table[f] = (f(n-1, z) - n*f(n, z)/z, (n-1)*f(n-1, z)/z + f(n, z)) f = besselk table[f] = (-f(n-1, z) - n*f(n, z)/z, (n-1)*f(n-1, z)/z - f(n, z)) for f in (jn, yn): table[f] = (f(n-1, z) - (n+1)*f(n, z)/z, (n-1)*f(n-1, z)/z - f(n, z)) def diffs(t, f, n, z): if f in t.table: diff0, diff1 = t.table[f] repl = [(t.n, n), (t.z, z)] return (diff0.subs(repl), diff1.subs(repl)) def has(t, f): return f in t.table _bessel_table = None class DiffCache: """ Store for derivatives of expressions. Explanation =========== The standard form of the derivative of a Bessel function of order n contains two Bessel functions of orders n-1 and n+1, respectively. Such forms cannot be used in parallel Risch algorithm, because there is a linear recurrence relation between the three functions while the algorithm expects that functions and derivatives are represented in terms of algebraically independent transcendentals. 
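    For instance, for the Bessel function of the first kind,
    besselj(n, z).diff(z) == (besselj(n - 1, z) - besselj(n + 1, z))/2, and
    eliminating besselj(n + 1, z) with the recurrence
    besselj(n + 1, z) == 2*n*besselj(n, z)/z - besselj(n - 1, z) gives
    besselj(n, z).diff(z) == besselj(n - 1, z) - n*besselj(n, z)/z, which is
    exactly the form stored in BesselTable above.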
The solution is to take two of the functions, e.g., those of orders n and n-1, and to express the derivatives in terms of the pair. To guarantee that the proper form is used the two derivatives are cached as soon as one is encountered. Derivatives of other functions are also cached at no extra cost. All derivatives are with respect to the same variable `x`. """ def __init__(self, x): self.cache = {} self.x = x global _bessel_table if not _bessel_table: _bessel_table = BesselTable() def get_diff(self, f): cache = self.cache if f in cache: pass elif (not hasattr(f, 'func') or not _bessel_table.has(f.func)): cache[f] = cancel(f.diff(self.x)) else: n, z = f.args d0, d1 = _bessel_table.diffs(f.func, n, z) dz = self.get_diff(z) cache[f] = d0*dz cache[f.func(n-1, z)] = d1*dz return cache[f] def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3, degree_offset=0, unnecessary_permutations=None, _try_heurisch=None): """ Compute indefinite integral using heuristic Risch algorithm. Explanation =========== This is a heuristic approach to indefinite integration in finite terms using the extended heuristic (parallel) Risch algorithm, based on Manuel Bronstein's "Poor Man's Integrator". The algorithm supports various classes of functions including transcendental elementary or special functions like Airy, Bessel, Whittaker and Lambert. Note that this algorithm is not a decision procedure. If it isn't able to compute the antiderivative for a given function, then this is not a proof that such a functions does not exist. One should use recursive Risch algorithm in such case. It's an open question if this algorithm can be made a full decision procedure. This is an internal integrator procedure. You should use toplevel 'integrate' function in most cases, as this procedure needs some preprocessing steps and otherwise may fail. Specification ============= heurisch(f, x, rewrite=False, hints=None) where f : expression x : symbol rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh' hints -> a list of functions that may appear in anti-derivate - hints = None --> no suggestions at all - hints = [ ] --> try to figure out - hints = [f1, ..., fn] --> we know better Examples ======== >>> from sympy import tan >>> from sympy.integrals.heurisch import heurisch >>> from sympy.abc import x, y >>> heurisch(y*tan(x), x) y*log(tan(x)**2 + 1)/2 See Manuel Bronstein's "Poor Man's Integrator": References ========== .. [1] http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html For more information on the implemented algorithm refer to: .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration Method and its Implementation in Maple, Proceedings of ISSAC'89, ACM Press, 212-217. .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I), Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157. .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III): Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6. .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch Algorithm (II), ACM Transactions on Mathematical Software 11 (1985), 356-362. See Also ======== sympy.integrals.integrals.Integral.doit sympy.integrals.integrals.Integral sympy.integrals.heurisch.components """ f = sympify(f) # There are some functions that Heurisch cannot currently handle, # so do not even try. 
# Set _try_heurisch=True to skip this check if _try_heurisch is not True: if f.has(Abs, re, im, sign, Heaviside, DiracDelta, floor, ceiling, arg): return if x not in f.free_symbols: return f*x if not f.is_Add: indep, f = f.as_independent(x) else: indep = S.One rewritables = { (sin, cos, cot): tan, (sinh, cosh, coth): tanh, } if rewrite: for candidates, rule in rewritables.items(): f = f.rewrite(candidates, rule) else: for candidates in rewritables.keys(): if f.has(*candidates): break else: rewrite = True terms = components(f, x) if hints is not None: if not hints: a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) c = Wild('c', exclude=[x]) for g in set(terms): # using copy of terms if g.is_Function: if isinstance(g, li): M = g.args[0].match(a*x**b) if M is not None: terms.add( x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) #terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) #terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) #terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) ) elif isinstance(g, exp): M = g.args[0].match(a*x**2) if M is not None: if M[a].is_positive: terms.add(erfi(sqrt(M[a])*x)) else: # M[a].is_negative or unknown terms.add(erf(sqrt(-M[a])*x)) M = g.args[0].match(a*x**2 + b*x + c) if M is not None: if M[a].is_positive: terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))* erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a])))) elif M[a].is_negative: terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))* erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a])))) M = g.args[0].match(a*log(x)**2) if M is not None: if M[a].is_positive: terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a])))) if M[a].is_negative: terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a])))) elif g.is_Pow: if g.exp.is_Rational and g.exp.q == 2: M = g.base.match(a*x**2 + b) if M is not None and M[b].is_positive: if M[a].is_positive: terms.add(asinh(sqrt(M[a]/M[b])*x)) elif M[a].is_negative: terms.add(asin(sqrt(-M[a]/M[b])*x)) M = g.base.match(a*x**2 - b) if M is not None and M[b].is_positive: if M[a].is_positive: terms.add(acosh(sqrt(M[a]/M[b])*x)) elif M[a].is_negative: terms.add(-M[b]/2*sqrt(-M[a])* atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b]))) else: terms |= set(hints) dcache = DiffCache(x) for g in set(terms): # using copy of terms terms |= components(dcache.get_diff(g), x) # TODO: caching is significant factor for why permutations work at all. Change this. V = _symbols('x', len(terms)) # sort mapping expressions from largest to smallest (last is always x). 
mapping = list(reversed(list(zip(*ordered( # [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1])) # rev_mapping = {v: k for k, v in mapping} # if mappings is None: # # optimizing the number of permutations of mapping # assert mapping[-1][0] == x # if not, find it and correct this comment unnecessary_permutations = [mapping.pop(-1)] mappings = permutations(mapping) else: unnecessary_permutations = unnecessary_permutations or [] def _substitute(expr): return expr.subs(mapping) for mapping in mappings: mapping = list(mapping) mapping = mapping + unnecessary_permutations diffs = [ _substitute(dcache.get_diff(g)) for g in terms ] denoms = [ g.as_numer_denom()[1] for g in diffs ] if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V): denom = reduce(lambda p, q: lcm(p, q, *V), denoms) break else: if not rewrite: result = heurisch(f, x, rewrite=True, hints=hints, unnecessary_permutations=unnecessary_permutations) if result is not None: return indep*result return None numers = [ cancel(denom*g) for g in diffs ] def _derivation(h): return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ]) def _deflation(p): for y in V: if not p.has(y): continue if _derivation(p) is not S.Zero: c, q = p.as_poly(y).primitive() return _deflation(c)*gcd(q, q.diff(y)).as_expr() return p def _splitter(p): for y in V: if not p.has(y): continue if _derivation(y) is not S.Zero: c, q = p.as_poly(y).primitive() q = q.as_expr() h = gcd(q, _derivation(q), y) s = quo(h, gcd(q, q.diff(y), y), y) c_split = _splitter(c) if s.as_poly(y).degree() == 0: return (c_split[0], q * c_split[1]) q_split = _splitter(cancel(q / s)) return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1]) return (S.One, p) special = {} for term in terms: if term.is_Function: if isinstance(term, tan): special[1 + _substitute(term)**2] = False elif isinstance(term, tanh): special[1 + _substitute(term)] = False special[1 - _substitute(term)] = False elif isinstance(term, LambertW): special[_substitute(term)] = True F = _substitute(f) P, Q = F.as_numer_denom() u_split = _splitter(denom) v_split = _splitter(Q) polys = set(list(v_split) + [ u_split[0] ] + list(special.keys())) s = u_split[0] * Mul(*[ k for k, v in special.items() if v ]) polified = [ p.as_poly(*V) for p in [s, P, Q] ] if None in polified: return None #--- definitions for _integrate a, b, c = [ p.total_degree() for p in polified ] poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr() def _exponent(g): if g.is_Pow: if g.exp.is_Rational and g.exp.q != 1: if g.exp.p > 0: return g.exp.p + g.exp.q - 1 else: return abs(g.exp.p + g.exp.q) else: return 1 elif not g.is_Atom and g.args: return max([ _exponent(h) for h in g.args ]) else: return 1 A, B = _exponent(f), a + max(b, c) if A > 1 and B > 1: monoms = tuple(ordered(itermonomials(V, A + B - 1 + degree_offset))) else: monoms = tuple(ordered(itermonomials(V, A + B + degree_offset))) poly_coeffs = _symbols('A', len(monoms)) poly_part = Add(*[ poly_coeffs[i]*monomial for i, monomial in enumerate(monoms) ]) reducibles = set() for poly in polys: if poly.has(*V): try: factorization = factor(poly, greedy=True) except PolynomialError: factorization = poly if factorization.is_Mul: factors = factorization.args else: factors = (factorization, ) for fact in factors: if fact.is_Pow: reducibles.add(fact.base) else: reducibles.add(fact) def _integrate(field=None): irreducibles = set() atans = set() pairs = set() for poly in reducibles: for z in poly.free_symbols: if z in V: break # should this be: `irreducibles |= \ 
else: # set(root_factors(poly, z, filter=field))` continue # and the line below deleted? # | # V irreducibles |= set(root_factors(poly, z, filter=field)) log_part, atan_part = [], [] for poly in list(irreducibles): m = collect(poly, I, evaluate=False) y = m.get(I, S.Zero) if y: x = m.get(S.One, S.Zero) if x.has(I) or y.has(I): continue # nontrivial x + I*y pairs.add((x, y)) irreducibles.remove(poly) while pairs: x, y = pairs.pop() if (x, -y) in pairs: pairs.remove((x, -y)) # Choosing b with no minus sign if y.could_extract_minus_sign(): y = -y irreducibles.add(x*x + y*y) atans.add(atan(x/y)) else: irreducibles.add(x + I*y) B = _symbols('B', len(irreducibles)) C = _symbols('C', len(atans)) # Note: the ordering matters here for poly, b in reversed(list(zip(ordered(irreducibles), B))): if poly.has(*V): poly_coeffs.append(b) log_part.append(b * log(poly)) for poly, c in reversed(list(zip(ordered(atans), C))): if poly.has(*V): poly_coeffs.append(c) atan_part.append(c * poly) # TODO: Currently it's better to use symbolic expressions here instead # of rational functions, because it's simpler and FracElement doesn't # give big speed improvement yet. This is because cancellation is slow # due to slow polynomial GCD algorithms. If this gets improved then # revise this code. candidate = poly_part/poly_denom + Add(*log_part) + Add(*atan_part) h = F - _derivation(candidate) / denom raw_numer = h.as_numer_denom()[0] # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field # that we have to determine. We can't use simply atoms() because log(3), # sqrt(y) and similar expressions can appear, leading to non-trivial # domains. syms = set(poly_coeffs) | set(V) non_syms = set() def find_non_syms(expr): if expr.is_Integer or expr.is_Rational: pass # ignore trivial numbers elif expr in syms: pass # ignore variables elif not expr.has(*syms): non_syms.add(expr) elif expr.is_Add or expr.is_Mul or expr.is_Pow: list(map(find_non_syms, expr.args)) else: # TODO: Non-polynomial expression. This should have been # filtered out at an earlier stage. raise PolynomialError try: find_non_syms(raw_numer) except PolynomialError: return None else: ground, _ = construct_domain(non_syms, field=True) coeff_ring = PolyRing(poly_coeffs, ground) ring = PolyRing(V, coeff_ring) try: numer = ring.from_expr(raw_numer) except ValueError: raise PolynomialError solution = solve_lin_sys(numer.coeffs(), coeff_ring, _raw=False) if solution is None: return None else: return candidate.subs(solution).subs( list(zip(poly_coeffs, [S.Zero]*len(poly_coeffs)))) if not (F.free_symbols - set(V)): solution = _integrate('Q') if solution is None: solution = _integrate() else: solution = _integrate() if solution is not None: antideriv = solution.subs(rev_mapping) antideriv = cancel(antideriv).expand(force=True) if antideriv.is_Add: antideriv = antideriv.as_independent(x)[1] return indep*antideriv else: if retries >= 0: result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations) if result is not None: return indep*result return None
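
# ---------------------------------------------------------------------------
# Illustrative sketch (added commentary, not part of the original module): a
# minimal walk-through of the public entry points defined above.  The helper
# name ``_heurisch_examples`` is hypothetical and nothing in the module calls
# it; the expected values are those quoted in the docstrings.
def _heurisch_examples():
    from sympy import cos, sin, tan, symbols
    n, x, y = symbols('n x y')

    # components() collects the functional building blocks of an expression:
    # expected {x, sin(x), cos(x)} for sin(x)*cos(x)**2.
    parts = components(sin(x)*cos(x)**2, x)

    # heurisch() returns an antiderivative, or None if the heuristic fails:
    # expected y*log(tan(x)**2 + 1)/2 for y*tan(x).
    anti = heurisch(y*tan(x), x)

    # heurisch_wrapper() additionally splits on symbolic poles of the result:
    # expected Piecewise((sin(n*x)/n, Ne(n, 0)), (x, True)) for cos(n*x).
    pw = heurisch_wrapper(cos(n*x), x)

    return parts, anti, pw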
""" Algorithms for solving the Risch differential equation. Given a differential field K of characteristic 0 that is a simple monomial extension of a base field k and f, g in K, the Risch Differential Equation problem is to decide if there exist y in K such that Dy + f*y == g and to find one if there are some. If t is a monomial over k and the coefficients of f and g are in k(t), then y is in k(t), and the outline of the algorithm here is given as: 1. Compute the normal part n of the denominator of y. The problem is then reduced to finding y' in k<t>, where y == y'/n. 2. Compute the special part s of the denominator of y. The problem is then reduced to finding y'' in k[t], where y == y''/(n*s) 3. Bound the degree of y''. 4. Reduce the equation Dy + f*y == g to a similar equation with f, g in k[t]. 5. Find the solutions in k[t] of bounded degree of the reduced equation. See Chapter 6 of "Symbolic Integration I: Transcendental Functions" by Manuel Bronstein. See also the docstring of risch.py. """ from operator import mul from functools import reduce from sympy.core import oo from sympy.core.symbol import Dummy from sympy.polys import Poly, gcd, ZZ, cancel from sympy import sqrt, re, im from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, splitfactor, NonElementaryIntegralException, DecrementLevel, recognize_log_derivative) # TODO: Add messages to NonElementaryIntegralException errors def order_at(a, p, t): """ Computes the order of a at p, with respect to t. Explanation =========== For a, p in k[t], the order of a at p is defined as nu_p(a) = max({n in Z+ such that p**n|a}), where a != 0. If a == 0, nu_p(a) = +oo. To compute the order at a rational function, a/b, use the fact that nu_p(a/b) == nu_p(a) - nu_p(b). """ if a.is_zero: return oo if p == Poly(t, t): return a.as_poly(t).ET()[0][0] # Uses binary search for calculating the power. power_list collects the tuples # (p^k,k) where each k is some power of 2. After deciding the largest k # such that k is power of 2 and p^k|a the loop iteratively calculates # the actual power. power_list = [] p1 = p r = a.rem(p1) tracks_power = 1 while r.is_zero: power_list.append((p1,tracks_power)) p1 = p1*p1 tracks_power *= 2 r = a.rem(p1) n = 0 product = Poly(1, t) while len(power_list) != 0: final = power_list.pop() productf = product*final[0] r = a.rem(productf) if r.is_zero: n += final[1] product = productf return n def order_at_oo(a, d, t): """ Computes the order of a/d at oo (infinity), with respect to t. For f in k(t), the order or f at oo is defined as deg(d) - deg(a), where f == a/d. """ if a.is_zero: return oo return d.degree(t) - a.degree(t) def weak_normalizer(a, d, DE, z=None): """ Weak normalization. Explanation =========== Given a derivation D on k[t] and f == a/d in k(t), return q in k[t] such that f - Dq/q is weakly normalized with respect to t. f in k(t) is said to be "weakly normalized" with respect to t if residue_p(f) is not a positive integer for any normal irreducible p in k[t] such that f is in R_p (Definition 6.1.1). If f has an elementary integral, this is equivalent to no logarithm of integral(f) whose argument depends on t has a positive integer coefficient, where the arguments of the logarithms not in k(t) are in k[t]. Returns (q, f - Dq/q) """ z = z or Dummy('z') dn, ds = splitfactor(d, DE) # Compute d1, where dn == d1*d2**2*...*dn**n is a square-free # factorization of d. 
g = gcd(dn, dn.diff(DE.t)) d_sqf_part = dn.quo(g) d1 = d_sqf_part.quo(gcd(d_sqf_part, g)) a1, b = gcdex_diophantine(d.quo(d1).as_poly(DE.t), d1.as_poly(DE.t), a.as_poly(DE.t)) r = (a - Poly(z, DE.t)*derivation(d1, DE)).as_poly(DE.t).resultant( d1.as_poly(DE.t)) r = Poly(r, z) if not r.expr.has(z): return (Poly(1, DE.t), (a, d)) N = [i for i in r.real_roots() if i in ZZ and i > 0] q = reduce(mul, [gcd(a - Poly(n, DE.t)*derivation(d1, DE), d1) for n in N], Poly(1, DE.t)) dq = derivation(q, DE) sn = q*a - d*dq sd = q*d sn, sd = sn.cancel(sd, include=True) return (q, (sn, sd)) def normal_denom(fa, fd, ga, gd, DE): """ Normal part of the denominator. Explanation =========== Given a derivation D on k[t] and f, g in k(t) with f weakly normalized with respect to t, either raise NonElementaryIntegralException, in which case the equation Dy + f*y == g has no solution in k(t), or the quadruplet (a, b, c, h) such that a, h in k[t], b, c in k<t>, and for any solution y in k(t) of Dy + f*y == g, q = y*h in k<t> satisfies a*Dq + b*q == c. This constitutes step 1 in the outline given in the rde.py docstring. """ dn, ds = splitfactor(fd, DE) en, es = splitfactor(gd, DE) p = dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a = dn*h c = a*h if c.div(en)[1]: # en does not divide dn*h**2 raise NonElementaryIntegralException ca = c*ga ca, cd = ca.cancel(gd, include=True) ba = a*fa - dn*derivation(h, DE)*fd ba, bd = ba.cancel(fd, include=True) # (dn*h, dn*h*f - dn*Dh, dn*h**2*g, h) return (a, (ba, bd), (ca, cd), h) def special_denom(a, ba, bd, ca, cd, DE, case='auto'): """ Special part of the denominator. Explanation =========== case is one of {'exp', 'tan', 'primitive'} for the hyperexponential, hypertangent, and primitive cases, respectively. For the hyperexponential (resp. hypertangent) case, given a derivation D on k[t] and a in k[t], b, c, in k<t> with Dt/t in k (resp. Dt/(t**2 + 1) in k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp. gcd(a, t**2 + 1) == 1), return the quadruplet (A, B, C, 1/h) such that A, B, C, h in k[t] and for any solution q in k<t> of a*Dq + b*q == c, r = qh in k[t] satisfies A*Dr + B*r == C. For ``case == 'primitive'``, k<t> == k[t], so it returns (a, b, c, 1) in this case. This constitutes step 2 of the outline given in the rde.py docstring. """ from sympy.integrals.prde import parametric_log_deriv # TODO: finish writing this and write tests if case == 'auto': case = DE.case if case == 'exp': p = Poly(DE.t, DE.t) elif case == 'tan': p = Poly(DE.t**2 + 1, DE.t) elif case in ['primitive', 'base']: B = ba.to_field().quo(bd) C = ca.to_field().quo(cd) return (a, B, C, Poly(1, DE.t)) else: raise ValueError("case must be one of {'exp', 'tan', 'primitive', " "'base'}, not %s." % case) nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t) nc = order_at(ca, p, DE.t) - order_at(cd, p, DE.t) n = min(0, nc - min(0, nb)) if not nb: # Possible cancellation. if case == 'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): # We are guaranteed to not have problems, # because case != 'base'. alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) etaa, etad = frac_in(dcoeff, DE.t) A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) if A is not None: Q, m, z = A if Q == 1: n = min(n, m) elif case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2+1, DE.t)) with DecrementLevel(DE): # We are guaranteed to not have problems, # because case != 'base'. 
alphaa, alphad = frac_in(im(-ba.eval(sqrt(-1))/bd.eval(sqrt(-1))/a.eval(sqrt(-1))), DE.t) betaa, betad = frac_in(re(-ba.eval(sqrt(-1))/bd.eval(sqrt(-1))/a.eval(sqrt(-1))), DE.t) etaa, etad = frac_in(dcoeff, DE.t) if recognize_log_derivative(Poly(2, DE.t)*betaa, betad, DE): A = parametric_log_deriv(alphaa*Poly(sqrt(-1), DE.t)*betad+alphad*betaa, alphad*betad, etaa, etad, DE) if A is not None: Q, m, z = A if Q == 1: n = min(n, m) N = max(0, -nb, n - nc) pN = p**N pn = p**-n A = a*pN B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN C = (ca*pN*pn).quo(cd) h = pn # (a*p**N, (b + n*a*Dp/p)*p**N, c*p**(N - n), p**-n) return (A, B, C, h) def bound_degree(a, b, cQ, DE, case='auto', parametric=False): """ Bound on polynomial solutions. Explanation =========== Given a derivation D on k[t] and ``a``, ``b``, ``c`` in k[t] with ``a != 0``, return n in ZZ such that deg(q) <= n for any solution q in k[t] of a*Dq + b*q == c, when parametric=False, or deg(q) <= n for any solution c1, ..., cm in Const(k) and q in k[t] of a*Dq + b*q == Sum(ci*gi, (i, 1, m)) when parametric=True. For ``parametric=False``, ``cQ`` is ``c``, a ``Poly``; for ``parametric=True``, ``cQ`` is Q == [q1, ..., qm], a list of Polys. This constitutes step 3 of the outline given in the rde.py docstring. """ from sympy.integrals.prde import (parametric_log_deriv, limited_integrate, is_log_deriv_k_t_radical_in_field) # TODO: finish writing this and write tests if case == 'auto': case = DE.case da = a.degree(DE.t) db = b.degree(DE.t) # The parametric and regular cases are identical, except for this part if parametric: dc = max([i.degree(DE.t) for i in cQ]) else: dc = cQ.degree(DE.t) alpha = cancel(-b.as_poly(DE.t).LC().as_expr()/ a.as_poly(DE.t).LC().as_expr()) if case == 'base': n = max(0, dc - max(db, da - 1)) if db == da - 1 and alpha.is_Integer: n = max(0, alpha, dc - db) elif case == 'primitive': if db > da: n = max(0, dc - db) else: n = max(0, dc - da + 1) etaa, etad = frac_in(DE.d, DE.T[DE.level - 1]) t1 = DE.t with DecrementLevel(DE): alphaa, alphad = frac_in(alpha, DE.t) if db == da - 1: # if alpha == m*Dt + Dz for z in k and m in ZZ: try: (za, zd), m = limited_integrate(alphaa, alphad, [(etaa, etad)], DE) except NonElementaryIntegralException: pass else: if len(m) != 1: raise ValueError("Length of m should be 1") n = max(n, m[0]) elif db == da: # if alpha == Dz/z for z in k*: # beta = -lc(a*Dz + b*z)/(z*lc(a)) # if beta == m*Dt + Dw for w in k and m in ZZ: # n = max(n, m) A = is_log_deriv_k_t_radical_in_field(alphaa, alphad, DE) if A is not None: aa, z = A if aa == 1: beta = -(a*derivation(z, DE).as_poly(t1) + b*z.as_poly(t1)).LC()/(z.as_expr()*a.LC()) betaa, betad = frac_in(beta, DE.t) try: (za, zd), m = limited_integrate(betaa, betad, [(etaa, etad)], DE) except NonElementaryIntegralException: pass else: if len(m) != 1: raise ValueError("Length of m should be 1") n = max(n, m[0]) elif case == 'exp': n = max(0, dc - max(db, da)) if da == db: etaa, etad = frac_in(DE.d.quo(Poly(DE.t, DE.t)), DE.T[DE.level - 1]) with DecrementLevel(DE): alphaa, alphad = frac_in(alpha, DE.t) A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) if A is not None: # if alpha == m*Dt/t + Dz/z for z in k* and m in ZZ: # n = max(n, m) a, m, z = A if a == 1: n = max(n, m) elif case in ['tan', 'other_nonlinear']: delta = DE.d.degree(DE.t) lam = DE.d.LC() alpha = cancel(alpha/lam) n = max(0, dc - max(da + delta - 1, db)) if db == da + delta - 1 and alpha.is_Integer: n = max(0, alpha, dc - db) else: raise ValueError("case must be one of {'exp', 
'tan', 'primitive', " "'other_nonlinear', 'base'}, not %s." % case) return n def spde(a, b, c, n, DE): """ Rothstein's Special Polynomial Differential Equation algorithm. Explanation =========== Given a derivation D on k[t], an integer n and ``a``,``b``,``c`` in k[t] with ``a != 0``, either raise NonElementaryIntegralException, in which case the equation a*Dq + b*q == c has no solution of degree at most ``n`` in k[t], or return the tuple (B, C, m, alpha, beta) such that B, C, alpha, beta in k[t], m in ZZ, and any solution q in k[t] of degree at most n of a*Dq + b*q == c must be of the form q == alpha*h + beta, where h in k[t], deg(h) <= m, and Dh + B*h == C. This constitutes step 4 of the outline given in the rde.py docstring. """ zero = Poly(0, DE.t) alpha = Poly(1, DE.t) beta = Poly(0, DE.t) while True: if c.is_zero: return (zero, zero, 0, zero, beta) # -1 is more to the point if (n < 0) is True: raise NonElementaryIntegralException g = a.gcd(b) if not c.rem(g).is_zero: # g does not divide c raise NonElementaryIntegralException a, b, c = a.quo(g), b.quo(g), c.quo(g) if a.degree(DE.t) == 0: b = b.to_field().quo(a) c = c.to_field().quo(a) return (b, c, n, alpha, beta) r, z = gcdex_diophantine(b, a, c) b += derivation(a, DE) c = z - derivation(r, DE) n -= a.degree(DE.t) beta += alpha * r alpha *= a def no_cancel_b_large(b, c, n, DE): """ Poly Risch Differential Equation - No cancellation: deg(b) large enough. Explanation =========== Given a derivation D on k[t], ``n`` either an integer or +oo, and ``b``,``c`` in k[t] with ``b != 0`` and either D == d/dt or deg(b) > max(0, deg(D) - 1), either raise NonElementaryIntegralException, in which case the equation ``Dq + b*q == c`` has no solution of degree at most n in k[t], or a solution q in k[t] of this equation with ``deg(q) < n``. """ q = Poly(0, DE.t) while not c.is_zero: m = c.degree(DE.t) - b.degree(DE.t) if not 0 <= m <= n: # n < 0 or m < 0 or m > n raise NonElementaryIntegralException p = Poly(c.as_poly(DE.t).LC()/b.as_poly(DE.t).LC()*DE.t**m, DE.t, expand=False) q = q + p n = m - 1 c = c - derivation(p, DE) - b*p return q def no_cancel_b_small(b, c, n, DE): """ Poly Risch Differential Equation - No cancellation: deg(b) small enough. Explanation =========== Given a derivation D on k[t], ``n`` either an integer or +oo, and ``b``,``c`` in k[t] with deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, either raise NonElementaryIntegralException, in which case the equation Dq + b*q == c has no solution of degree at most n in k[t], or a solution q in k[t] of this equation with deg(q) <= n, or the tuple (h, b0, c0) such that h in k[t], b0, c0, in k, and for any solution q in k[t] of degree at most n of Dq + bq == c, y == q - h is a solution in k of Dy + b0*y == c0. 
""" q = Poly(0, DE.t) while not c.is_zero: if n == 0: m = 0 else: m = c.degree(DE.t) - DE.d.degree(DE.t) + 1 if not 0 <= m <= n: # n < 0 or m < 0 or m > n raise NonElementaryIntegralException if m > 0: p = Poly(c.as_poly(DE.t).LC()/(m*DE.d.as_poly(DE.t).LC())*DE.t**m, DE.t, expand=False) else: if b.degree(DE.t) != c.degree(DE.t): raise NonElementaryIntegralException if b.degree(DE.t) == 0: return (q, b.as_poly(DE.T[DE.level - 1]), c.as_poly(DE.T[DE.level - 1])) p = Poly(c.as_poly(DE.t).LC()/b.as_poly(DE.t).LC(), DE.t, expand=False) q = q + p n = m - 1 c = c - derivation(p, DE) - b*p return q # TODO: better name for this function def no_cancel_equal(b, c, n, DE): """ Poly Risch Differential Equation - No cancellation: deg(b) == deg(D) - 1 Explanation =========== Given a derivation D on k[t] with deg(D) >= 2, n either an integer or +oo, and b, c in k[t] with deg(b) == deg(D) - 1, either raise NonElementaryIntegralException, in which case the equation Dq + b*q == c has no solution of degree at most n in k[t], or a solution q in k[t] of this equation with deg(q) <= n, or the tuple (h, m, C) such that h in k[t], m in ZZ, and C in k[t], and for any solution q in k[t] of degree at most n of Dq + b*q == c, y == q - h is a solution in k[t] of degree at most m of Dy + b*y == C. """ q = Poly(0, DE.t) lc = cancel(-b.as_poly(DE.t).LC()/DE.d.as_poly(DE.t).LC()) if lc.is_Integer and lc.is_positive: M = lc else: M = -1 while not c.is_zero: m = max(M, c.degree(DE.t) - DE.d.degree(DE.t) + 1) if not 0 <= m <= n: # n < 0 or m < 0 or m > n raise NonElementaryIntegralException u = cancel(m*DE.d.as_poly(DE.t).LC() + b.as_poly(DE.t).LC()) if u.is_zero: return (q, m, c) if m > 0: p = Poly(c.as_poly(DE.t).LC()/u*DE.t**m, DE.t, expand=False) else: if c.degree(DE.t) != DE.d.degree(DE.t) - 1: raise NonElementaryIntegralException else: p = c.as_poly(DE.t).LC()/b.as_poly(DE.t).LC() q = q + p n = m - 1 c = c - derivation(p, DE) - b*p return q def cancel_primitive(b, c, n, DE): """ Poly Risch Differential Equation - Cancellation: Primitive case. Explanation =========== Given a derivation D on k[t], n either an integer or +oo, ``b`` in k, and ``c`` in k[t] with Dt in k and ``b != 0``, either raise NonElementaryIntegralException, in which case the equation Dq + b*q == c has no solution of degree at most n in k[t], or a solution q in k[t] of this equation with deg(q) <= n. """ from sympy.integrals.prde import is_log_deriv_k_t_radical_in_field with DecrementLevel(DE): ba, bd = frac_in(b, DE.t) A = is_log_deriv_k_t_radical_in_field(ba, bd, DE) if A is not None: n, z = A if n == 1: # b == Dz/z raise NotImplementedError("is_deriv_in_field() is required to " " solve this problem.") # if z*c == Dp for p in k[t] and deg(p) <= n: # return p/z # else: # raise NonElementaryIntegralException if c.is_zero: return c # return 0 if n < c.degree(DE.t): raise NonElementaryIntegralException q = Poly(0, DE.t) while not c.is_zero: m = c.degree(DE.t) if n < m: raise NonElementaryIntegralException with DecrementLevel(DE): a2a, a2d = frac_in(c.LC(), DE.t) sa, sd = rischDE(ba, bd, a2a, a2d, DE) stm = Poly(sa.as_expr()/sd.as_expr()*DE.t**m, DE.t, expand=False) q += stm n = m - 1 c -= b*stm + derivation(stm, DE) return q def cancel_exp(b, c, n, DE): """ Poly Risch Differential Equation - Cancellation: Hyperexponential case. 
Explanation =========== Given a derivation D on k[t], n either an integer or +oo, ``b`` in k, and ``c`` in k[t] with Dt/t in k and ``b != 0``, either raise NonElementaryIntegralException, in which case the equation Dq + b*q == c has no solution of degree at most n in k[t], or a solution q in k[t] of this equation with deg(q) <= n. """ from sympy.integrals.prde import parametric_log_deriv eta = DE.d.quo(Poly(DE.t, DE.t)).as_expr() with DecrementLevel(DE): etaa, etad = frac_in(eta, DE.t) ba, bd = frac_in(b, DE.t) A = parametric_log_deriv(ba, bd, etaa, etad, DE) if A is not None: a, m, z = A if a == 1: raise NotImplementedError("is_deriv_in_field() is required to " "solve this problem.") # if c*z*t**m == Dp for p in k<t> and q = p/(z*t**m) in k[t] and # deg(q) <= n: # return q # else: # raise NonElementaryIntegralException if c.is_zero: return c # return 0 if n < c.degree(DE.t): raise NonElementaryIntegralException q = Poly(0, DE.t) while not c.is_zero: m = c.degree(DE.t) if n < m: raise NonElementaryIntegralException # a1 = b + m*Dt/t a1 = b.as_expr() with DecrementLevel(DE): # TODO: Write a dummy function that does this idiom a1a, a1d = frac_in(a1, DE.t) a1a = a1a*etad + etaa*a1d*Poly(m, DE.t) a1d = a1d*etad a2a, a2d = frac_in(c.LC(), DE.t) sa, sd = rischDE(a1a, a1d, a2a, a2d, DE) stm = Poly(sa.as_expr()/sd.as_expr()*DE.t**m, DE.t, expand=False) q += stm n = m - 1 c -= b*stm + derivation(stm, DE) # deg(c) becomes smaller return q def solve_poly_rde(b, cQ, n, DE, parametric=False): """ Solve a Polynomial Risch Differential Equation with degree bound ``n``. This constitutes step 4 of the outline given in the rde.py docstring. For parametric=False, cQ is c, a Poly; for parametric=True, cQ is Q == [q1, ..., qm], a list of Polys. """ from sympy.integrals.prde import (prde_no_cancel_b_large, prde_no_cancel_b_small) # No cancellation if not b.is_zero and (DE.case == 'base' or b.degree(DE.t) > max(0, DE.d.degree(DE.t) - 1)): if parametric: return prde_no_cancel_b_large(b, cQ, n, DE) return no_cancel_b_large(b, cQ, n, DE) elif (b.is_zero or b.degree(DE.t) < DE.d.degree(DE.t) - 1) and \ (DE.case == 'base' or DE.d.degree(DE.t) >= 2): if parametric: return prde_no_cancel_b_small(b, cQ, n, DE) R = no_cancel_b_small(b, cQ, n, DE) if isinstance(R, Poly): return R else: # XXX: Might k be a field? (pg. 209) h, b0, c0 = R with DecrementLevel(DE): b0, c0 = b0.as_poly(DE.t), c0.as_poly(DE.t) if b0 is None: # See above comment raise ValueError("b0 should be a non-Null value") if c0 is None: raise ValueError("c0 should be a non-Null value") y = solve_poly_rde(b0, c0, n, DE).as_poly(DE.t) return h + y elif DE.d.degree(DE.t) >= 2 and b.degree(DE.t) == DE.d.degree(DE.t) - 1 and \ n > -b.as_poly(DE.t).LC()/DE.d.as_poly(DE.t).LC(): # TODO: Is this check necessary, and if so, what should it do if it fails? # b comes from the first element returned from spde() if not b.as_poly(DE.t).LC().is_number: raise TypeError("Result should be a number") if parametric: raise NotImplementedError("prde_no_cancel_b_equal() is not yet " "implemented.") R = no_cancel_equal(b, cQ, n, DE) if isinstance(R, Poly): return R else: h, m, C = R # XXX: Or should it be rischDE()? 
y = solve_poly_rde(b, C, m, DE) return h + y else: # Cancellation if b.is_zero: raise NotImplementedError("Remaining cases for Poly (P)RDE are " "not yet implemented (is_deriv_in_field() required).") else: if DE.case == 'exp': if parametric: raise NotImplementedError("Parametric RDE cancellation " "hyperexponential case is not yet implemented.") return cancel_exp(b, cQ, n, DE) elif DE.case == 'primitive': if parametric: raise NotImplementedError("Parametric RDE cancellation " "primitive case is not yet implemented.") return cancel_primitive(b, cQ, n, DE) else: raise NotImplementedError("Other Poly (P)RDE cancellation " "cases are not yet implemented (%s)." % DE.case) if parametric: raise NotImplementedError("Remaining cases for Poly PRDE not yet " "implemented.") raise NotImplementedError("Remaining cases for Poly RDE not yet " "implemented.") def rischDE(fa, fd, ga, gd, DE): """ Solve a Risch Differential Equation: Dy + f*y == g. Explanation =========== See the outline in the docstring of rde.py for more information about the procedure used. Either raise NonElementaryIntegralException, in which case there is no solution y in the given differential field, or return y in k(t) satisfying Dy + f*y == g, or raise NotImplementedError, in which case, the algorithms necessary to solve the given Risch Differential Equation have not yet been implemented. """ _, (fa, fd) = weak_normalizer(fa, fd, DE) a, (ba, bd), (ca, cd), hn = normal_denom(fa, fd, ga, gd, DE) A, B, C, hs = special_denom(a, ba, bd, ca, cd, DE) try: # Until this is fully implemented, use oo. Note that this will almost # certainly cause non-termination in spde() (unless A == 1), and # *might* lead to non-termination in the next step for a nonelementary # integral (I don't know for certain yet). Fortunately, spde() is # currently written recursively, so this will just give # RuntimeError: maximum recursion depth exceeded. n = bound_degree(A, B, C, DE) except NotImplementedError: # Useful for debugging: # import warnings # warnings.warn("rischDE: Proceeding with n = oo; may cause " # "non-termination.") n = oo B, C, m, alpha, beta = spde(A, B, C, n, DE) if C.is_zero: y = C else: y = solve_poly_rde(B, C, m, DE) return (alpha*y + beta, hn*hs)
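
# A minimal usage sketch (illustrative only; the exact Poly domains of the
# returned pair are not pinned down here).  In the base case, with t == x and
# D == d/dx, solving Dy + y == x over QQ(x) should give y == x - 1:
#
#   from sympy import Poly
#   from sympy.abc import x
#   from sympy.integrals.risch import DifferentialExtension
#   DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
#   ya, yd = rischDE(Poly(1, x), Poly(1, x), Poly(x, x), Poly(1, x), DE)
#   # ya.as_expr()/yd.as_expr() == x - 1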
""" The Risch Algorithm for transcendental function integration. The core algorithms for the Risch algorithm are here. The subproblem algorithms are in the rde.py and prde.py files for the Risch Differential Equation solver and the parametric problems solvers, respectively. All important information concerning the differential extension for an integrand is stored in a DifferentialExtension object, which in the code is usually called DE. Throughout the code and Inside the DifferentialExtension object, the conventions/attribute names are that the base domain is QQ and each differential extension is x, t0, t1, ..., tn-1 = DE.t. DE.x is the variable of integration (Dx == 1), DE.D is a list of the derivatives of x, t1, t2, ..., tn-1 = t, DE.T is the list [x, t1, t2, ..., tn-1], DE.t is the outer-most variable of the differential extension at the given level (the level can be adjusted using DE.increment_level() and DE.decrement_level()), k is the field C(x, t0, ..., tn-2), where C is the constant field. The numerator of a fraction is denoted by a and the denominator by d. If the fraction is named f, fa == numer(f) and fd == denom(f). Fractions are returned as tuples (fa, fd). DE.d and DE.t are used to represent the topmost derivation and extension variable, respectively. The docstring of a function signifies whether an argument is in k[t], in which case it will just return a Poly in t, or in k(t), in which case it will return the fraction (fa, fd). Other variable names probably come from the names used in Bronstein's book. """ from sympy import real_roots, default_sort_key from sympy.abc import z from sympy.core.function import Lambda from sympy.core.numbers import ilcm, oo, I from sympy.core.mul import Mul from sympy.core.power import Pow from sympy.core.relational import Ne from sympy.core.singleton import S from sympy.core.symbol import Symbol, Dummy from sympy.core.compatibility import ordered from sympy.integrals.heurisch import _symbols from sympy.functions import (acos, acot, asin, atan, cos, cot, exp, log, Piecewise, sin, tan) from sympy.functions import sinh, cosh, tanh, coth from sympy.integrals import Integral, integrate from sympy.polys import gcd, cancel, PolynomialError, Poly, reduced, RootSum, DomainError from sympy.utilities.iterables import numbered_symbols from types import GeneratorType from functools import reduce def integer_powers(exprs): """ Rewrites a list of expressions as integer multiples of each other. Explanation =========== For example, if you have [x, x/2, x**2 + 1, 2*x/3], then you can rewrite this as [(x/6) * 6, (x/6) * 3, (x**2 + 1) * 1, (x/6) * 4]. This is useful in the Risch integration algorithm, where we must write exp(x) + exp(x/2) as (exp(x/2))**2 + exp(x/2), but not as exp(x) + sqrt(exp(x)) (this is because only the transcendental case is implemented and we therefore cannot integrate algebraic extensions). The integer multiples returned by this function for each term are the smallest possible (their content equals 1). Returns a list of tuples where the first element is the base term and the second element is a list of `(item, factor)` terms, where `factor` is the integer multiplicative factor that must multiply the base term to obtain the original item. 
    The easiest way to understand this is to look at an example:

    >>> from sympy.abc import x
    >>> from sympy.integrals.risch import integer_powers
    >>> integer_powers([x, x/2, x**2 + 1, 2*x/3])
    [(x/6, [(x, 6), (x/2, 3), (2*x/3, 4)]), (x**2 + 1, [(x**2 + 1, 1)])]

    We can see how this relates to the example at the beginning of the
    docstring. It chose x/6 as the first base term. Then, x can be written as
    (x/6) * 6, so we get (x, 6), and so on. Now only element (x**2 + 1)
    remains, and there are no other terms that can be written as a rational
    multiple of that, so we get that it can be written as (x**2 + 1) * 1.
    """
    # Here is the strategy:
    # First, go through each term and determine if it can be rewritten as a
    # rational multiple of any of the terms gathered so far.
    # cancel(a/b).is_Rational is sufficient for this. If it is a multiple, we
    # add its multiple to the dictionary.
    terms = {}
    for term in exprs:
        for j in terms:
            a = cancel(term/j)
            if a.is_Rational:
                terms[j].append((term, a))
                break
        else:
            terms[term] = [(term, S.One)]

    # After we have done this, we have all the like terms together, so we just
    # need to find a common denominator so that we can get the base term and
    # integer multiples such that each term can be written as an integer
    # multiple of the base term, and the content of the integers is 1.
    newterms = {}
    for term in terms:
        common_denom = reduce(ilcm, [i.as_numer_denom()[1] for _, i in terms[term]])
        newterm = term/common_denom
        newmults = [(i, j*common_denom) for i, j in terms[term]]
        newterms[newterm] = newmults

    return sorted(iter(newterms.items()), key=lambda item: item[0].sort_key())


class DifferentialExtension:
    """
    A container for all the information relating to a differential extension.

    Explanation
    ===========

    The attributes of this object are (see also the docstring of __init__):

    - f: The original (Expr) integrand.
    - x: The variable of integration.
    - T: List of variables in the extension.
    - D: List of derivations in the extension; corresponds to the elements of
      T.
    - fa: Poly of the numerator of the integrand.
    - fd: Poly of the denominator of the integrand.
    - Tfuncs: Lambda() representations of each element of T (except for x).
      For back-substitution after integration.
    - backsubs: A (possibly empty) list of further substitutions to be made on
      the final integral to make it look more like the integrand.
    - exts:
    - extargs:
    - cases: List of string representations of the cases of T.
    - t: The top level extension variable, as defined by the current level
      (see level below).
    - d: The top level extension derivation, as defined by the current
      derivation (see level below).
    - case: The string representation of the case of self.d.

    (Note that self.T and self.D will always contain the complete extension,
    regardless of the level. Therefore, you should ALWAYS use DE.t and DE.d
    instead of DE.T[-1] and DE.D[-1]. If you want to have a list of the
    derivations or variables only up to the current level, use
    DE.D[:len(DE.D) + DE.level + 1] and DE.T[:len(DE.T) + DE.level + 1].
    Note that, in particular, the derivation() function does this.)

    The following are also attributes, but will probably not be useful other
    than in internal use:
    - newf: Expr form of fa/fd.
    - level: The number (between -1 and -len(self.T)) such that
      self.T[self.level] == self.t and self.D[self.level] == self.d.
      Use the methods self.increment_level() and self.decrement_level() to
      change the current level.
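
    Examples
    ========

    A small construction sketch (illustrative): an integrand involving exp(x)
    and log(x) is expected to produce a three-level tower over QQ.

    >>> from sympy import exp, log
    >>> from sympy.abc import x
    >>> from sympy.integrals.risch import DifferentialExtension
    >>> DE = DifferentialExtension(exp(x) + log(x), x, handle_first='exp')
    >>> DE.cases
    ['base', 'exp', 'primitive']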
""" # __slots__ is defined mainly so we can iterate over all the attributes # of the class easily (the memory use doesn't matter too much, since we # only create one DifferentialExtension per integration). Also, it's nice # to have a safeguard when debugging. __slots__ = ('f', 'x', 'T', 'D', 'fa', 'fd', 'Tfuncs', 'backsubs', 'exts', 'extargs', 'cases', 'case', 't', 'd', 'newf', 'level', 'ts', 'dummy') def __init__(self, f=None, x=None, handle_first='log', dummy=False, extension=None, rewrite_complex=None): """ Tries to build a transcendental extension tower from ``f`` with respect to ``x``. Explanation =========== If it is successful, creates a DifferentialExtension object with, among others, the attributes fa, fd, D, T, Tfuncs, and backsubs such that fa and fd are Polys in T[-1] with rational coefficients in T[:-1], fa/fd == f, and D[i] is a Poly in T[i] with rational coefficients in T[:i] representing the derivative of T[i] for each i from 1 to len(T). Tfuncs is a list of Lambda objects for back replacing the functions after integrating. Lambda() is only used (instead of lambda) to make them easier to test and debug. Note that Tfuncs corresponds to the elements of T, except for T[0] == x, but they should be back-substituted in reverse order. backsubs is a (possibly empty) back-substitution list that should be applied on the completed integral to make it look more like the original integrand. If it is unsuccessful, it raises NotImplementedError. You can also create an object by manually setting the attributes as a dictionary to the extension keyword argument. You must include at least D. Warning, any attribute that is not given will be set to None. The attributes T, t, d, cases, case, x, and level are set automatically and do not need to be given. The functions in the Risch Algorithm will NOT check to see if an attribute is None before using it. This also does not check to see if the extension is valid (non-algebraic) or even if it is self-consistent. Therefore, this should only be used for testing/debugging purposes. """ # XXX: If you need to debug this function, set the break point here if extension: if 'D' not in extension: raise ValueError("At least the key D must be included with " "the extension flag to DifferentialExtension.") for attr in extension: setattr(self, attr, extension[attr]) self._auto_attrs() return elif f is None or x is None: raise ValueError("Either both f and x or a manual extension must " "be given.") if handle_first not in ['log', 'exp']: raise ValueError("handle_first must be 'log' or 'exp', not %s." 
% str(handle_first)) # f will be the original function, self.f might change if we reset # (e.g., we pull out a constant from an exponential) self.f = f self.x = x # setting the default value 'dummy' self.dummy = dummy self.reset() exp_new_extension, log_new_extension = True, True # case of 'automatic' choosing if rewrite_complex is None: rewrite_complex = I in self.f.atoms() if rewrite_complex: rewritables = { (sin, cos, cot, tan, sinh, cosh, coth, tanh): exp, (asin, acos, acot, atan): log, } # rewrite the trigonometric components for candidates, rule in rewritables.items(): self.newf = self.newf.rewrite(candidates, rule) self.newf = cancel(self.newf) else: if any(i.has(x) for i in self.f.atoms(sin, cos, tan, atan, asin, acos)): raise NotImplementedError("Trigonometric extensions are not " "supported (yet!)") exps = set() pows = set() numpows = set() sympows = set() logs = set() symlogs = set() while True: if self.newf.is_rational_function(*self.T): break if not exp_new_extension and not log_new_extension: # We couldn't find a new extension on the last pass, so I guess # we can't do it. raise NotImplementedError("Couldn't find an elementary " "transcendental extension for %s. Try using a " % str(f) + "manual extension with the extension flag.") exps, pows, numpows, sympows, log_new_extension = \ self._rewrite_exps_pows(exps, pows, numpows, sympows, log_new_extension) logs, symlogs = self._rewrite_logs(logs, symlogs) if handle_first == 'exp' or not log_new_extension: exp_new_extension = self._exp_part(exps) if exp_new_extension is None: # reset and restart self.f = self.newf self.reset() exp_new_extension = True continue if handle_first == 'log' or not exp_new_extension: log_new_extension = self._log_part(logs) self.fa, self.fd = frac_in(self.newf, self.t) self._auto_attrs() return def __getattr__(self, attr): # Avoid AttributeErrors when debugging if attr not in self.__slots__: raise AttributeError("%s has no attribute %s" % (repr(self), repr(attr))) return None def _rewrite_exps_pows(self, exps, pows, numpows, sympows, log_new_extension): """ Rewrite exps/pows for better processing. """ # Pre-preparsing. ################# # Get all exp arguments, so we can avoid ahead of time doing # something like t1 = exp(x), t2 = exp(x/2) == sqrt(t1). # Things like sqrt(exp(x)) do not automatically simplify to # exp(x/2), so they will be viewed as algebraic. The easiest way # to handle this is to convert all instances of (a**b)**Rational # to a**(Rational*b) before doing anything else. Note that the # _exp_part code can generate terms of this form, so we do need to # do this at each pass (or else modify it to not do that). from sympy.integrals.prde import is_deriv_k ratpows = [i for i in self.newf.atoms(Pow).union(self.newf.atoms(exp)) if (i.base.is_Pow or isinstance(i.base, exp) and i.exp.is_Rational)] ratpows_repl = [ (i, i.base.base**(i.exp*i.base.exp)) for i in ratpows] self.backsubs += [(j, i) for i, j in ratpows_repl] self.newf = self.newf.xreplace(dict(ratpows_repl)) # To make the process deterministic, the args are sorted # so that functions with smaller op-counts are processed first. # Ties are broken with the default_sort_key. # XXX Although the method is deterministic no additional work # has been done to guarantee that the simplest solution is # returned and that it would be affected be using different # variables. Though it is possible that this is the case # one should know that it has not been done intentionally, so # further improvements may be possible. 
# TODO: This probably doesn't need to be completely recomputed at # each pass. exps = update_sets(exps, self.newf.atoms(exp), lambda i: i.exp.is_rational_function(*self.T) and i.exp.has(*self.T)) pows = update_sets(pows, self.newf.atoms(Pow), lambda i: i.exp.is_rational_function(*self.T) and i.exp.has(*self.T)) numpows = update_sets(numpows, set(pows), lambda i: not i.base.has(*self.T)) sympows = update_sets(sympows, set(pows) - set(numpows), lambda i: i.base.is_rational_function(*self.T) and not i.exp.is_Integer) # The easiest way to deal with non-base E powers is to convert them # into base E, integrate, and then convert back. for i in ordered(pows): old = i new = exp(i.exp*log(i.base)) # If exp is ever changed to automatically reduce exp(x*log(2)) # to 2**x, then this will break. The solution is to not change # exp to do that :) if i in sympows: if i.exp.is_Rational: raise NotImplementedError("Algebraic extensions are " "not supported (%s)." % str(i)) # We can add a**b only if log(a) in the extension, because # a**b == exp(b*log(a)). basea, based = frac_in(i.base, self.t) A = is_deriv_k(basea, based, self) if A is None: # Nonelementary monomial (so far) # TODO: Would there ever be any benefit from just # adding log(base) as a new monomial? # ANSWER: Yes, otherwise we can't integrate x**x (or # rather prove that it has no elementary integral) # without first manually rewriting it as exp(x*log(x)) self.newf = self.newf.xreplace({old: new}) self.backsubs += [(new, old)] log_new_extension = self._log_part([log(i.base)]) exps = update_sets(exps, self.newf.atoms(exp), lambda i: i.exp.is_rational_function(*self.T) and i.exp.has(*self.T)) continue ans, u, const = A newterm = exp(i.exp*(log(const) + u)) # Under the current implementation, exp kills terms # only if they are of the form a*log(x), where a is a # Number. This case should have already been killed by the # above tests. Again, if this changes to kill more than # that, this will break, which maybe is a sign that you # shouldn't be changing that. Actually, if anything, this # auto-simplification should be removed. See # http://groups.google.com/group/sympy/browse_thread/thread/a61d48235f16867f self.newf = self.newf.xreplace({i: newterm}) elif i not in numpows: continue else: # i in numpows newterm = new # TODO: Just put it in self.Tfuncs self.backsubs.append((new, old)) self.newf = self.newf.xreplace({old: newterm}) exps.append(newterm) return exps, pows, numpows, sympows, log_new_extension def _rewrite_logs(self, logs, symlogs): """ Rewrite logs for better processing. """ atoms = self.newf.atoms(log) logs = update_sets(logs, atoms, lambda i: i.args[0].is_rational_function(*self.T) and i.args[0].has(*self.T)) symlogs = update_sets(symlogs, atoms, lambda i: i.has(*self.T) and i.args[0].is_Pow and i.args[0].base.is_rational_function(*self.T) and not i.args[0].exp.is_Integer) # We can handle things like log(x**y) by converting it to y*log(x) # This will fix not only symbolic exponents of the argument, but any # non-Integer exponent, like log(sqrt(x)). The exponent can also # depend on x, like log(x**x). for i in ordered(symlogs): # Unlike in the exponential case above, we do not ever # potentially add new monomials (above we had to add log(a)). # Therefore, there is no need to run any is_deriv functions # here. Just convert log(a**b) to b*log(a) and let # log_new_extension() handle it from there. 
lbase = log(i.args[0].base) logs.append(lbase) new = i.args[0].exp*lbase self.newf = self.newf.xreplace({i: new}) self.backsubs.append((new, i)) # remove any duplicates logs = sorted(set(logs), key=default_sort_key) return logs, symlogs def _auto_attrs(self): """ Set attributes that are generated automatically. """ if not self.T: # i.e., when using the extension flag and T isn't given self.T = [i.gen for i in self.D] if not self.x: self.x = self.T[0] self.cases = [get_case(d, t) for d, t in zip(self.D, self.T)] self.level = -1 self.t = self.T[self.level] self.d = self.D[self.level] self.case = self.cases[self.level] def _exp_part(self, exps): """ Try to build an exponential extension. Returns ======= Returns True if there was a new extension, False if there was no new extension but it was able to rewrite the given exponentials in terms of the existing extension, and None if the entire extension building process should be restarted. If the process fails because there is no way around an algebraic extension (e.g., exp(log(x)/2)), it will raise NotImplementedError. """ from sympy.integrals.prde import is_log_deriv_k_t_radical new_extension = False restart = False expargs = [i.exp for i in exps] ip = integer_powers(expargs) for arg, others in ip: # Minimize potential problems with algebraic substitution others.sort(key=lambda i: i[1]) arga, argd = frac_in(arg, self.t) A = is_log_deriv_k_t_radical(arga, argd, self) if A is not None: ans, u, n, const = A # if n is 1 or -1, it's algebraic, but we can handle it if n == -1: # This probably will never happen, because # Rational.as_numer_denom() returns the negative term in # the numerator. But in case that changes, reduce it to # n == 1. n = 1 u **= -1 const *= -1 ans = [(i, -j) for i, j in ans] if n == 1: # Example: exp(x + x**2) over QQ(x, exp(x), exp(x**2)) self.newf = self.newf.xreplace({exp(arg): exp(const)*Mul(*[ u**power for u, power in ans])}) self.newf = self.newf.xreplace({exp(p*exparg): exp(const*p) * Mul(*[u**power for u, power in ans]) for exparg, p in others}) # TODO: Add something to backsubs to put exp(const*p) # back together. continue else: # Bad news: we have an algebraic radical. But maybe we # could still avoid it by choosing a different extension. # For example, integer_powers() won't handle exp(x/2 + 1) # over QQ(x, exp(x)), but if we pull out the exp(1), it # will. Or maybe we have exp(x + x**2/2), over # QQ(x, exp(x), exp(x**2)), which is exp(x)*sqrt(exp(x**2)), # but if we use QQ(x, exp(x), exp(x**2/2)), then they will # all work. # # So here is what we do: If there is a non-zero const, pull # it out and retry. Also, if len(ans) > 1, then rewrite # exp(arg) as the product of exponentials from ans, and # retry that. If const == 0 and len(ans) == 1, then we # assume that it would have been handled by either # integer_powers() or n == 1 above if it could be handled, # so we give up at that point. For example, you can never # handle exp(log(x)/2) because it equals sqrt(x). 
if const or len(ans) > 1: rad = Mul(*[term**(power/n) for term, power in ans]) self.newf = self.newf.xreplace({exp(p*exparg): exp(const*p)*rad for exparg, p in others}) self.newf = self.newf.xreplace(dict(list(zip(reversed(self.T), reversed([f(self.x) for f in self.Tfuncs]))))) restart = True break else: # TODO: give algebraic dependence in error string raise NotImplementedError("Cannot integrate over " "algebraic extensions.") else: arga, argd = frac_in(arg, self.t) darga = (argd*derivation(Poly(arga, self.t), self) - arga*derivation(Poly(argd, self.t), self)) dargd = argd**2 darga, dargd = darga.cancel(dargd, include=True) darg = darga.as_expr()/dargd.as_expr() self.t = next(self.ts) self.T.append(self.t) self.extargs.append(arg) self.exts.append('exp') self.D.append(darg.as_poly(self.t, expand=False)*Poly(self.t, self.t, expand=False)) if self.dummy: i = Dummy("i") else: i = Symbol('i') self.Tfuncs += [Lambda(i, exp(arg.subs(self.x, i)))] self.newf = self.newf.xreplace( {exp(exparg): self.t**p for exparg, p in others}) new_extension = True if restart: return None return new_extension def _log_part(self, logs): """ Try to build a logarithmic extension. Returns ======= Returns True if there was a new extension and False if there was no new extension but it was able to rewrite the given logarithms in terms of the existing extension. Unlike with exponential extensions, there is no way that a logarithm is not transcendental over and cannot be rewritten in terms of an already existing extension in a non-algebraic way, so this function does not ever return None or raise NotImplementedError. """ from sympy.integrals.prde import is_deriv_k new_extension = False logargs = [i.args[0] for i in logs] for arg in ordered(logargs): # The log case is easier, because whenever a logarithm is algebraic # over the base field, it is of the form a1*t1 + ... an*tn + c, # which is a polynomial, so we can just replace it with that. # In other words, we don't have to worry about radicals. arga, argd = frac_in(arg, self.t) A = is_deriv_k(arga, argd, self) if A is not None: ans, u, const = A newterm = log(const) + u self.newf = self.newf.xreplace({log(arg): newterm}) continue else: arga, argd = frac_in(arg, self.t) darga = (argd*derivation(Poly(arga, self.t), self) - arga*derivation(Poly(argd, self.t), self)) dargd = argd**2 darg = darga.as_expr()/dargd.as_expr() self.t = next(self.ts) self.T.append(self.t) self.extargs.append(arg) self.exts.append('log') self.D.append(cancel(darg.as_expr()/arg).as_poly(self.t, expand=False)) if self.dummy: i = Dummy("i") else: i = Symbol('i') self.Tfuncs += [Lambda(i, log(arg.subs(self.x, i)))] self.newf = self.newf.xreplace({log(arg): self.t}) new_extension = True return new_extension @property def _important_attrs(self): """ Returns some of the more important attributes of self. Explanation =========== Used for testing and debugging purposes. The attributes are (fa, fd, D, T, Tfuncs, backsubs, exts, extargs). 
""" return (self.fa, self.fd, self.D, self.T, self.Tfuncs, self.backsubs, self.exts, self.extargs) # NOTE: this printing doesn't follow the Python's standard # eval(repr(DE)) == DE, where DE is the DifferentialExtension object # , also this printing is supposed to contain all the important # attributes of a DifferentialExtension object def __repr__(self): # no need to have GeneratorType object printed in it r = [(attr, getattr(self, attr)) for attr in self.__slots__ if not isinstance(getattr(self, attr), GeneratorType)] return self.__class__.__name__ + '(dict(%r))' % (r) # fancy printing of DifferentialExtension object def __str__(self): return (self.__class__.__name__ + '({fa=%s, fd=%s, D=%s})' % (self.fa, self.fd, self.D)) # should only be used for debugging purposes, internally # f1 = f2 = log(x) at different places in code execution # may return D1 != D2 as True, since 'level' or other attribute # may differ def __eq__(self, other): for attr in self.__class__.__slots__: d1, d2 = getattr(self, attr), getattr(other, attr) if not (isinstance(d1, GeneratorType) or d1 == d2): return False return True def reset(self): """ Reset self to an initial state. Used by __init__. """ self.t = self.x self.T = [self.x] self.D = [Poly(1, self.x)] self.level = -1 self.exts = [None] self.extargs = [None] if self.dummy: self.ts = numbered_symbols('t', cls=Dummy) else: # For testing self.ts = numbered_symbols('t') # For various things that we change to make things work that we need to # change back when we are done. self.backsubs = [] self.Tfuncs = [] self.newf = self.f def indices(self, extension): """ Parameters ========== extension : str Represents a valid extension type. Returns ======= list: A list of indices of 'exts' where extension of type 'extension' is present. Examples ======== >>> from sympy.integrals.risch import DifferentialExtension >>> from sympy import log, exp >>> from sympy.abc import x >>> DE = DifferentialExtension(log(x) + exp(x), x, handle_first='exp') >>> DE.indices('log') [2] >>> DE.indices('exp') [1] """ return [i for i, ext in enumerate(self.exts) if ext == extension] def increment_level(self): """ Increment the level of self. Explanation =========== This makes the working differential extension larger. self.level is given relative to the end of the list (-1, -2, etc.), so we don't need do worry about it when building the extension. """ if self.level >= -1: raise ValueError("The level of the differential extension cannot " "be incremented any further.") self.level += 1 self.t = self.T[self.level] self.d = self.D[self.level] self.case = self.cases[self.level] return None def decrement_level(self): """ Decrease the level of self. Explanation =========== This makes the working differential extension smaller. self.level is given relative to the end of the list (-1, -2, etc.), so we don't need do worry about it when building the extension. """ if self.level <= -len(self.T): raise ValueError("The level of the differential extension cannot " "be decremented any further.") self.level -= 1 self.t = self.T[self.level] self.d = self.D[self.level] self.case = self.cases[self.level] return None def update_sets(seq, atoms, func): s = set(seq) s = atoms.intersection(s) new = atoms - s s.update(list(filter(func, new))) return list(s) class DecrementLevel: """ A context manager for decrementing the level of a DifferentialExtension. 
""" __slots__ = ('DE',) def __init__(self, DE): self.DE = DE return def __enter__(self): self.DE.decrement_level() def __exit__(self, exc_type, exc_value, traceback): self.DE.increment_level() class NonElementaryIntegralException(Exception): """ Exception used by subroutines within the Risch algorithm to indicate to one another that the function being integrated does not have an elementary integral in the given differential field. """ # TODO: Rewrite algorithms below to use this (?) # TODO: Pass through information about why the integral was nonelementary, # and store that in the resulting NonElementaryIntegral somehow. pass def gcdex_diophantine(a, b, c): """ Extended Euclidean Algorithm, Diophantine version. Explanation =========== Given ``a``, ``b`` in K[x] and ``c`` in (a, b), the ideal generated by ``a`` and ``b``, return (s, t) such that s*a + t*b == c and either s == 0 or s.degree() < b.degree(). """ # Extended Euclidean Algorithm (Diophantine Version) pg. 13 # TODO: This should go in densetools.py. # XXX: Bettter name? s, g = a.half_gcdex(b) s *= c.exquo(g) # Inexact division means c is not in (a, b) if s and s.degree() >= b.degree(): _, s = s.div(b) t = (c - s*a).exquo(b) return (s, t) def frac_in(f, t, *, cancel=False, **kwargs): """ Returns the tuple (fa, fd), where fa and fd are Polys in t. Explanation =========== This is a common idiom in the Risch Algorithm functions, so we abstract it out here. ``f`` should be a basic expression, a Poly, or a tuple (fa, fd), where fa and fd are either basic expressions or Polys, and f == fa/fd. **kwargs are applied to Poly. """ if type(f) is tuple: fa, fd = f f = fa.as_expr()/fd.as_expr() fa, fd = f.as_expr().as_numer_denom() fa, fd = fa.as_poly(t, **kwargs), fd.as_poly(t, **kwargs) if cancel: fa, fd = fa.cancel(fd, include=True) if fa is None or fd is None: raise ValueError("Could not turn %s into a fraction in %s." % (f, t)) return (fa, fd) def as_poly_1t(p, t, z): """ (Hackish) way to convert an element ``p`` of K[t, 1/t] to K[t, z]. In other words, ``z == 1/t`` will be a dummy variable that Poly can handle better. See issue 5131. Examples ======== >>> from sympy import random_poly >>> from sympy.integrals.risch import as_poly_1t >>> from sympy.abc import x, z >>> p1 = random_poly(x, 10, -10, 10) >>> p2 = random_poly(x, 10, -10, 10) >>> p = p1 + p2.subs(x, 1/x) >>> as_poly_1t(p, x, z).as_expr().subs(z, 1/x) == p True """ # TODO: Use this on the final result. That way, we can avoid answers like # (...)*exp(-x). pa, pd = frac_in(p, t, cancel=True) if not pd.is_monomial: # XXX: Is there a better Poly exception that we could raise here? # Either way, if you see this (from the Risch Algorithm) it indicates # a bug. raise PolynomialError("%s is not an element of K[%s, 1/%s]." % (p, t, t)) d = pd.degree(t) one_t_part = pa.slice(0, d + 1) r = pd.degree() - pa.degree() t_part = pa - one_t_part try: t_part = t_part.to_field().exquo(pd) except DomainError as e: # issue 4950 raise NotImplementedError(e) # Compute the negative degree parts. one_t_part = Poly.from_list(reversed(one_t_part.rep.rep), *one_t_part.gens, domain=one_t_part.domain) if 0 < r < oo: one_t_part *= Poly(t**r, t) one_t_part = one_t_part.replace(t, z) # z will be 1/t if pd.nth(d): one_t_part *= Poly(1/pd.nth(d), z, expand=False) ans = t_part.as_poly(t, z, expand=False) + one_t_part.as_poly(t, z, expand=False) return ans def derivation(p, DE, coefficientD=False, basic=False): """ Computes Dp. 
Explanation =========== Given the derivation D with D = d/dx and p is a polynomial in t over K(x), return Dp. If coefficientD is True, it computes the derivation kD (kappaD), which is defined as kD(sum(ai*Xi**i, (i, 0, n))) == sum(Dai*Xi**i, (i, 1, n)) (Definition 3.2.2, page 80). X in this case is T[-1], so coefficientD computes the derivative just with respect to T[:-1], with T[-1] treated as a constant. If ``basic=True``, the returns a Basic expression. Elements of D can still be instances of Poly. """ if basic: r = 0 else: r = Poly(0, DE.t) t = DE.t if coefficientD: if DE.level <= -len(DE.T): # 'base' case, the answer is 0. return r DE.decrement_level() D = DE.D[:len(DE.D) + DE.level + 1] T = DE.T[:len(DE.T) + DE.level + 1] for d, v in zip(D, T): pv = p.as_poly(v) if pv is None or basic: pv = p.as_expr() if basic: r += d.as_expr()*pv.diff(v) else: r += (d.as_expr()*pv.diff(v).as_expr()).as_poly(t) if basic: r = cancel(r) if coefficientD: DE.increment_level() return r def get_case(d, t): """ Returns the type of the derivation d. Returns one of {'exp', 'tan', 'base', 'primitive', 'other_linear', 'other_nonlinear'}. """ if not d.expr.has(t): if d.is_one: return 'base' return 'primitive' if d.rem(Poly(t, t)).is_zero: return 'exp' if d.rem(Poly(1 + t**2, t)).is_zero: return 'tan' if d.degree(t) > 1: return 'other_nonlinear' return 'other_linear' def splitfactor(p, DE, coefficientD=False, z=None): """ Splitting factorization. Explanation =========== Given a derivation D on k[t] and ``p`` in k[t], return (p_n, p_s) in k[t] x k[t] such that p = p_n*p_s, p_s is special, and each square factor of p_n is normal. Page. 100 """ kinv = [1/x for x in DE.T[:DE.level]] if z: kinv.append(z) One = Poly(1, DE.t, domain=p.get_domain()) Dp = derivation(p, DE, coefficientD=coefficientD) # XXX: Is this right? if p.is_zero: return (p, One) if not p.expr.has(DE.t): s = p.as_poly(*kinv).gcd(Dp.as_poly(*kinv)).as_poly(DE.t) n = p.exquo(s) return (n, s) if not Dp.is_zero: h = p.gcd(Dp).to_field() g = p.gcd(p.diff(DE.t)).to_field() s = h.exquo(g) if s.degree(DE.t) == 0: return (p, One) q_split = splitfactor(p.exquo(s), DE, coefficientD=coefficientD) return (q_split[0], q_split[1]*s) else: return (p, One) def splitfactor_sqf(p, DE, coefficientD=False, z=None, basic=False): """ Splitting Square-free Factorization. Explanation =========== Given a derivation D on k[t] and ``p`` in k[t], returns (N1, ..., Nm) and (S1, ..., Sm) in k[t]^m such that p = (N1*N2**2*...*Nm**m)*(S1*S2**2*...*Sm**m) is a splitting factorization of ``p`` and the Ni and Si are square-free and coprime. """ # TODO: This algorithm appears to be faster in every case # TODO: Verify this and splitfactor() for multiple extensions kkinv = [1/x for x in DE.T[:DE.level]] + DE.T[:DE.level] if z: kkinv = [z] S = [] N = [] p_sqf = p.sqf_list_include() if p.is_zero: return (((p, 1),), ()) for pi, i in p_sqf: Si = pi.as_poly(*kkinv).gcd(derivation(pi, DE, coefficientD=coefficientD,basic=basic).as_poly(*kkinv)).as_poly(DE.t) pi = Poly(pi, DE.t) Si = Poly(Si, DE.t) Ni = pi.exquo(Si) if not Si.is_one: S.append((Si, i)) if not Ni.is_one: N.append((Ni, i)) return (tuple(N), tuple(S)) def canonical_representation(a, d, DE): """ Canonical Representation. Explanation =========== Given a derivation D on k[t] and f = a/d in k(t), return (f_p, f_s, f_n) in k[t] x k(t) x k(t) such that f = f_p + f_s + f_n is the canonical representation of f (f_p is a polynomial, f_s is reduced (has a special denominator), and f_n is simple (has a normal denominator). 
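
    Examples
    ========

    An illustrative sketch in the base case (t == x, D == d/dx): the fraction
    f == (x + 1)/x should split as f_p == 1, f_s == 0 and f_n == 1/x.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> from sympy.integrals.risch import DifferentialExtension
    >>> from sympy.integrals.risch import canonical_representation
    >>> DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
    >>> fp, (bs, ds), (bn, dn) = canonical_representation(Poly(x + 1, x),
    ...     Poly(x, x), DE)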
""" # Make d monic l = Poly(1/d.LC(), DE.t) a, d = a.mul(l), d.mul(l) q, r = a.div(d) dn, ds = splitfactor(d, DE) b, c = gcdex_diophantine(dn.as_poly(DE.t), ds.as_poly(DE.t), r.as_poly(DE.t)) b, c = b.as_poly(DE.t), c.as_poly(DE.t) return (q, (b, ds), (c, dn)) def hermite_reduce(a, d, DE): """ Hermite Reduction - Mack's Linear Version. Given a derivation D on k(t) and f = a/d in k(t), returns g, h, r in k(t) such that f = Dg + h + r, h is simple, and r is reduced. """ # Make d monic l = Poly(1/d.LC(), DE.t) a, d = a.mul(l), d.mul(l) fp, fs, fn = canonical_representation(a, d, DE) a, d = fn l = Poly(1/d.LC(), DE.t) a, d = a.mul(l), d.mul(l) ga = Poly(0, DE.t) gd = Poly(1, DE.t) dd = derivation(d, DE) dm = gcd(d, dd).as_poly(DE.t) ds, r = d.div(dm) while dm.degree(DE.t)>0: ddm = derivation(dm, DE) dm2 = gcd(dm, ddm) dms, r = dm.div(dm2) ds_ddm = ds.mul(ddm) ds_ddm_dm, r = ds_ddm.div(dm) b, c = gcdex_diophantine(-ds_ddm_dm.as_poly(DE.t), dms.as_poly(DE.t), a.as_poly(DE.t)) b, c = b.as_poly(DE.t), c.as_poly(DE.t) db = derivation(b, DE).as_poly(DE.t) ds_dms, r = ds.div(dms) a = c.as_poly(DE.t) - db.mul(ds_dms).as_poly(DE.t) ga = ga*dm + b*gd gd = gd*dm ga, gd = ga.cancel(gd, include=True) dm = dm2 d = ds q, r = a.div(d) ga, gd = ga.cancel(gd, include=True) r, d = r.cancel(d, include=True) rra = q*fs[1] + fp*fs[1] + fs[0] rrd = fs[1] rra, rrd = rra.cancel(rrd, include=True) return ((ga, gd), (r, d), (rra, rrd)) def polynomial_reduce(p, DE): """ Polynomial Reduction. Explanation =========== Given a derivation D on k(t) and p in k[t] where t is a nonlinear monomial over k, return q, r in k[t] such that p = Dq + r, and deg(r) < deg_t(Dt). """ q = Poly(0, DE.t) while p.degree(DE.t) >= DE.d.degree(DE.t): m = p.degree(DE.t) - DE.d.degree(DE.t) + 1 q0 = Poly(DE.t**m, DE.t).mul(Poly(p.as_poly(DE.t).LC()/ (m*DE.d.LC()), DE.t)) q += q0 p = p - derivation(q0, DE) return (q, p) def laurent_series(a, d, F, n, DE): """ Contribution of ``F`` to the full partial fraction decomposition of A/D. Explanation =========== Given a field K of characteristic 0 and ``A``,``D``,``F`` in K[x] with D monic, nonzero, coprime with A, and ``F`` the factor of multiplicity n in the square- free factorization of D, return the principal parts of the Laurent series of A/D at all the zeros of ``F``. 
""" if F.degree()==0: return 0 Z = _symbols('z', n) Z.insert(0, z) delta_a = Poly(0, DE.t) delta_d = Poly(1, DE.t) E = d.quo(F**n) ha, hd = (a, E*Poly(z**n, DE.t)) dF = derivation(F,DE) B, G = gcdex_diophantine(E, F, Poly(1,DE.t)) C, G = gcdex_diophantine(dF, F, Poly(1,DE.t)) # initialization F_store = F V, DE_D_list, H_list= [], [], [] for j in range(0, n): # jth derivative of z would be substituted with dfnth/(j+1) where dfnth =(d^n)f/(dx)^n F_store = derivation(F_store, DE) v = (F_store.as_expr())/(j + 1) V.append(v) DE_D_list.append(Poly(Z[j + 1],Z[j])) DE_new = DifferentialExtension(extension = {'D': DE_D_list}) #a differential indeterminate for j in range(0, n): zEha = Poly(z**(n + j), DE.t)*E**(j + 1)*ha zEhd = hd Pa, Pd = cancel((zEha, zEhd))[1], cancel((zEha, zEhd))[2] Q = Pa.quo(Pd) for i in range(0, j + 1): Q = Q.subs(Z[i], V[i]) Dha = (hd*derivation(ha, DE, basic=True).as_poly(DE.t) + ha*derivation(hd, DE, basic=True).as_poly(DE.t) + hd*derivation(ha, DE_new, basic=True).as_poly(DE.t) + ha*derivation(hd, DE_new, basic=True).as_poly(DE.t)) Dhd = Poly(j + 1, DE.t)*hd**2 ha, hd = Dha, Dhd Ff, Fr = F.div(gcd(F, Q)) F_stara, F_stard = frac_in(Ff, DE.t) if F_stara.degree(DE.t) - F_stard.degree(DE.t) > 0: QBC = Poly(Q, DE.t)*B**(1 + j)*C**(n + j) H = QBC H_list.append(H) H = (QBC*F_stard).rem(F_stara) alphas = real_roots(F_stara) for alpha in list(alphas): delta_a = delta_a*Poly((DE.t - alpha)**(n - j), DE.t) + Poly(H.eval(alpha), DE.t) delta_d = delta_d*Poly((DE.t - alpha)**(n - j), DE.t) return (delta_a, delta_d, H_list) def recognize_derivative(a, d, DE, z=None): """ Compute the squarefree factorization of the denominator of f and for each Di the polynomial H in K[x] (see Theorem 2.7.1), using the LaurentSeries algorithm. Write Di = GiEi where Gj = gcd(Hn, Di) and gcd(Ei,Hn) = 1. Since the residues of f at the roots of Gj are all 0, and the residue of f at a root alpha of Ei is Hi(a) != 0, f is the derivative of a rational function if and only if Ei = 1 for each i, which is equivalent to Di | H[-1] for each i. """ flag =True a, d = a.cancel(d, include=True) q, r = a.div(d) Np, Sp = splitfactor_sqf(d, DE, coefficientD=True, z=z) j = 1 for (s, i) in Sp: delta_a, delta_d, H = laurent_series(r, d, s, j, DE) g = gcd(d, H[-1]).as_poly() if g is not d: flag = False break j = j + 1 return flag def recognize_log_derivative(a, d, DE, z=None): """ There exists a v in K(x)* such that f = dv/v where f a rational function if and only if f can be written as f = A/D where D is squarefree,deg(A) < deg(D), gcd(A, D) = 1, and all the roots of the Rothstein-Trager resultant are integers. In that case, any of the Rothstein-Trager, Lazard-Rioboo-Trager or Czichowski algorithm produces u in K(x) such that du/dx = uf. """ z = z or Dummy('z') a, d = a.cancel(d, include=True) p, a = a.div(d) pz = Poly(z, DE.t) Dd = derivation(d, DE) q = a - pz*Dd r, R = d.resultant(q, includePRS=True) r = Poly(r, z) Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z) for s, i in Sp: # TODO also consider the complex roots # incase we have complex roots it should turn the flag false a = real_roots(s.as_poly(z)) if any(not j.is_Integer for j in a): return False return True def residue_reduce(a, d, DE, z=None, invert=True): """ Lazard-Rioboo-Rothstein-Trager resultant reduction. 
Explanation =========== Given a derivation ``D`` on k(t) and f in k(t) simple, return g elementary over k(t) and a Boolean b in {True, False} such that f - Dg in k[t] if b == True or f + h and f + h - Dg do not have an elementary integral over k(t) for any h in k<t> (reduced) if b == False. Returns (G, b), where G is a tuple of tuples of the form (s_i, S_i), such that g = Add(*[RootSum(s_i, lambda z: z*log(S_i(z, t))) for S_i, s_i in G]). f - Dg is the remaining integral, which is elementary only if b == True, and hence the integral of f is elementary only if b == True. f - Dg is not calculated in this function because that would require explicitly calculating the RootSum. Use residue_reduce_derivation(). """ # TODO: Use log_to_atan() from rationaltools.py # If r = residue_reduce(...), then the logarithmic part is given by: # sum([RootSum(a[0].as_poly(z), lambda i: i*log(a[1].as_expr()).subs(z, # i)).subs(t, log(x)) for a in r[0]]) z = z or Dummy('z') a, d = a.cancel(d, include=True) a, d = a.to_field().mul_ground(1/d.LC()), d.to_field().mul_ground(1/d.LC()) kkinv = [1/x for x in DE.T[:DE.level]] + DE.T[:DE.level] if a.is_zero: return ([], True) p, a = a.div(d) pz = Poly(z, DE.t) Dd = derivation(d, DE) q = a - pz*Dd if Dd.degree(DE.t) <= d.degree(DE.t): r, R = d.resultant(q, includePRS=True) else: r, R = q.resultant(d, includePRS=True) R_map, H = {}, [] for i in R: R_map[i.degree()] = i r = Poly(r, z) Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z) for s, i in Sp: if i == d.degree(DE.t): s = Poly(s, z).monic() H.append((s, d)) else: h = R_map.get(i) if h is None: continue h_lc = Poly(h.as_poly(DE.t).LC(), DE.t, field=True) h_lc_sqf = h_lc.sqf_list_include(all=True) for a, j in h_lc_sqf: h = Poly(h, DE.t, field=True).exquo(Poly(gcd(a, s**j, *kkinv), DE.t)) s = Poly(s, z).monic() if invert: h_lc = Poly(h.as_poly(DE.t).LC(), DE.t, field=True, expand=False) inv, coeffs = h_lc.as_poly(z, field=True).invert(s), [S.One] for coeff in h.coeffs()[1:]: L = reduced(inv*coeff.as_poly(inv.gens), [s])[1] coeffs.append(L.as_expr()) h = Poly(dict(list(zip(h.monoms(), coeffs))), DE.t) H.append((s, h)) b = all([not cancel(i.as_expr()).has(DE.t, z) for i, _ in Np]) return (H, b) def residue_reduce_to_basic(H, DE, z): """ Converts the tuple returned by residue_reduce() into a Basic expression. """ # TODO: check what Lambda does with RootOf i = Dummy('i') s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) return sum(RootSum(a[0].as_poly(z), Lambda(i, i*log(a[1].as_expr()).subs( {z: i}).subs(s))) for a in H) def residue_reduce_derivation(H, DE, z): """ Computes the derivation of an expression returned by residue_reduce(). In general, this is a rational function in t, so this returns an as_expr() result. """ # TODO: verify that this is correct for multiple extensions i = Dummy('i') return S(sum(RootSum(a[0].as_poly(z), Lambda(i, i*derivation(a[1], DE).as_expr().subs(z, i)/a[1].as_expr().subs(z, i))) for a in H)) def integrate_primitive_polynomial(p, DE): """ Integration of primitive polynomials. Explanation =========== Given a primitive monomial t over k, and ``p`` in k[t], return q in k[t], r in k, and a bool b in {True, False} such that r = p - Dq is in k if b is True, or r = p - Dq does not have an elementary integral over k(t) if b is False. 
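
    Examples
    ========

    An illustrative sketch with t = log(x), so that Dt == 1/x: p == t should
    give q == x*t and r == -1, reflecting
    Integral(log(x), x) == x*log(x) - x.

    >>> from sympy import Poly, symbols
    >>> from sympy.integrals.risch import DifferentialExtension
    >>> from sympy.integrals.risch import integrate_primitive_polynomial
    >>> x, t = symbols('x t')
    >>> DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
    >>> q, r, b = integrate_primitive_polynomial(Poly(t, t), DE)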
""" from sympy.integrals.prde import limited_integrate Zero = Poly(0, DE.t) q = Poly(0, DE.t) if not p.expr.has(DE.t): return (Zero, p, True) while True: if not p.expr.has(DE.t): return (q, p, True) Dta, Dtb = frac_in(DE.d, DE.T[DE.level - 1]) with DecrementLevel(DE): # We had better be integrating the lowest extension (x) # with ratint(). a = p.LC() aa, ad = frac_in(a, DE.t) try: rv = limited_integrate(aa, ad, [(Dta, Dtb)], DE) if rv is None: raise NonElementaryIntegralException (ba, bd), c = rv except NonElementaryIntegralException: return (q, p, False) m = p.degree(DE.t) q0 = c[0].as_poly(DE.t)*Poly(DE.t**(m + 1)/(m + 1), DE.t) + \ (ba.as_expr()/bd.as_expr()).as_poly(DE.t)*Poly(DE.t**m, DE.t) p = p - derivation(q0, DE) q = q + q0 def integrate_primitive(a, d, DE, z=None): """ Integration of primitive functions. Explanation =========== Given a primitive monomial t over k and f in k(t), return g elementary over k(t), i in k(t), and b in {True, False} such that i = f - Dg is in k if b is True or i = f - Dg does not have an elementary integral over k(t) if b is False. This function returns a Basic expression for the first argument. If b is True, the second argument is Basic expression in k to recursively integrate. If b is False, the second argument is an unevaluated Integral, which has been proven to be nonelementary. """ # XXX: a and d must be canceled, or this might return incorrect results z = z or Dummy("z") s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) g1, h, r = hermite_reduce(a, d, DE) g2, b = residue_reduce(h[0], h[1], DE, z=z) if not b: i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) - g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() - residue_reduce_derivation(g2, DE, z)) i = NonElementaryIntegral(cancel(i).subs(s), DE.x) return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + residue_reduce_to_basic(g2, DE, z), i, b) # h - Dg2 + r p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, DE, z) + r[0].as_expr()/r[1].as_expr()) p = p.as_poly(DE.t) q, i, b = integrate_primitive_polynomial(p, DE) ret = ((g1[0].as_expr()/g1[1].as_expr() + q.as_expr()).subs(s) + residue_reduce_to_basic(g2, DE, z)) if not b: # TODO: This does not do the right thing when b is False i = NonElementaryIntegral(cancel(i.as_expr()).subs(s), DE.x) else: i = cancel(i.as_expr()) return (ret, i, b) def integrate_hyperexponential_polynomial(p, DE, z): """ Integration of hyperexponential polynomials. Explanation =========== Given a hyperexponential monomial t over k and ``p`` in k[t, 1/t], return q in k[t, 1/t] and a bool b in {True, False} such that p - Dq in k if b is True, or p - Dq does not have an elementary integral over k(t) if b is False. 
""" from sympy.integrals.rde import rischDE t1 = DE.t dtt = DE.d.exquo(Poly(DE.t, DE.t)) qa = Poly(0, DE.t) qd = Poly(1, DE.t) b = True if p.is_zero: return(qa, qd, b) with DecrementLevel(DE): for i in range(-p.degree(z), p.degree(t1) + 1): if not i: continue elif i < 0: # If you get AttributeError: 'NoneType' object has no attribute 'nth' # then this should really not have expand=False # But it shouldn't happen because p is already a Poly in t and z a = p.as_poly(z, expand=False).nth(-i) else: # If you get AttributeError: 'NoneType' object has no attribute 'nth' # then this should really not have expand=False a = p.as_poly(t1, expand=False).nth(i) aa, ad = frac_in(a, DE.t, field=True) aa, ad = aa.cancel(ad, include=True) iDt = Poly(i, t1)*dtt iDta, iDtd = frac_in(iDt, DE.t, field=True) try: va, vd = rischDE(iDta, iDtd, Poly(aa, DE.t), Poly(ad, DE.t), DE) va, vd = frac_in((va, vd), t1, cancel=True) except NonElementaryIntegralException: b = False else: qa = qa*vd + va*Poly(t1**i)*qd qd *= vd return (qa, qd, b) def integrate_hyperexponential(a, d, DE, z=None, conds='piecewise'): """ Integration of hyperexponential functions. Explanation =========== Given a hyperexponential monomial t over k and f in k(t), return g elementary over k(t), i in k(t), and a bool b in {True, False} such that i = f - Dg is in k if b is True or i = f - Dg does not have an elementary integral over k(t) if b is False. This function returns a Basic expression for the first argument. If b is True, the second argument is Basic expression in k to recursively integrate. If b is False, the second argument is an unevaluated Integral, which has been proven to be nonelementary. """ # XXX: a and d must be canceled, or this might return incorrect results z = z or Dummy("z") s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) g1, h, r = hermite_reduce(a, d, DE) g2, b = residue_reduce(h[0], h[1], DE, z=z) if not b: i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) - g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() - residue_reduce_derivation(g2, DE, z)) i = NonElementaryIntegral(cancel(i.subs(s)), DE.x) return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + residue_reduce_to_basic(g2, DE, z), i, b) # p should be a polynomial in t and 1/t, because Sirr == k[t, 1/t] # h - Dg2 + r p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, DE, z) + r[0].as_expr()/r[1].as_expr()) pp = as_poly_1t(p, DE.t, z) qa, qd, b = integrate_hyperexponential_polynomial(pp, DE, z) i = pp.nth(0, 0) ret = ((g1[0].as_expr()/g1[1].as_expr()).subs(s) \ + residue_reduce_to_basic(g2, DE, z)) qas = qa.as_expr().subs(s) qds = qd.as_expr().subs(s) if conds == 'piecewise' and DE.x not in qds.free_symbols: # We have to be careful if the exponent is S.Zero! # XXX: Does qd = 0 always necessarily correspond to the exponential # equaling 1? ret += Piecewise( (qas/qds, Ne(qds, 0)), (integrate((p - i).subs(DE.t, 1).subs(s), DE.x), True) ) else: ret += qas/qds if not b: i = p - (qd*derivation(qa, DE) - qa*derivation(qd, DE)).as_expr()/\ (qd**2).as_expr() i = NonElementaryIntegral(cancel(i).subs(s), DE.x) return (ret, i, b) def integrate_hypertangent_polynomial(p, DE): """ Integration of hypertangent polynomials. Explanation =========== Given a differential field k such that sqrt(-1) is not in k, a hypertangent monomial t over k, and p in k[t], return q in k[t] and c in k such that p - Dq - c*D(t**2 + 1)/(t**1 + 1) is in k and p - Dq does not have an elementary integral over k(t) if Dc != 0. 
""" # XXX: Make sure that sqrt(-1) is not in k. q, r = polynomial_reduce(p, DE) a = DE.d.exquo(Poly(DE.t**2 + 1, DE.t)) c = Poly(r.nth(1)/(2*a.as_expr()), DE.t) return (q, c) def integrate_nonlinear_no_specials(a, d, DE, z=None): """ Integration of nonlinear monomials with no specials. Explanation =========== Given a nonlinear monomial t over k such that Sirr ({p in k[t] | p is special, monic, and irreducible}) is empty, and f in k(t), returns g elementary over k(t) and a Boolean b in {True, False} such that f - Dg is in k if b == True, or f - Dg does not have an elementary integral over k(t) if b == False. This function is applicable to all nonlinear extensions, but in the case where it returns b == False, it will only have proven that the integral of f - Dg is nonelementary if Sirr is empty. This function returns a Basic expression. """ # TODO: Integral from k? # TODO: split out nonelementary integral # XXX: a and d must be canceled, or this might not return correct results z = z or Dummy("z") s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs]))) g1, h, r = hermite_reduce(a, d, DE) g2, b = residue_reduce(h[0], h[1], DE, z=z) if not b: return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) + residue_reduce_to_basic(g2, DE, z), b) # Because f has no specials, this should be a polynomial in t, or else # there is a bug. p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2, DE, z).as_expr() + r[0].as_expr()/r[1].as_expr()).as_poly(DE.t) q1, q2 = polynomial_reduce(p, DE) if q2.expr.has(DE.t): b = False else: b = True ret = (cancel(g1[0].as_expr()/g1[1].as_expr() + q1.as_expr()).subs(s) + residue_reduce_to_basic(g2, DE, z)) return (ret, b) class NonElementaryIntegral(Integral): """ Represents a nonelementary Integral. Explanation =========== If the result of integrate() is an instance of this class, it is guaranteed to be nonelementary. Note that integrate() by default will try to find any closed-form solution, even in terms of special functions which may themselves not be elementary. To make integrate() only give elementary solutions, or, in the cases where it can prove the integral to be nonelementary, instances of this class, use integrate(risch=True). In this case, integrate() may raise NotImplementedError if it cannot make such a determination. integrate() uses the deterministic Risch algorithm to integrate elementary functions or prove that they have no elementary integral. In some cases, this algorithm can split an integral into an elementary and nonelementary part, so that the result of integrate will be the sum of an elementary expression and a NonElementaryIntegral. Examples ======== >>> from sympy import integrate, exp, log, Integral >>> from sympy.abc import x >>> a = integrate(exp(-x**2), x, risch=True) >>> print(a) Integral(exp(-x**2), x) >>> type(a) <class 'sympy.integrals.risch.NonElementaryIntegral'> >>> expr = (2*log(x)**2 - log(x) - x**2)/(log(x)**3 - x**2*log(x)) >>> b = integrate(expr, x, risch=True) >>> print(b) -log(-x + log(x))/2 + log(x + log(x))/2 + Integral(1/log(x), x) >>> type(b.atoms(Integral).pop()) <class 'sympy.integrals.risch.NonElementaryIntegral'> """ # TODO: This is useful in and of itself, because isinstance(result, # NonElementaryIntegral) will tell if the integral has been proven to be # elementary. But should we do more? Perhaps a no-op .doit() if # elementary=True? Or maybe some information on why the integral is # nonelementary. 
pass def risch_integrate(f, x, extension=None, handle_first='log', separate_integral=False, rewrite_complex=None, conds='piecewise'): r""" The Risch Integration Algorithm. Explanation =========== Only transcendental functions are supported. Currently, only exponentials and logarithms are supported, but support for trigonometric functions is forthcoming. If this function returns an unevaluated Integral in the result, it means that it has proven that integral to be nonelementary. Any errors will result in raising NotImplementedError. The unevaluated Integral will be an instance of NonElementaryIntegral, a subclass of Integral. handle_first may be either 'exp' or 'log'. This changes the order in which the extension is built, and may result in a different (but equivalent) solution (for an example of this, see issue 5109). It is also possible that the integral may be computed with one but not the other, because not all cases have been implemented yet. It defaults to 'log' so that the outer extension is exponential when possible, because more of the exponential case has been implemented. If ``separate_integral`` is ``True``, the result is returned as a tuple (ans, i), where the integral is ans + i, ans is elementary, and i is either a NonElementaryIntegral or 0. This useful if you want to try further integrating the NonElementaryIntegral part using other algorithms to possibly get a solution in terms of special functions. It is False by default. Examples ======== >>> from sympy.integrals.risch import risch_integrate >>> from sympy import exp, log, pprint >>> from sympy.abc import x First, we try integrating exp(-x**2). Except for a constant factor of 2/sqrt(pi), this is the famous error function. >>> pprint(risch_integrate(exp(-x**2), x)) / | | 2 | -x | e dx | / The unevaluated Integral in the result means that risch_integrate() has proven that exp(-x**2) does not have an elementary anti-derivative. In many cases, risch_integrate() can split out the elementary anti-derivative part from the nonelementary anti-derivative part. For example, >>> pprint(risch_integrate((2*log(x)**2 - log(x) - x**2)/(log(x)**3 - ... x**2*log(x)), x)) / | log(-x + log(x)) log(x + log(x)) | 1 - ---------------- + --------------- + | ------ dx 2 2 | log(x) | / This means that it has proven that the integral of 1/log(x) is nonelementary. This function is also known as the logarithmic integral, and is often denoted as Li(x). risch_integrate() currently only accepts purely transcendental functions with exponentials and logarithms, though note that this can include nested exponentials and logarithms, as well as exponentials with bases other than E. 
>>> pprint(risch_integrate(exp(x)*exp(exp(x)), x)) / x\ \e / e >>> pprint(risch_integrate(exp(exp(x)), x)) / | | / x\ | \e / | e dx | / >>> pprint(risch_integrate(x*x**x*log(x) + x**x + x*x**x, x)) x x*x >>> pprint(risch_integrate(x**x, x)) / | | x | x dx | / >>> pprint(risch_integrate(-1/(x*log(x)*log(log(x))**2), x)) 1 ----------- log(log(x)) """ f = S(f) DE = extension or DifferentialExtension(f, x, handle_first=handle_first, dummy=True, rewrite_complex=rewrite_complex) fa, fd = DE.fa, DE.fd result = S.Zero for case in reversed(DE.cases): if not fa.expr.has(DE.t) and not fd.expr.has(DE.t) and not case == 'base': DE.decrement_level() fa, fd = frac_in((fa, fd), DE.t) continue fa, fd = fa.cancel(fd, include=True) if case == 'exp': ans, i, b = integrate_hyperexponential(fa, fd, DE, conds=conds) elif case == 'primitive': ans, i, b = integrate_primitive(fa, fd, DE) elif case == 'base': # XXX: We can't call ratint() directly here because it doesn't # handle polynomials correctly. ans = integrate(fa.as_expr()/fd.as_expr(), DE.x, risch=False) b = False i = S.Zero else: raise NotImplementedError("Only exponential and logarithmic " "extensions are currently supported.") result += ans if b: DE.decrement_level() fa, fd = frac_in(i, DE.t) else: result = result.subs(DE.backsubs) if not i.is_zero: i = NonElementaryIntegral(i.function.subs(DE.backsubs),i.limits) if not separate_integral: result += i return result else: if isinstance(i, NonElementaryIntegral): return (result, i) else: return (result, 0)
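

# Illustrative sketch: a small driver showing how the ``separate_integral``
# flag documented in risch_integrate() can be used to split an
# antiderivative into an elementary part and a remainder that has been
# proven nonelementary.  The helper name ``_demo_separate_integral`` is an
# assumption made purely for this sketch.
def _demo_separate_integral():
    from sympy import Symbol, exp

    x = Symbol('x')
    # exp(-x**2) has no elementary antiderivative, so ``rest`` is expected
    # to be a NonElementaryIntegral and ``elem`` the (possibly zero)
    # elementary part; their sum is the full antiderivative.
    elem, rest = risch_integrate(exp(-x**2), x, separate_integral=True)
    return elem, rest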
""" Algorithms for solving Parametric Risch Differential Equations. The methods used for solving Parametric Risch Differential Equations parallel those for solving Risch Differential Equations. See the outline in the docstring of rde.py for more information. The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist. For the algorithms here G is a list of tuples of factions of the terms on the right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on the right hand side of the equation (i.e., qi in k[t]). See the docstring of each function for more information. """ from functools import reduce from sympy.core import Dummy, ilcm, Add, Mul, Pow, S from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer, bound_degree) from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative) from sympy.matrices import zeros, eye from sympy.polys import Poly, lcm, cancel, sqf_list from sympy.polys.polymatrix import PolyMatrix as Matrix from sympy.solvers import solve def prde_normal_denom(fa, fd, G, DE): """ Parametric Risch Differential Equation - Normal part of the denominator. Explanation =========== Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly normalized with respect to t, return the tuple (a, b, G, h) such that a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)), q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)). """ dn, ds = splitfactor(fd, DE) Gas, Gds = list(zip(*G)) gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t)) en, es = splitfactor(gd, DE) p = dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a = dn*h c = a*h ba = a*fa - dn*derivation(h, DE)*fd ba, bd = ba.cancel(fd, include=True) G = [(c*A).cancel(D, include=True) for A, D in G] return (a, (ba, bd), G, h) def real_imag(ba, bd, gen): """ Helper function, to get the real and imaginary part of a rational function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1). Explanation =========== Separates the even and odd power terms by checking the degree of terms wrt mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part of the numerator ba[1] is the imaginary part and bd is the denominator of the rational function. """ bd = bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()] denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()] bd_real = sum(r for r in denom_real) bd_imag = sum(r for r in denom_imag) num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()] num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()] ba_real = sum(r for r in num_real) ba_imag = sum(r for r in num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return (ba[0], ba[1], bd) def prde_special_denom(a, ba, bd, G, DE, case='auto'): """ Parametric Risch Differential Equation - Special part of the denominator. 
Explanation =========== Case is one of {'exp', 'tan', 'primitive'} for the hyperexponential, hypertangent, and primitive cases, respectively. For the hyperexponential (resp. hypertangent) case, given a derivation D on k[t] and a in k[t], b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp. gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)). For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this case. """ # TODO: Merge this with the very similar special_denom() in rde.py if case == 'auto': case = DE.case if case == 'exp': p = Poly(DE.t, DE.t) elif case == 'tan': p = Poly(DE.t**2 + 1, DE.t) elif case in ['primitive', 'base']: B = ba.quo(bd) return (a, B, G, Poly(1, DE.t)) else: raise ValueError("case must be one of {'exp', 'tan', 'primitive', " "'base'}, not %s." % case) nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t) nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G]) n = min(0, nc - min(0, nb)) if not nb: # Possible cancellation. if case == 'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): # We are guaranteed to not have problems, # because case != 'base'. alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) etaa, etad = frac_in(dcoeff, DE.t) A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) if A is not None: Q, m, z = A if Q == 1: n = min(n, m) elif case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t)) with DecrementLevel(DE): # We are guaranteed to not have problems, # because case != 'base'. betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t) betad = alphad etaa, etad = frac_in(dcoeff, DE.t) if recognize_log_derivative(Poly(2, DE.t)*betaa, betad, DE): A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) B = parametric_log_deriv(betaa, betad, etaa, etad, DE) if A is not None and B is not None: Q, s, z = A # TODO: Add test if Q == 1: n = min(n, s/2) N = max(0, -nb) pN = p**N pn = p**-n # This is 1/h A = a*pN B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G] h = pn # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n) return (A, B, G, h) def prde_linear_constraints(a, b, G, DE): """ Parametric Risch Differential Equation - Generate linear constraints on the constants. Explanation =========== Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k(t) such that for any solution c1, ..., cm in Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy a*Dp + b*p == Sum(ci*qi, (i, 1, m)). Because M has entries in k(t), and because Matrix doesn't play well with Poly, M will be a Matrix of Basic expressions. """ m = len(G) Gns, Gds = list(zip(*G)) d = reduce(lambda i, j: i.lcm(j), Gds) d = Poly(d, field=True) Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G] if not all([ri.is_zero for _, ri in Q]): N = max([ri.degree(DE.t) for _, ri in Q]) M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i)) else: M = Matrix(0, m, []) # No constraints, return the empty matrix. 
qs, _ = list(zip(*Q)) return (qs, M) def poly_linear_constraints(p, d): """ Given p = [p1, ..., pm] in k[t]^m and d in k[t], return q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and only if (c1, ..., cm) is a solution of Mx = 0, in which case the quotient is Sum(ci*qi, (i, 1, m)). """ m = len(p) q, r = zip(*[pi.div(d) for pi in p]) if not all([ri.is_zero for ri in r]): n = max([ri.degree() for ri in r]) M = Matrix(n + 1, m, lambda i, j: r[j].nth(i)) else: M = Matrix(0, m, []) # No constraints. return q, M def constant_system(A, u, DE): """ Generate a system for the constant solutions. Explanation =========== Given a differential field (K, D) with constant field C = Const(K), a Matrix A, and a vector (Matrix) u with coefficients in K, returns the tuple (B, v, s), where B is a Matrix with coefficients in C and v is a vector (Matrix) such that either v has coefficients in C, in which case s is True and the solutions in C of Ax == u are exactly all the solutions of Bx == v, or v has a non-constant coefficient, in which case s is False Ax == u has no constant solution. This algorithm is used both in solving parametric problems and in determining if an element a of K is a derivative of an element of K or the logarithmic derivative of a K-radical using the structure theorem approach. Because Poly does not play well with Matrix yet, this algorithm assumes that all matrix entries are Basic expressions. """ if not A: return A, u Au = A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] # Warning: This will NOT return correct results if cancel() cannot reduce # an identically zero expression to 0. The danger is that we might # incorrectly prove that an integral is nonelementary (such as # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x). # But this is a limitation in computer algebra in general, and implicit # in the correctness of the Risch Algorithm is the computability of the # constant field (actually, this same correctness problem exists in any # algorithm that uses rref()). # # We therefore limit ourselves to constant fields that are computable # via the cancel() function, in order to prevent a speed bottleneck from # calling some more complex simplification function (rational function # coefficients will fall into this class). Furthermore, (I believe) this # problem will only crop up if the integral explicitly contains an # expression in the constant field that is identically zero, but cannot # be reduced to such by cancel(). Therefore, a careful user can avoid this # problem entirely by being careful with the sorts of expressions that # appear in his integrand in the variables other than the integration # variable (the structure theorems should be able to completely decide these # problems in the integration variable). 
Au = Au.applyfunc(cancel) A, u = Au[:, :-1], Au[:, -1] for j in range(A.cols): for i in range(A.rows): if A[i, j].has(*DE.T): # This assumes that const(F(t0, ..., tn) == const(K) == F Ri = A[i, :] # Rm+1; m = A.rows Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/ derivation(A[i, j], DE, basic=True)) Rm1 = Rm1.applyfunc(cancel) um1 = cancel(derivation(u[i], DE, basic=True)/ derivation(A[i, j], DE, basic=True)) for s in range(A.rows): # A[s, :] = A[s, :] - A[s, i]*A[:, m+1] Asj = A[s, j] A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj])) # u[s] = u[s] - A[s, j]*u[m+1 u.row_op(s, lambda r, jj: cancel(r - Asj*um1)) A = A.col_join(Rm1) u = u.col_join(Matrix([um1])) return (A, u) def prde_spde(a, b, Q, n, DE): """ Special Polynomial Differential Equation algorithm: Parametric Version. Explanation =========== Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t] with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution c1, ..., cm in Const(k) and q in k[t] of degree at most n of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)) """ R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q])) A = a B = b + derivation(a, DE) Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)] R = list(R) n1 = n - a.degree(DE.t) return (A, B, Qq, R, n1) def prde_no_cancel_b_large(b, Q, n, DE): """ Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough. Explanation =========== Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. """ db = b.degree(DE.t) m = len(Q) H = [Poly(0, DE.t)]*m for N in range(n, -1, -1): # [n, ..., 0] for i in range(m): si = Q[i].nth(N + db)/b.LC() sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn Q[i] = Q[i] - derivation(sitn, DE) - b*sitn if all(qi.is_zero for qi in Q): dc = -1 M = zeros(0, 2) else: dc = max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i)) A, u = constant_system(M, zeros(dc + 1, 1), DE) c = eye(m) A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return (H, A) def prde_no_cancel_b_small(b, Q, n, DE): """ Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough. Explanation =========== Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. 
""" m = len(Q) H = [Poly(0, DE.t)]*m for N in range(n, 0, -1): # [n, ..., 1] for i in range(m): si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC()) sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn Q[i] = Q[i] - derivation(sitn, DE) - b*sitn if b.degree(DE.t) > 0: for i in range(m): si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t) H[i] = H[i] + si Q[i] = Q[i] - derivation(si, DE) - b*si if all(qi.is_zero for qi in Q): dc = -1 M = Matrix() else: dc = max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i)) A, u = constant_system(M, zeros(dc + 1, 1), DE) c = eye(m) A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return (H, A) # else: b is in k, deg(qi) < deg(Dt) t = DE.t if DE.case != 'base': with DecrementLevel(DE): t0 = DE.t # k = k0(t0) ba, bd = frac_in(b, t0, field=True) Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q] f, B = param_rischDE(ba, bd, Q0, DE) # f = [f1, ..., fr] in k^r and B is a matrix with # m + r columns and entries in Const(k) = Const(k0) # such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has # a solution y0 in k with c1, ..., cm in Const(k) # if and only y0 = Sum(dj*fj, (j, 1, r)) where # d1, ..., dr ar in Const(k) and # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0. # Transform fractions (fa, fd) in f into constant # polynomials fa/fd in k[t]. # (Is there a better way?) f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True) for fa, fd in f] else: # Base case. Dy == 0 for all y in k and b == 0. # Dy + b*y = Sum(ci*qi) is solvable if and only if # Sum(ci*qi) == 0 in which case the solutions are # y = d1*f1 for f1 = 1 and any d1 in Const(k) = k. f = [Poly(1, t, field=True)] # r = 1 B = Matrix([[qi.TC() for qi in Q] + [S.Zero]]) # The condition for solvability is # B*Matrix([c1, ..., cm, d1]) == 0 # There are no constraints on d1. # Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero. d = max([qi.degree(DE.t) for qi in Q]) if d > 0: M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1)) A, _ = constant_system(M, zeros(d, 1), DE) else: # No constraints on the hj. A = Matrix(0, m, []) # Solutions of the original equation are # y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)), # where ei == ci (i = 1, ..., m), when # A*Matrix([c1, ..., cm]) == 0 and # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0 # Build combined constraint matrix with m + r + m columns. r = len(f) I = eye(m) A = A.row_join(zeros(A.rows, r + m)) B = B.row_join(zeros(B.rows, m)) C = I.row_join(zeros(m, r)).row_join(-I) return f + H, A.col_join(B).col_join(C) def prde_cancel_liouvillian(b, Q, n, DE): """ Pg, 237. """ H = [] # Why use DecrementLevel? Below line answers that: # Assuming that we can solve such problems over 'k' (not k[t]) if DE.case == 'primitive': with DecrementLevel(DE): ba, bd = frac_in(b, DE.t, field=True) for i in range(n, -1, -1): if DE.case == 'exp': # this re-checking can be avoided with DecrementLevel(DE): ba, bd = frac_in(b + (i*(derivation(DE.t, DE)/DE.t)).as_poly(b.gens), DE.t, field=True) with DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q] fi, Ai = param_rischDE(ba, bd, Qy, DE) fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True) for fa, fd in fi] ri = len(fi) if i == n: M = Ai else: M = Ai.col_join(M.row_join(zeros(M.rows, ri))) Fi, hi = [None]*ri, [None]*ri # from eq. 
on top of p.238 (unnumbered) for j in range(ri): hji = fi[j] * (DE.t**i).as_poly(fi[j].gens) hi[j] = hji # building up Sum(djn*(D(fjn*t^n) - b*fjnt^n)) Fi[j] = -(derivation(hji, DE) - b*hji) H += hi # in the next loop instead of Q it has # to be Q + Fi taking its place Q = Q + Fi return (H, M) def param_poly_rischDE(a, b, q, n, DE): """Polynomial solutions of a parametric Risch differential equation. Explanation =========== Given a derivation D in k[t], a, b in k[t] relatively prime, and q = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and a matrix A with m + r columns and entries in Const(k) such that a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj, (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. """ m = len(q) if n < 0: # Only the trivial zero solution is possible. # Find relations between the qi. if all([qi.is_zero for qi in q]): return [], zeros(1, m) # No constraints. N = max([qi.degree(DE.t) for qi in q]) M = Matrix(N + 1, m, lambda i, j: q[j].nth(i)) A, _ = constant_system(M, zeros(M.rows, 1), DE) return [], A if a.is_ground: # Normalization: a = 1. a = a.LC() b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q] if not b.is_zero and (DE.case == 'base' or b.degree() > max(0, DE.d.degree() - 1)): return prde_no_cancel_b_large(b, q, n, DE) elif ((b.is_zero or b.degree() < DE.d.degree() - 1) and (DE.case == 'base' or DE.d.degree() >= 2)): return prde_no_cancel_b_small(b, q, n, DE) elif (DE.d.degree() >= 2 and b.degree() == DE.d.degree() - 1 and n > -b.as_poly().LC()/DE.d.as_poly().LC()): raise NotImplementedError("prde_no_cancel_b_equal() is " "not yet implemented.") else: # Liouvillian cases if DE.case == 'primitive' or DE.case == 'exp': return prde_cancel_liouvillian(b, q, n, DE) else: raise NotImplementedError("non-linear and hypertangent " "cases have not yet been implemented") # else: deg(a) > 0 # Iterate SPDE as long as possible cumulating coefficient # and terms for the recovery of original solutions. alpha, beta = a.one, [a.zero]*m while n >= 0: # and a, b relatively prime a, b, q, r, n = prde_spde(a, b, q, n, DE) beta = [betai + alpha*ri for betai, ri in zip(beta, r)] alpha *= a # Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to # solutions alpha*p + Sum(ci*betai) of the initial equation. d = a.gcd(b) if not d.is_ground: break # a*Dp + b*p = Sum(ci*qi) may have a polynomial solution # only if the sum is divisible by d. qq, M = poly_linear_constraints(q, d) # qq = [qq1, ..., qqm] where qqi = qi.quo(d). # M is a matrix with m columns an entries in k. # Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is # divisible by d if and only if M*Matrix([f1, ..., fm]) == 0, # in which case the quotient is Sum(fi*qqi). A, _ = constant_system(M, zeros(M.rows, 1), DE) # A is a matrix with m columns and entries in Const(k). # Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero # for c1, ..., cm in Const(k) if and only if # A*Matrix([c1, ...,cm]) == 0. V = A.nullspace() # V = [v1, ..., vu] where each vj is a column matrix with # entries aj1, ..., ajm in Const(k). # Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi). # Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji) # (i = 1, ..., m) for some d1, ..., du in Const(k). # In that case, solutions of # a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) # are the same as those of # (a/d)*Dp + (b/d)*p = Sum(dj*rj) # where rj = Sum(aji*qqi). 
if not V: # No non-trivial solution. return [], eye(m) # Could return A, but this has # the minimum number of rows. Mqq = Matrix([qq]) # A single row. r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru] # Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to # solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial # equation. These are equal to alpha*p + Sum(dj*fj) where # fj = Sum(aji*betai). Mbeta = Matrix([beta]) f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu] # # Solve the reduced equation recursively. # g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE) # g = [g1, ..., gv] in k[t]^v and and B is a matrix with u + v # columns and entries in Const(k) such that # (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n # in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. # The solutions of the original equation are then # Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)). # Collect solution components. h = f + [alpha*gk for gk in g] # Build combined relation matrix. A = -eye(m) for vj in V: A = A.row_join(vj) A = A.row_join(zeros(m, len(g))) A = A.col_join(zeros(B.rows, m).row_join(B)) return h, A def param_rischDE(fa, fd, G, DE): """ Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)). Explanation =========== Given a derivation D in k(t), f in k(t), and G = [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and entries in Const(k) such that Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. Elements of k(t) are tuples (a, d) with a and d in k[t]. """ m = len(G) q, (fa, fd) = weak_normalizer(fa, fd, DE) # Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi) # correspond to solutions y = z/q of the original equation. gamma = q G = [(q*ga).cancel(gd, include=True) for ga, gd in G] a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE) # Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond # to solutions z = q/hn of the weakly normalized equation. gamma *= hn A, B, G, hs = prde_special_denom(a, ba, bd, G, DE) # Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond # to solutions q = p/hs of the previous equation. gamma *= hs g = A.gcd(B) a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for gia, gid in G] # a*Dp + b*p = Sum(ci*gi) may have a polynomial solution # only if the sum is in k[t]. q, M = prde_linear_constraints(a, b, g, DE) # q = [q1, ..., qm] where qi in k[t] is the polynomial component # of the partial fraction expansion of gi. # M is a matrix with m columns and entries in k. # Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k, # is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0, # in which case the sum is equal to Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1), DE) # M is a matrix with m columns and entries in Const(k). # Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k) # if and only if M*Matrix([c1, ..., cm]) == 0, # in which case the sum is Sum(ci*qi). ## Reduce number of constants at this point V = M.nullspace() # V = [v1, ..., vu] where each vj is a column matrix with # entries aj1, ..., ajm in Const(k). # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u). # Sum(ci*gi) is in k[t] if and only is ci = Sum(dj*aji) # (i = 1, ..., m) for some d1, ..., du in Const(k). 
# In that case, # Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj = Sum(aji*qi) (j = 1, ..., u) in k[t]. if not V: # No non-trivial solution return [], eye(m) Mq = Matrix([q]) # A single row. r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru] # Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions # y = p/gamma of the initial equation with ci = Sum(dj*aji). try: # We try n=5. At least for prde_spde, it will always # terminate no matter what n is. n = bound_degree(a, b, r, DE, parametric=True) except NotImplementedError: # A temporary bound is set. Eventually, it will be removed. # the currently added test case takes large time # even with n=5, and much longer with large n's. n = 5 h, B = param_poly_rischDE(a, b, r, n, DE) # h = [h1, ..., hv] in k[t]^v and and B is a matrix with u + v # columns and entries in Const(k) such that # a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n # in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. # The solutions of the original equation for ci = Sum(dj*aji) # (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma. ## Build combined relation matrix with m + u + v columns. A = -eye(m) for vj in V: A = A.row_join(vj) A = A.row_join(zeros(m, len(h))) A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du. W = A.nullspace() # W = [w1, ..., wt] where each wl is a column matrix with # entries blk (k = 1, ..., m + u + v) in Const(k). # The vectors (bl1, ..., blm) generate the space of those # constant families (c1, ..., cm) for which a solution of # the equation Dy + f*y == Sum(ci*Gi) exists. They generate # the space and form a basis except possibly when Dy + f*y == 0 # is solvable in k(t}. The corresponding solutions are # y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u. v = len(h) M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's. N = M.nullspace() # N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column # vectors generating the space of linear relations between # c1, ..., cm, e1, ..., ev. C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns. return [hk.cancel(gamma, include=True) for hk in h], C def limited_integrate_reduce(fa, fd, G, DE): """ Simpler version of step 1 & 2 for the limited integration problem. Explanation =========== Given a derivation D on k(t) and f, g1, ..., gn in k(t), return (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer, g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t), c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian over k, then deg(p) <= N. So that the special part is always computed, this function calls the more general prde_special_denom() automatically if it cannot determine that S1irr == Sirr. Furthermore, it will automatically call bound_degree() when t is linear and non-Liouvillian, which for the transcendental case, implies that Dt == a*t + b with for some a, b in k*. """ dn, ds = splitfactor(fd, DE) E = [splitfactor(gd, DE) for _, gd in G] En, Es = list(zip(*E)) c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm) hn = c.gcd(c.diff(DE.t)) a = hn b = -derivation(hn, DE) N = 0 # These are the cases where we know that S1irr = Sirr, but there could be # others, and this algorithm will need to be extended to handle them. 
if DE.case in ['base', 'primitive', 'exp', 'tan']: hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm) a = hn*hs b -= (hn*derivation(hs, DE)).quo(hs) mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for ga, gd in G])) # So far, all the above are also nonlinear or Liouvillian, but if this # changes, then this will need to be updated to call bound_degree() # as per the docstring of this function (DE.case == 'other_linear'). N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu) else: # TODO: implement this raise NotImplementedError V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G] return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V) def limited_integrate(fa, fd, G, DE): """ Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n)) """ fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic() # interpreting limited integration problem as a # parametric Risch DE problem Fa = Poly(0, DE.t) Fd = Poly(1, DE.t) G = [(fa, fd)] + G h, A = param_rischDE(Fa, Fd, G, DE) V = A.nullspace() V = [v for v in V if v[0] != 0] if not V: return None else: # we can take any vector from V, we take V[0] c0 = V[0][0] # v = [-1, c1, ..., cm, d1, ..., dr] v = V[0]/(-c0) r = len(h) m = len(v) - r - 1 C = list(v[1: m + 1]) y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \ for i in range(r)]) y_num, y_den = y.as_numer_denom() Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t) Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return Y, C def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None): """ Parametric logarithmic derivative heuristic. Explanation =========== Given a derivation D on k[t], f in k(t), and a hyperexponential monomial theta over k(t), raises either NotImplementedError, in which case the heuristic failed, or returns None, in which case it has proven that no solution exists, or returns a solution (n, m, v) of the equation n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0. If this heuristic fails, the structure theorem approach will need to be used. The argument w == Dtheta/theta """ # TODO: finish writing this and write tests c1 = c1 or Dummy('c1') p, a = fa.div(fd) q, b = wa.div(wd) B = max(0, derivation(DE.t, DE).degree(DE.t) - 1) C = max(p.degree(DE.t), q.degree(DE.t)) if q.degree(DE.t) > B: eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)] s = solve(eqs, c1) if not s or not s[c1].is_Rational: # deg(q) > B, no solution for c. return None M, N = s[c1].as_numer_denom() M_poly = M.as_poly(q.gens) N_poly = N.as_poly(q.gens) nfmwa = N_poly*fa*wd - M_poly*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE, 'auto') if Qv is None: # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical. return None Q, v = Qv if Q.is_zero or v.is_zero: return None return (Q*N, Q*M, v) if p.degree(DE.t) > B: return None c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) l = fd.monic().lcm(wd.monic())*Poly(c, DE.t) ln, ls = splitfactor(l, DE) z = ls*ln.gcd(ln.diff(DE.t)) if not z.has(DE.t): # TODO: We treat this as 'no solution', until the structure # theorem version of parametric_log_deriv is implemented. return None u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z) u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z) eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))] s = solve(eqs, c1) if not s or not s[c1].is_Rational: # deg(q) <= B, no solution for c. 
return None M, N = s[c1].as_numer_denom() nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE) if Qv is None: # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical. return None Q, v = Qv if Q.is_zero or v.is_zero: return None return (Q*N, Q*M, v) def parametric_log_deriv(fa, fd, wa, wd, DE): # TODO: Write the full algorithm using the structure theorems. # try: A = parametric_log_deriv_heu(fa, fd, wa, wd, DE) # except NotImplementedError: # Heuristic failed, we have to use the full method. # TODO: This could be implemented more efficiently. # It isn't too worrisome, because the heuristic handles most difficult # cases. return A def is_deriv_k(fa, fd, DE): r""" Checks if Df/f is the derivative of an element of k(t). Explanation =========== a in k(t) is the derivative of an element of k(t) if there exists b in k(t) such that a = Db. Either returns (ans, u), such that Df/f == Du, or None, which means that Df/f is not the derivative of an element of k(t). ans is a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful for seeing exactly which elements of k(t) produce u. This function uses the structure theorem approach, which says that for any f in K, Df/f is the derivative of a element of K if and only if there are ri in QQ such that:: --- --- Dt \ r * Dt + \ r * i Df / i i / i --- = --. --- --- t f i in L i in E i K/C(x) K/C(x) Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of hyperexponential monomials of K over C(x)). If K is an elementary extension over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the transcendence degree of K over C(x). Furthermore, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) and L_K/C(x) are disjoint. The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed recursively using this same function. Therefore, it is required to pass them as indices to D (or T). E_args are the arguments of the hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] == exp(E_args[i])). This is needed to compute the final answer u such that Df/f == Du. log(f) will be the same as u up to a additive constant. This is because they will both behave the same as monomials. For example, both log(x) and log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant. Therefore, the term const is returned. const is such that log(const) + f == u. This is calculated by dividing the arguments of one logarithm from the other. Therefore, it is necessary to pass the arguments of the logarithmic terms in L_args. To handle the case where we are given Df/f, not f, use is_deriv_k_in_field(). 
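    For example, if t1 = exp(x**2) and f = t1, then Df/f == 2*x == D(x**2),
    so Df/f is the derivative of an element of K, namely x**2.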
See also ======== is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical """ # Compute Df/f dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa dfa, dfd = dfa.cancel(dfd, include=True) # Our assumption here is that each monomial is recursively transcendental if len(DE.exts) != len(DE.D): if [i for i in DE.cases if i == 'tan'] or \ ({i for i in DE.cases if i == 'primitive'} - set(DE.indices('log'))): raise NotImplementedError("Real version of the structure " "theorems with hypertangent support is not yet implemented.") # TODO: What should really be done in this case? raise NotImplementedError("Nonelementary extensions not supported " "in the structure theorems.") E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] L_part = [DE.D[i].as_expr() for i in DE.indices('log')] lhs = Matrix([E_part + L_part]) rhs = Matrix([dfa.as_expr()/dfd.as_expr()]) A, u = constant_system(lhs, rhs, DE) if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A: # If the elements of u are not all constant # Note: See comment in constant_system # Also note: derivation(basic=True) calls cancel() return None else: if not all(i.is_Rational for i in u): raise NotImplementedError("Cannot work with non-rational " "coefficients in this case.") else: terms = ([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i] for i in DE.indices('log')]) ans = list(zip(terms, u)) result = Add(*[Mul(i, j) for i, j in ans]) argterms = ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) l = [] ld = [] for i, j in zip(argterms, u): # We need to get around things like sqrt(x**2) != x # and also sqrt(x**2 + 2*x + 1) != x + 1 # Issue 10798: i need not be a polynomial i, d = i.as_numer_denom() icoeff, iterms = sqf_list(i) l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms]))) dcoeff, dterms = sqf_list(d) ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms]))) const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld)) return (ans, result, const) def is_log_deriv_k_t_radical(fa, fd, DE, Df=True): r""" Checks if Df is the logarithmic derivative of a k(t)-radical. Explanation =========== b in k(t) can be written as the logarithmic derivative of a k(t) radical if there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u. Either returns (ans, u, n, const) or None, which means that Df cannot be written as the logarithmic derivative of a k(t)-radical. ans is a list of tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for seeing exactly what elements of k(t) produce u. This function uses the structure theorem approach, which says that for any f in K, Df is the logarithmic derivative of a K-radical if and only if there are ri in QQ such that:: --- --- Dt \ r * Dt + \ r * i / i i / i --- = Df. --- --- t i in L i in E i K/C(x) K/C(x) Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of hyperexponential monomials of K over C(x)). If K is an elementary extension over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the transcendence degree of K over C(x). 
Furthermore, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) and L_K/C(x) are disjoint. The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed recursively using this same function. Therefore, it is required to pass them as indices to D (or T). L_args are the arguments of the logarithms indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is needed to compute the final answer u such that n*f == Du/u. exp(f) will be the same as u up to a multiplicative constant. This is because they will both behave the same as monomials. For example, both exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const is returned. const is such that exp(const)*f == u. This is calculated by subtracting the arguments of one exponential from the other. Therefore, it is necessary to pass the arguments of the exponential terms in E_args. To handle the case where we are given Df, not f, use is_log_deriv_k_t_radical_in_field(). See also ======== is_log_deriv_k_t_radical_in_field, is_deriv_k """ if Df: dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd = fa, fd # Our assumption here is that each monomial is recursively transcendental if len(DE.exts) != len(DE.D): if [i for i in DE.cases if i == 'tan'] or \ ({i for i in DE.cases if i == 'primitive'} - set(DE.indices('log'))): raise NotImplementedError("Real version of the structure " "theorems with hypertangent support is not yet implemented.") # TODO: What should really be done in this case? raise NotImplementedError("Nonelementary extensions not supported " "in the structure theorems.") E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] L_part = [DE.D[i].as_expr() for i in DE.indices('log')] lhs = Matrix([E_part + L_part]) rhs = Matrix([dfa.as_expr()/dfd.as_expr()]) A, u = constant_system(lhs, rhs, DE) if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A: # If the elements of u are not all constant # Note: See comment in constant_system # Also note: derivation(basic=True) calls cancel() return None else: if not all(i.is_Rational for i in u): # TODO: But maybe we can tell if they're not rational, like # log(2)/log(3). Also, there should be an option to continue # anyway, even if the result might potentially be wrong. raise NotImplementedError("Cannot work with non-rational " "coefficients in this case.") else: n = reduce(ilcm, [i.as_numer_denom()[1] for i in u]) u *= n terms = ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) ans = list(zip(terms, u)) result = Mul(*[Pow(i, j) for i, j in ans]) # exp(f) will be the same as result up to a multiplicative # constant. We now find the log of that constant. argterms = ([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i] for i in DE.indices('log')]) const = cancel(fa.as_expr()/fd.as_expr() - Add(*[Mul(i, j/n) for i, j in zip(argterms, u)])) return (ans, result, n, const) def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None): """ Checks if f can be written as the logarithmic derivative of a k(t)-radical. Explanation =========== It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False) for any given fa, fd, DE in that it finds the solution in the given field not in some (possibly unspecified extension) and "in_field" with the function name is used to indicate that. 
f in k(t) can be written as the logarithmic derivative of a k(t) radical if there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u. Either returns (n, u) or None, which means that f cannot be written as the logarithmic derivative of a k(t)-radical. case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive, hyperexponential, and hypertangent cases, respectively. If case is 'auto', it will attempt to determine the type of the derivation automatically. See also ======== is_log_deriv_k_t_radical, is_deriv_k """ fa, fd = fa.cancel(fd, include=True) # f must be simple n, s = splitfactor(fd, DE) if not s.is_one: pass z = z or Dummy('z') H, b = residue_reduce(fa, fd, DE, z=z) if not b: # I will have to verify, but I believe that the answer should be # None in this case. This should never happen for the # functions given when solving the parametric logarithmic # derivative problem when integration elementary functions (see # Bronstein's book, page 255), so most likely this indicates a bug. return None roots = [(i, i.real_roots()) for i, _ in H] if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for i, j in roots): # If f is the logarithmic derivative of a k(t)-radical, then all the # roots of the resultant must be rational numbers. return None # [(a, i), ...], where i*log(a) is a term in the log-part of the integral # of f respolys, residues = list(zip(*roots)) or [[], []] # Note: this might be empty, but everything below should work find in that # case (it should be the same as if it were [[1, 1]]) residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for i in residues[j]] # TODO: finish writing this and write tests p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z)) p = p.as_poly(DE.t) if p is None: # f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical return None if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)): return None if case == 'auto': case = DE.case if case == 'exp': wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) with DecrementLevel(DE): pa, pd = frac_in(p, DE.t, cancel=True) wa, wd = frac_in((wa, wd), DE.t) A = parametric_log_deriv(pa, pd, wa, wd, DE) if A is None: return None n, e, u = A u *= DE.t**e elif case == 'primitive': with DecrementLevel(DE): pa, pd = frac_in(p, DE.t) A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto') if A is None: return None n, u = A elif case == 'base': # TODO: we can use more efficient residue reduction from ratint() if not fd.is_sqf or fa.degree() >= fd.degree(): # f is the logarithmic derivative in the base case if and only if # f = fa/fd, fd is square-free, deg(fa) < deg(fd), and # gcd(fa, fd) == 1. The last condition is handled by cancel() above. return None # Note: if residueterms = [], returns (1, 1) # f had better be 0 in that case. n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S.One) u = Mul(*[Pow(i, j*n) for i, j in residueterms]) return (n, u) elif case == 'tan': raise NotImplementedError("The hypertangent case is " "not yet implemented for is_log_deriv_k_t_radical_in_field()") elif case in ['other_linear', 'other_nonlinear']: # XXX: If these are supported by the structure theorems, change to NotImplementedError. raise ValueError("The %s case is not supported in this function." 
% case) else: raise ValueError("case must be one of {'primitive', 'exp', 'tan', " "'base', 'auto'}, not %s" % case) common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in residueterms]] + [n], S.One) residueterms = [(i, j*common_denom) for i, j in residueterms] m = common_denom//n if common_denom != n*m: # Verify exact division raise ValueError("Inexact division") u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms])) return (common_denom, u)
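

# Illustrative sketch: exercising the structure-theorem helper above
# directly in the base case k = C(x) (Dt = 1).  ``DifferentialExtension``
# comes from sympy.integrals.risch; the helper name
# ``_demo_log_deriv_radical`` and the sample input 2/x are assumptions made
# purely for this sketch.
def _demo_log_deriv_radical():
    from sympy.abc import x
    from sympy.integrals.risch import DifferentialExtension

    DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
    # 2/x == D(x**2)/x**2, so 2/x should be recognized as the logarithmic
    # derivative of a k-radical; one expects n == 1 and u == x**2.
    return is_log_deriv_k_t_radical_in_field(Poly(2, x), Poly(x, x), DE)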
""" SymPy core decorators. The purpose of this module is to expose decorators without any other dependencies, so that they can be easily imported anywhere in sympy/core. """ from functools import wraps from .sympify import SympifyError, sympify def deprecated(**decorator_kwargs): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.""" from sympy.utilities.exceptions import SymPyDeprecationWarning def _warn_deprecation(wrapped, stacklevel): decorator_kwargs.setdefault('feature', wrapped.__name__) SymPyDeprecationWarning(**decorator_kwargs).warn(stacklevel=stacklevel) def deprecated_decorator(wrapped): if hasattr(wrapped, '__mro__'): # wrapped is actually a class class wrapper(wrapped): __doc__ = wrapped.__doc__ __name__ = wrapped.__name__ __module__ = wrapped.__module__ _sympy_deprecated_func = wrapped def __init__(self, *args, **kwargs): _warn_deprecation(wrapped, 4) super().__init__(*args, **kwargs) else: @wraps(wrapped) def wrapper(*args, **kwargs): _warn_deprecation(wrapped, 3) return wrapped(*args, **kwargs) wrapper._sympy_deprecated_func = wrapped return wrapper return deprecated_decorator def _sympifyit(arg, retval=None): """ decorator to smartly _sympify function arguments Explanation =========== @_sympifyit('other', NotImplemented) def add(self, other): ... In add, other can be thought of as already being a SymPy object. If it is not, the code is likely to catch an exception, then other will be explicitly _sympified, and the whole code restarted. if _sympify(arg) fails, NotImplemented will be returned See also ======== __sympifyit """ def deco(func): return __sympifyit(func, arg, retval) return deco def __sympifyit(func, arg, retval=None): """decorator to _sympify `arg` argument for function `func` don't use directly -- use _sympifyit instead """ # we support f(a,b) only if not func.__code__.co_argcount: raise LookupError("func not found") # only b is _sympified assert func.__code__.co_varnames[1] == arg if retval is None: @wraps(func) def __sympifyit_wrapper(a, b): return func(a, sympify(b, strict=True)) else: @wraps(func) def __sympifyit_wrapper(a, b): try: # If an external class has _op_priority, it knows how to deal # with sympy objects. Otherwise, it must be converted. if not hasattr(b, '_op_priority'): b = sympify(b, strict=True) return func(a, b) except SympifyError: return retval return __sympifyit_wrapper def call_highest_priority(method_name): """A decorator for binary special methods to handle _op_priority. Explanation =========== Binary special methods in Expr and its subclasses use a special attribute '_op_priority' to determine whose special method will be called to handle the operation. In general, the object having the highest value of '_op_priority' will handle the operation. Expr and subclasses that define custom binary special methods (__mul__, etc.) should decorate those methods with this decorator to add the priority logic. The ``method_name`` argument is the name of the method of the other class that will be called. Use this decorator in the following manner:: # Call other.__rmul__ if other._op_priority > self._op_priority @call_highest_priority('__rmul__') def __mul__(self, other): ... # Call other.__mul__ if other._op_priority > self._op_priority @call_highest_priority('__mul__') def __rmul__(self, other): ... 
""" def priority_decorator(func): @wraps(func) def binary_op_wrapper(self, other): if hasattr(other, '_op_priority'): if other._op_priority > self._op_priority: f = getattr(other, method_name, None) if f is not None: return f(self) return func(self, other) return binary_op_wrapper return priority_decorator def sympify_method_args(cls): '''Decorator for a class with methods that sympify arguments. Explanation =========== The sympify_method_args decorator is to be used with the sympify_return decorator for automatic sympification of method arguments. This is intended for the common idiom of writing a class like : Examples ======== >>> from sympy.core.basic import Basic >>> from sympy.core.sympify import _sympify, SympifyError >>> class MyTuple(Basic): ... def __add__(self, other): ... try: ... other = _sympify(other) ... except SympifyError: ... return NotImplemented ... if not isinstance(other, MyTuple): ... return NotImplemented ... return MyTuple(*(self.args + other.args)) >>> MyTuple(1, 2) + MyTuple(3, 4) MyTuple(1, 2, 3, 4) In the above it is important that we return NotImplemented when other is not sympifiable and also when the sympified result is not of the expected type. This allows the MyTuple class to be used cooperatively with other classes that overload __add__ and want to do something else in combination with instance of Tuple. Using this decorator the above can be written as >>> from sympy.core.decorators import sympify_method_args, sympify_return >>> @sympify_method_args ... class MyTuple(Basic): ... @sympify_return([('other', 'MyTuple')], NotImplemented) ... def __add__(self, other): ... return MyTuple(*(self.args + other.args)) >>> MyTuple(1, 2) + MyTuple(3, 4) MyTuple(1, 2, 3, 4) The idea here is that the decorators take care of the boiler-plate code for making this happen in each method that potentially needs to accept unsympified arguments. Then the body of e.g. the __add__ method can be written without needing to worry about calling _sympify or checking the type of the resulting object. The parameters for sympify_return are a list of tuples of the form (parameter_name, expected_type) and the value to return (e.g. NotImplemented). The expected_type parameter can be a type e.g. Tuple or a string 'Tuple'. Using a string is useful for specifying a Type within its class body (as in the above example). Notes: Currently sympify_return only works for methods that take a single argument (not including self). Specifying an expected_type as a string only works for the class in which the method is defined. ''' # Extract the wrapped methods from each of the wrapper objects created by # the sympify_return decorator. Doing this here allows us to provide the # cls argument which is used for forward string referencing. for attrname, obj in cls.__dict__.items(): if isinstance(obj, _SympifyWrapper): setattr(cls, attrname, obj.make_wrapped(cls)) return cls def sympify_return(*args): '''Function/method decorator to sympify arguments automatically See the docstring of sympify_method_args for explanation. ''' # Store a wrapper object for the decorated method def wrapper(func): return _SympifyWrapper(func, args) return wrapper class _SympifyWrapper: '''Internal class used by sympify_return and sympify_method_args''' def __init__(self, func, args): self.func = func self.args = args def make_wrapped(self, cls): func = self.func parameters, retval = self.args # XXX: Handle more than one parameter? 
[(parameter, expectedcls)] = parameters # Handle forward references to the current class using strings if expectedcls == cls.__name__: expectedcls = cls # Raise RuntimeError since this is a failure at import time and should # not be recoverable. nargs = func.__code__.co_argcount # we support f(a, b) only if nargs != 2: raise RuntimeError('sympify_return can only be used with 2 argument functions') # only b is _sympified if func.__code__.co_varnames[1] != parameter: raise RuntimeError('parameter name mismatch "%s" in %s' % (parameter, func.__name__)) @wraps(func) def _func(self, other): # XXX: The check for _op_priority here should be removed. It is # needed to stop mutable matrices from being sympified to # immutable matrices which breaks things in quantum... if not hasattr(other, '_op_priority'): try: other = sympify(other, strict=True) except SympifyError: return retval if not isinstance(other, expectedcls): return retval return func(self, other) return _func
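# Illustrative sketch, not part of the original module: how ``_sympifyit``
# and ``call_highest_priority`` are typically stacked on a binary special
# method.  ``_PriorityExample`` and its priority value are hypothetical;
# only the decorator usage mirrors the definitions above.
def _priority_example():
    from sympy import Symbol
    from sympy.core.expr import Expr

    class _PriorityExample(Expr):
        # higher than Expr's default _op_priority (10.0), so mixed
        # operations defer to this class's methods
        _op_priority = 11.0

        @_sympifyit('other', NotImplemented)
        @call_highest_priority('__radd__')
        def __add__(self, other):
            # ``other`` is already sympified here; arguments that cannot
            # be sympified make the wrapper return NotImplemented instead
            return (self, other)

    return _PriorityExample() + Symbol('x')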
"""Base class for all the objects in SymPy""" from collections import defaultdict from collections.abc import Mapping from itertools import chain, zip_longest from .assumptions import BasicMeta, ManagedProperties from .cache import cacheit from .sympify import _sympify, sympify, SympifyError from .compatibility import iterable, ordered from .singleton import S from ._print_helpers import Printable from inspect import getmro def as_Basic(expr): """Return expr as a Basic instance using strict sympify or raise a TypeError; this is just a wrapper to _sympify, raising a TypeError instead of a SympifyError.""" from sympy.utilities.misc import func_name try: return _sympify(expr) except SympifyError: raise TypeError( 'Argument must be a Basic object, not `%s`' % func_name( expr)) class Basic(Printable, metaclass=ManagedProperties): """ Base class for all SymPy objects. Notes and conventions ===================== 1) Always use ``.args``, when accessing parameters of some instance: >>> from sympy import cot >>> from sympy.abc import x, y >>> cot(x).args (x,) >>> cot(x).args[0] x >>> (x*y).args (x, y) >>> (x*y).args[1] y 2) Never use internal methods or variables (the ones prefixed with ``_``): >>> cot(x)._args # do not use this, use cot(x).args instead (x,) 3) By "SymPy object" we mean something that can be returned by ``sympify``. But not all objects one encounters using SymPy are subclasses of Basic. For example, mutable objects are not: >>> from sympy import Basic, Matrix, sympify >>> A = Matrix([[1, 2], [3, 4]]).as_mutable() >>> isinstance(A, Basic) False >>> B = sympify(A) >>> isinstance(B, Basic) True """ __slots__ = ('_mhash', # hash value '_args', # arguments '_assumptions' ) # To be overridden with True in the appropriate subclasses is_number = False is_Atom = False is_Symbol = False is_symbol = False is_Indexed = False is_Dummy = False is_Wild = False is_Function = False is_Add = False is_Mul = False is_Pow = False is_Number = False is_Float = False is_Rational = False is_Integer = False is_NumberSymbol = False is_Order = False is_Derivative = False is_Piecewise = False is_Poly = False is_AlgebraicNumber = False is_Relational = False is_Equality = False is_Boolean = False is_Not = False is_Matrix = False is_Vector = False is_Point = False is_MatAdd = False is_MatMul = False def __new__(cls, *args): obj = object.__new__(cls) obj._assumptions = cls.default_assumptions obj._mhash = None # will be set by __hash__ method. obj._args = args # all items in args must be Basic objects return obj def copy(self): return self.func(*self.args) def __reduce_ex__(self, proto): """ Pickling support.""" return type(self), self.__getnewargs__(), self.__getstate__() def __getnewargs__(self): return self.args def __getstate__(self): return {} def __setstate__(self, state): for k, v in state.items(): setattr(self, k, v) def __hash__(self): # hash cannot be cached using cache_it because infinite recurrence # occurs as hash is needed for setting cache dictionary keys h = self._mhash if h is None: h = hash((type(self).__name__,) + self._hashable_content()) self._mhash = h return h def _hashable_content(self): """Return a tuple of information about self that can be used to compute the hash. If a class defines additional attributes, like ``name`` in Symbol, then this method should be updated accordingly to return such relevant attributes. Defining more than _hashable_content is necessary if __eq__ has been defined by a class. 
See note about this in Basic.__eq__.""" return self._args @property def assumptions0(self): """ Return object `type` assumptions. For example: Symbol('x', real=True) Symbol('x', integer=True) are different objects. In other words, besides Python type (Symbol in this case), the initial assumptions are also forming their typeinfo. Examples ======== >>> from sympy import Symbol >>> from sympy.abc import x >>> x.assumptions0 {'commutative': True} >>> x = Symbol("x", positive=True) >>> x.assumptions0 {'commutative': True, 'complex': True, 'extended_negative': False, 'extended_nonnegative': True, 'extended_nonpositive': False, 'extended_nonzero': True, 'extended_positive': True, 'extended_real': True, 'finite': True, 'hermitian': True, 'imaginary': False, 'infinite': False, 'negative': False, 'nonnegative': True, 'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True, 'zero': False} """ return {} def compare(self, other): """ Return -1, 0, 1 if the object is smaller, equal, or greater than other. Not in the mathematical sense. If the object is of a different type from the "other" then their classes are ordered according to the sorted_classes list. Examples ======== >>> from sympy.abc import x, y >>> x.compare(y) -1 >>> x.compare(x) 0 >>> y.compare(x) 1 """ # all redefinitions of __cmp__ method should start with the # following lines: if self is other: return 0 n1 = self.__class__ n2 = other.__class__ c = (n1 > n2) - (n1 < n2) if c: return c # st = self._hashable_content() ot = other._hashable_content() c = (len(st) > len(ot)) - (len(st) < len(ot)) if c: return c for l, r in zip(st, ot): l = Basic(*l) if isinstance(l, frozenset) else l r = Basic(*r) if isinstance(r, frozenset) else r if isinstance(l, Basic): c = l.compare(r) else: c = (l > r) - (l < r) if c: return c return 0 @staticmethod def _compare_pretty(a, b): from sympy.series.order import Order if isinstance(a, Order) and not isinstance(b, Order): return 1 if not isinstance(a, Order) and isinstance(b, Order): return -1 if a.is_Rational and b.is_Rational: l = a.p * b.q r = b.p * a.q return (l > r) - (l < r) else: from sympy.core.symbol import Wild p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3") r_a = a.match(p1 * p2**p3) if r_a and p3 in r_a: a3 = r_a[p3] r_b = b.match(p1 * p2**p3) if r_b and p3 in r_b: b3 = r_b[p3] c = Basic.compare(a3, b3) if c != 0: return c return Basic.compare(a, b) @classmethod def fromiter(cls, args, **assumptions): """ Create a new object from an iterable. This is a convenience function that allows one to create objects from any iterable, without having to convert to a list or tuple first. Examples ======== >>> from sympy import Tuple >>> Tuple.fromiter(i for i in range(5)) (0, 1, 2, 3, 4) """ return cls(*tuple(args), **assumptions) @classmethod def class_key(cls): """Nice order of classes. """ return 5, 0, cls.__name__ @cacheit def sort_key(self, order=None): """ Return a sort key. 
Examples ======== >>> from sympy.core import S, I >>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key()) [1/2, -I, I] >>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]") [x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)] >>> sorted(_, key=lambda x: x.sort_key()) [x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2] """ # XXX: remove this when issue 5169 is fixed def inner_key(arg): if isinstance(arg, Basic): return arg.sort_key(order) else: return arg args = self._sorted_args args = len(args), tuple([inner_key(arg) for arg in args]) return self.class_key(), args, S.One.sort_key(), S.One def __eq__(self, other): """Return a boolean indicating whether a == b on the basis of their symbolic trees. This is the same as a.compare(b) == 0 but faster. Notes ===== If a class that overrides __eq__() needs to retain the implementation of __hash__() from a parent class, the interpreter must be told this explicitly by setting __hash__ = <ParentClass>.__hash__. Otherwise the inheritance of __hash__() will be blocked, just as if __hash__ had been explicitly set to None. References ========== from http://docs.python.org/dev/reference/datamodel.html#object.__hash__ """ if self is other: return True tself = type(self) tother = type(other) if tself is not tother: try: other = _sympify(other) tother = type(other) except SympifyError: return NotImplemented # As long as we have the ordering of classes (sympy.core), # comparing types will be slow in Python 2, because it uses # __cmp__. Until we can remove it # (https://github.com/sympy/sympy/issues/4269), we only compare # types in Python 2 directly if they actually have __ne__. if type(tself).__ne__ is not type.__ne__: if tself != tother: return False elif tself is not tother: return False return self._hashable_content() == other._hashable_content() def __ne__(self, other): """``a != b`` -> Compare two symbolic trees and see whether they are different this is the same as: ``a.compare(b) != 0`` but faster """ return not self == other def dummy_eq(self, other, symbol=None): """ Compare two expressions and handle dummy symbols. Examples ======== >>> from sympy import Dummy >>> from sympy.abc import x, y >>> u = Dummy('u') >>> (u**2 + 1).dummy_eq(x**2 + 1) True >>> (u**2 + 1) == (x**2 + 1) False >>> (u**2 + y).dummy_eq(x**2 + y, x) True >>> (u**2 + y).dummy_eq(x**2 + y, y) False """ s = self.as_dummy() o = _sympify(other) o = o.as_dummy() dummy_symbols = [i for i in s.free_symbols if i.is_Dummy] if len(dummy_symbols) == 1: dummy = dummy_symbols.pop() else: return s == o if symbol is None: symbols = o.free_symbols if len(symbols) == 1: symbol = symbols.pop() else: return s == o tmp = dummy.__class__() return s.xreplace({dummy: tmp}) == o.xreplace({symbol: tmp}) def atoms(self, *types): """Returns the atoms that form the current object. By default, only objects that are truly atomic and can't be divided into smaller pieces are returned: symbols, numbers, and number symbols like I and pi. It is possible to request atoms of any type, however, as demonstrated below. Examples ======== >>> from sympy import I, pi, sin >>> from sympy.abc import x, y >>> (1 + x + 2*sin(y + I*pi)).atoms() {1, 2, I, pi, x, y} If one or more types are given, the results will contain only those types of atoms. 
>>> from sympy import Number, NumberSymbol, Symbol >>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol) {x, y} >>> (1 + x + 2*sin(y + I*pi)).atoms(Number) {1, 2} >>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol) {1, 2, pi} >>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I) {1, 2, I, pi} Note that I (imaginary unit) and zoo (complex infinity) are special types of number symbols and are not part of the NumberSymbol class. The type can be given implicitly, too: >>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol {x, y} Be careful to check your assumptions when using the implicit option since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all integers in an expression: >>> from sympy import S >>> (1 + x + 2*sin(y + I*pi)).atoms(S(1)) {1} >>> (1 + x + 2*sin(y + I*pi)).atoms(S(2)) {1, 2} Finally, arguments to atoms() can select more than atomic atoms: any sympy type (loaded in core/__init__.py) can be listed as an argument and those types of "atoms" as found in scanning the arguments of the expression recursively: >>> from sympy import Function, Mul >>> from sympy.core.function import AppliedUndef >>> f = Function('f') >>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function) {f(x), sin(y + I*pi)} >>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef) {f(x)} >>> (1 + x + 2*sin(y + I*pi)).atoms(Mul) {I*pi, 2*sin(y + I*pi)} """ if types: types = tuple( [t if isinstance(t, type) else type(t) for t in types]) nodes = preorder_traversal(self) if types: result = {node for node in nodes if isinstance(node, types)} else: result = {node for node in nodes if not node.args} return result @property def free_symbols(self): """Return from the atoms of self those which are free symbols. For most expressions, all symbols are free symbols. For some classes this is not true. e.g. Integrals use Symbols for the dummy variables which are bound variables, so Integral has a method to return all symbols except those. Derivative keeps track of symbols with respect to which it will perform a derivative; those are bound variables, too, so it has its own free_symbols method. Any other method that uses bound variables should implement a free_symbols method.""" return set().union(*[a.free_symbols for a in self.args]) @property def expr_free_symbols(self): return set() def as_dummy(self): """Return the expression with any objects having structurally bound symbols replaced with unique, canonical symbols within the object in which they appear and having only the default assumption for commutativity being True. When applied to a symbol a new symbol having only the same commutativity will be returned. Examples ======== >>> from sympy import Integral, Symbol >>> from sympy.abc import x >>> r = Symbol('r', real=True) >>> Integral(r, (r, x)).as_dummy() Integral(_0, (_0, x)) >>> _.variables[0].is_real is None True >>> r.as_dummy() _r Notes ===== Any object that has structurally bound variables should have a property, `bound_symbols` that returns those symbols appearing in the object. 
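        For instance, ``Integral(x, (x, y))`` reports ``bound_symbols == [x]``
        (the integration variable) while ``y`` remains among its free symbols.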
""" from sympy.core.symbol import Dummy, Symbol def can(x): # mask free that shadow bound free = x.free_symbols bound = set(x.bound_symbols) d = {i: Dummy() for i in bound & free} x = x.subs(d) # replace bound with canonical names x = x.xreplace(x.canonical_variables) # return after undoing masking return x.xreplace({v: k for k, v in d.items()}) if not self.has(Symbol): return self return self.replace( lambda x: hasattr(x, 'bound_symbols'), lambda x: can(x), simultaneous=False) @property def canonical_variables(self): """Return a dictionary mapping any variable defined in ``self.bound_symbols`` to Symbols that do not clash with any free symbols in the expression. Examples ======== >>> from sympy import Lambda >>> from sympy.abc import x >>> Lambda(x, 2*x).canonical_variables {x: _0} """ from sympy.utilities.iterables import numbered_symbols if not hasattr(self, 'bound_symbols'): return {} dums = numbered_symbols('_') reps = {} # watch out for free symbol that are not in bound symbols; # those that are in bound symbols are about to get changed bound = self.bound_symbols names = {i.name for i in self.free_symbols - set(bound)} for b in bound: d = next(dums) if b.is_Symbol: while d.name in names: d = next(dums) reps[b] = d return reps def rcall(self, *args): """Apply on the argument recursively through the expression tree. This method is used to simulate a common abuse of notation for operators. For instance in SymPy the the following will not work: ``(x+Lambda(y, 2*y))(z) == x+2*z``, however you can use >>> from sympy import Lambda >>> from sympy.abc import x, y, z >>> (x + Lambda(y, 2*y)).rcall(z) x + 2*z """ return Basic._recursive_call(self, args) @staticmethod def _recursive_call(expr_to_call, on_args): """Helper for rcall method.""" from sympy import Symbol def the_call_method_is_overridden(expr): for cls in getmro(type(expr)): if '__call__' in cls.__dict__: return cls != Basic if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call): if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is return expr_to_call # transformed into an UndefFunction else: return expr_to_call(*on_args) elif expr_to_call.args: args = [Basic._recursive_call( sub, on_args) for sub in expr_to_call.args] return type(expr_to_call)(*args) else: return expr_to_call def is_hypergeometric(self, k): from sympy.simplify import hypersimp from sympy.functions import Piecewise if self.has(Piecewise): return None return hypersimp(self, k) is not None @property def is_comparable(self): """Return True if self can be computed to a real number (or already is a real number) with precision, else False. Examples ======== >>> from sympy import exp_polar, pi, I >>> (I*exp_polar(I*pi/2)).is_comparable True >>> (I*exp_polar(I*pi*2)).is_comparable False A False result does not mean that `self` cannot be rewritten into a form that would be comparable. 
For example, the difference computed below is zero but without simplification it does not evaluate to a zero with precision: >>> e = 2**pi*(1 + 2**pi) >>> dif = e - e.expand() >>> dif.is_comparable False >>> dif.n(2)._prec 1 """ is_extended_real = self.is_extended_real if is_extended_real is False: return False if not self.is_number: return False # don't re-eval numbers that are already evaluated since # this will create spurious precision n, i = [p.evalf(2) if not p.is_Number else p for p in self.as_real_imag()] if not (i.is_Number and n.is_Number): return False if i: # if _prec = 1 we can't decide and if not, # the answer is False because numbers with # imaginary parts can't be compared # so return False return False else: return n._prec != 1 @property def func(self): """ The top-level function in an expression. The following should hold for all objects:: >> x == x.func(*x.args) Examples ======== >>> from sympy.abc import x >>> a = 2*x >>> a.func <class 'sympy.core.mul.Mul'> >>> a.args (2, x) >>> a.func(*a.args) 2*x >>> a == a.func(*a.args) True """ return self.__class__ @property def args(self): """Returns a tuple of arguments of 'self'. Examples ======== >>> from sympy import cot >>> from sympy.abc import x, y >>> cot(x).args (x,) >>> cot(x).args[0] x >>> (x*y).args (x, y) >>> (x*y).args[1] y Notes ===== Never use self._args, always use self.args. Only use _args in __new__ when creating a new function. Don't override .args() from Basic (so that it's easy to change the interface in the future if needed). """ return self._args @property def _sorted_args(self): """ The same as ``args``. Derived classes which don't fix an order on their arguments should override this method to produce the sorted representation. """ return self.args def as_content_primitive(self, radical=False, clear=True): """A stub to allow Basic args (like Tuple) to be skipped when computing the content and primitive components of an expression. See Also ======== sympy.core.expr.Expr.as_content_primitive """ return S.One, self def subs(self, *args, **kwargs): """ Substitutes old for new in an expression after sympifying args. `args` is either: - two arguments, e.g. foo.subs(old, new) - one iterable argument, e.g. foo.subs(iterable). The iterable may be o an iterable container with (old, new) pairs. In this case the replacements are processed in the order given with successive patterns possibly affecting replacements already made. o a dict or set whose key/value items correspond to old/new pairs. In this case the old/new pairs will be sorted by op count and in case of a tie, by number of args and the default_sort_key. The resulting sorted list is then processed as an iterable container (see previous). If the keyword ``simultaneous`` is True, the subexpressions will not be evaluated until all the substitutions have been made. 
Examples ======== >>> from sympy import pi, exp, limit, oo >>> from sympy.abc import x, y >>> (1 + x*y).subs(x, pi) pi*y + 1 >>> (1 + x*y).subs({x:pi, y:2}) 1 + 2*pi >>> (1 + x*y).subs([(x, pi), (y, 2)]) 1 + 2*pi >>> reps = [(y, x**2), (x, 2)] >>> (x + y).subs(reps) 6 >>> (x + y).subs(reversed(reps)) x**2 + 2 >>> (x**2 + x**4).subs(x**2, y) y**2 + y To replace only the x**2 but not the x**4, use xreplace: >>> (x**2 + x**4).xreplace({x**2: y}) x**4 + y To delay evaluation until all substitutions have been made, set the keyword ``simultaneous`` to True: >>> (x/y).subs([(x, 0), (y, 0)]) 0 >>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True) nan This has the added feature of not allowing subsequent substitutions to affect those already made: >>> ((x + y)/y).subs({x + y: y, y: x + y}) 1 >>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True) y/(x + y) In order to obtain a canonical result, unordered iterables are sorted by count_op length, number of arguments and by the default_sort_key to break any ties. All other iterables are left unsorted. >>> from sympy import sqrt, sin, cos >>> from sympy.abc import a, b, c, d, e >>> A = (sqrt(sin(2*x)), a) >>> B = (sin(2*x), b) >>> C = (cos(2*x), c) >>> D = (x, d) >>> E = (exp(x), e) >>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x) >>> expr.subs(dict([A, B, C, D, E])) a*c*sin(d*e) + b The resulting expression represents a literal replacement of the old arguments with the new arguments. This may not reflect the limiting behavior of the expression: >>> (x**3 - 3*x).subs({x: oo}) nan >>> limit(x**3 - 3*x, x, oo) oo If the substitution will be followed by numerical evaluation, it is better to pass the substitution to evalf as >>> (1/x).evalf(subs={x: 3.0}, n=21) 0.333333333333333333333 rather than >>> (1/x).subs({x: 3.0}).evalf(21) 0.333333333333333314830 as the former will ensure that the desired level of precision is obtained. 
See Also ======== replace: replacement capable of doing wildcard-like matching, parsing of match, and conditional replacements xreplace: exact node replacement in expr tree; also capable of using matching rules sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision """ from sympy.core.compatibility import _nodes, default_sort_key from sympy.core.containers import Dict from sympy.core.symbol import Dummy, Symbol from sympy.utilities.misc import filldedent unordered = False if len(args) == 1: sequence = args[0] if isinstance(sequence, set): unordered = True elif isinstance(sequence, (Dict, Mapping)): unordered = True sequence = sequence.items() elif not iterable(sequence): raise ValueError(filldedent(""" When a single argument is passed to subs it should be a dictionary of old: new pairs or an iterable of (old, new) tuples.""")) elif len(args) == 2: sequence = [args] else: raise ValueError("subs accepts either 1 or 2 arguments") sequence = list(sequence) for i, s in enumerate(sequence): if isinstance(s[0], str): # when old is a string we prefer Symbol s = Symbol(s[0]), s[1] try: s = [sympify(_, strict=not isinstance(_, (str, type))) for _ in s] except SympifyError: # if it can't be sympified, skip it sequence[i] = None continue # skip if there is no change sequence[i] = None if _aresame(*s) else tuple(s) sequence = list(filter(None, sequence)) if unordered: sequence = dict(sequence) # order so more complex items are first and items # of identical complexity are ordered so # f(x) < f(y) < x < y # \___ 2 __/ \_1_/ <- number of nodes # # For more complex ordering use an unordered sequence. k = list(ordered(sequence, default=False, keys=( lambda x: -_nodes(x), lambda x: default_sort_key(x), ))) sequence = [(k, sequence[k]) for k in k] if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs? reps = {} rv = self kwargs['hack2'] = True m = Dummy('subs_m') for old, new in sequence: com = new.is_commutative if com is None: com = True d = Dummy('subs_d', commutative=com) # using d*m so Subs will be used on dummy variables # in things like Derivative(f(x, y), x) in which x # is both free and bound rv = rv._subs(old, d*m, **kwargs) if not isinstance(rv, Basic): break reps[d] = new reps[m] = S.One # get rid of m return rv.xreplace(reps) else: rv = self for old, new in sequence: rv = rv._subs(old, new, **kwargs) if not isinstance(rv, Basic): break return rv @cacheit def _subs(self, old, new, **hints): """Substitutes an expression old -> new. If self is not equal to old then _eval_subs is called. If _eval_subs doesn't want to make any special replacement then a None is received which indicates that the fallback should be applied wherein a search for replacements is made amongst the arguments of self. >>> from sympy import Add >>> from sympy.abc import x, y, z Examples ======== Add's _eval_subs knows how to target x + y in the following so it makes the change: >>> (x + y + z).subs(x + y, 1) z + 1 Add's _eval_subs doesn't need to know how to find x + y in the following: >>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None True The returned None will cause the fallback routine to traverse the args and pass the z*(x + y) arg to Mul where the change will take place and the substitution will succeed: >>> (z*(x + y) + 3).subs(x + y, 1) z + 3 ** Developers Notes ** An _eval_subs routine for a class should be written if: 1) any arguments are not instances of Basic (e.g. 
bool, tuple); 2) some arguments should not be targeted (as in integration variables); 3) if there is something other than a literal replacement that should be attempted (as in Piecewise where the condition may be updated without doing a replacement). If it is overridden, here are some special cases that might arise: 1) If it turns out that no special change was made and all the original sub-arguments should be checked for replacements then None should be returned. 2) If it is necessary to do substitutions on a portion of the expression then _subs should be called. _subs will handle the case of any sub-expression being equal to old (which usually would not be the case) while its fallback will handle the recursion into the sub-arguments. For example, after Add's _eval_subs removes some matching terms it must process the remaining terms so it calls _subs on each of the un-matched terms and then adds them onto the terms previously obtained. 3) If the initial expression should remain unchanged then the original expression should be returned. (Whenever an expression is returned, modified or not, no further substitution of old -> new is attempted.) Sum's _eval_subs routine uses this strategy when a substitution is attempted on any of its summation variables. """ def fallback(self, old, new): """ Try to replace old with new in any of self's arguments. """ hit = False args = list(self.args) for i, arg in enumerate(args): if not hasattr(arg, '_eval_subs'): continue arg = arg._subs(old, new, **hints) if not _aresame(arg, args[i]): hit = True args[i] = arg if hit: rv = self.func(*args) hack2 = hints.get('hack2', False) if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack coeff = S.One nonnumber = [] for i in args: if i.is_Number: coeff *= i else: nonnumber.append(i) nonnumber = self.func(*nonnumber) if coeff is S.One: return nonnumber else: return self.func(coeff, nonnumber, evaluate=False) return rv return self if _aresame(self, old): return new rv = self._eval_subs(old, new) if rv is None: rv = fallback(self, old, new) return rv def _eval_subs(self, old, new): """Override this stub if you want to do anything more than attempt a replacement of old with new in the arguments of self. See also ======== _subs """ return None def xreplace(self, rule): """ Replace occurrences of objects within the expression. Parameters ========== rule : dict-like Expresses a replacement rule Returns ======= xreplace : the result of the replacement Examples ======== >>> from sympy import symbols, pi, exp >>> x, y, z = symbols('x y z') >>> (1 + x*y).xreplace({x: pi}) pi*y + 1 >>> (1 + x*y).xreplace({x: pi, y: 2}) 1 + 2*pi Replacements occur only if an entire node in the expression tree is matched: >>> (x*y + z).xreplace({x*y: pi}) z + pi >>> (x*y*z).xreplace({x*y: pi}) x*y*z >>> (2*x).xreplace({2*x: y, x: z}) y >>> (2*2*x).xreplace({2*x: y, x: z}) 4*z >>> (x + y + 2).xreplace({x + y: 2}) x + y + 2 >>> (x + 2 + exp(x + 2)).xreplace({x + 2: y}) x + exp(y) + 2 xreplace doesn't differentiate between free and bound symbols. 
In the following, subs(x, y) would not change x since it is a bound symbol, but xreplace does: >>> from sympy import Integral >>> Integral(x, (x, 1, 2*x)).xreplace({x: y}) Integral(y, (y, 1, 2*y)) Trying to replace x with an expression raises an error: >>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP ValueError: Invalid limits given: ((2*y, 1, 4*y),) See Also ======== replace: replacement capable of doing wildcard-like matching, parsing of match, and conditional replacements subs: substitution of subexpressions as defined by the objects themselves. """ value, _ = self._xreplace(rule) return value def _xreplace(self, rule): """ Helper for xreplace. Tracks whether a replacement actually occurred. """ if self in rule: return rule[self], True elif rule: args = [] changed = False for a in self.args: _xreplace = getattr(a, '_xreplace', None) if _xreplace is not None: a_xr = _xreplace(rule) args.append(a_xr[0]) changed |= a_xr[1] else: args.append(a) args = tuple(args) if changed: return self.func(*args), True return self, False @cacheit def has(self, *patterns): """ Test whether any subexpression matches any of the patterns. Examples ======== >>> from sympy import sin >>> from sympy.abc import x, y, z >>> (x**2 + sin(x*y)).has(z) False >>> (x**2 + sin(x*y)).has(x, y, z) True >>> x.has(x) True Note ``has`` is a structural algorithm with no knowledge of mathematics. Consider the following half-open interval: >>> from sympy.sets import Interval >>> i = Interval.Lopen(0, 5); i Interval.Lopen(0, 5) >>> i.args (0, 5, True, False) >>> i.has(4) # there is no "4" in the arguments False >>> i.has(0) # there *is* a "0" in the arguments True Instead, use ``contains`` to determine whether a number is in the interval or not: >>> i.contains(4) True >>> i.contains(0) False Note that ``expr.has(*patterns)`` is exactly equivalent to ``any(expr.has(p) for p in patterns)``. In particular, ``False`` is returned when the list of patterns is empty. >>> x.has() False """ return any(self._has(pattern) for pattern in patterns) def _has(self, pattern): """Helper for .has()""" from sympy.core.function import UndefinedFunction, Function if isinstance(pattern, UndefinedFunction): return any(f.func == pattern or f == pattern for f in self.atoms(Function, UndefinedFunction)) if isinstance(pattern, BasicMeta): subtrees = preorder_traversal(self) return any(isinstance(arg, pattern) for arg in subtrees) pattern = _sympify(pattern) _has_matcher = getattr(pattern, '_has_matcher', None) if _has_matcher is not None: match = _has_matcher() return any(match(arg) for arg in preorder_traversal(self)) else: return any(arg == pattern for arg in preorder_traversal(self)) def _has_matcher(self): """Helper for .has()""" return lambda other: self == other def replace(self, query, value, map=False, simultaneous=True, exact=None): """ Replace matching subexpressions of ``self`` with ``value``. If ``map = True`` then also return the mapping {old: new} where ``old`` was a sub-expression found with query and ``new`` is the replacement value for it. If the expression itself doesn't match the query, then the returned value will be ``self.xreplace(map)`` otherwise it should be ``self.subs(ordered(map.items()))``. Traverses an expression tree and performs replacement of matching subexpressions from the bottom to the top of the tree. The default approach is to do the replacement in a simultaneous fashion so changes made are targeted only once. If this is not desired or causes problems, ``simultaneous`` can be set to False. 
In addition, if an expression containing more than one Wild symbol is being used to match subexpressions and the ``exact`` flag is None it will be set to True so the match will only succeed if all non-zero values are received for each Wild that appears in the match pattern. Setting this to False accepts a match of 0; while setting it True accepts all matches that have a 0 in them. See example below for cautions. The list of possible combinations of queries and replacement values is listed below: Examples ======== Initial setup >>> from sympy import log, sin, cos, tan, Wild, Mul, Add >>> from sympy.abc import x, y >>> f = log(sin(x)) + tan(sin(x**2)) 1.1. type -> type obj.replace(type, newtype) When object of type ``type`` is found, replace it with the result of passing its argument(s) to ``newtype``. >>> f.replace(sin, cos) log(cos(x)) + tan(cos(x**2)) >>> sin(x).replace(sin, cos, map=True) (cos(x), {sin(x): cos(x)}) >>> (x*y).replace(Mul, Add) x + y 1.2. type -> func obj.replace(type, func) When object of type ``type`` is found, apply ``func`` to its argument(s). ``func`` must be written to handle the number of arguments of ``type``. >>> f.replace(sin, lambda arg: sin(2*arg)) log(sin(2*x)) + tan(sin(2*x**2)) >>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args))) sin(2*x*y) 2.1. pattern -> expr obj.replace(pattern(wild), expr(wild)) Replace subexpressions matching ``pattern`` with the expression written in terms of the Wild symbols in ``pattern``. >>> a, b = map(Wild, 'ab') >>> f.replace(sin(a), tan(a)) log(tan(x)) + tan(tan(x**2)) >>> f.replace(sin(a), tan(a/2)) log(tan(x/2)) + tan(tan(x**2/2)) >>> f.replace(sin(a), a) log(x) + tan(x**2) >>> (x*y).replace(a*x, a) y Matching is exact by default when more than one Wild symbol is used: matching fails unless the match gives non-zero values for all Wild symbols: >>> (2*x + y).replace(a*x + b, b - a) y - 2 >>> (2*x).replace(a*x + b, b - a) 2*x When set to False, the results may be non-intuitive: >>> (2*x).replace(a*x + b, b - a, exact=False) 2/x 2.2. pattern -> func obj.replace(pattern(wild), lambda wild: expr(wild)) All behavior is the same as in 2.1 but now a function in terms of pattern variables is used rather than an expression: >>> f.replace(sin(a), lambda a: sin(2*a)) log(sin(2*x)) + tan(sin(2*x**2)) 3.1. func -> func obj.replace(filter, func) Replace subexpression ``e`` with ``func(e)`` if ``filter(e)`` is True. >>> g = 2*sin(x**3) >>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2) 4*sin(x**9) The expression itself is also targeted by the query but is done in such a fashion that changes are not made twice. 
>>> e = x*(x*y + 1) >>> e.replace(lambda x: x.is_Mul, lambda x: 2*x) 2*x*(2*x*y + 1) When matching a single symbol, `exact` will default to True, but this may or may not be the behavior that is desired: Here, we want `exact=False`: >>> from sympy import Function >>> f = Function('f') >>> e = f(1) + f(0) >>> q = f(a), lambda a: f(a + 1) >>> e.replace(*q, exact=False) f(1) + f(2) >>> e.replace(*q, exact=True) f(0) + f(2) But here, the nature of matching makes selecting the right setting tricky: >>> e = x**(1 + y) >>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False) x >>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True) x**(-x - y + 1) >>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False) x >>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True) x**(1 - y) It is probably better to use a different form of the query that describes the target expression more precisely: >>> (1 + x**(1 + y)).replace( ... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1, ... lambda x: x.base**(1 - (x.exp - 1))) ... x**(1 - y) + 1 See Also ======== subs: substitution of subexpressions as defined by the objects themselves. xreplace: exact node replacement in expr tree; also capable of using matching rules """ from sympy.core.symbol import Wild try: query = _sympify(query) except SympifyError: pass try: value = _sympify(value) except SympifyError: pass if isinstance(query, type): _query = lambda expr: isinstance(expr, query) if isinstance(value, type): _value = lambda expr, result: value(*expr.args) elif callable(value): _value = lambda expr, result: value(*expr.args) else: raise TypeError( "given a type, replace() expects another " "type or a callable") elif isinstance(query, Basic): _query = lambda expr: expr.match(query) if exact is None: exact = (len(query.atoms(Wild)) > 1) if isinstance(value, Basic): if exact: _value = lambda expr, result: (value.subs(result) if all(result.values()) else expr) else: _value = lambda expr, result: value.subs(result) elif callable(value): # match dictionary keys get the trailing underscore stripped # from them and are then passed as keywords to the callable; # if ``exact`` is True, only accept match if there are no null # values amongst those matched. if exact: _value = lambda expr, result: (value(** {str(k)[:-1]: v for k, v in result.items()}) if all(val for val in result.values()) else expr) else: _value = lambda expr, result: value(** {str(k)[:-1]: v for k, v in result.items()}) else: raise TypeError( "given an expression, replace() expects " "another expression or a callable") elif callable(query): _query = query if callable(value): _value = lambda expr, result: value(expr) else: raise TypeError( "given a callable, replace() expects " "another callable") else: raise TypeError( "first argument to replace() must be a " "type, an expression or a callable") def walk(rv, F): """Apply ``F`` to args and then to result. 
""" args = getattr(rv, 'args', None) if args is not None: if args: newargs = tuple([walk(a, F) for a in args]) if args != newargs: rv = rv.func(*newargs) if simultaneous: # if rv is something that was already # matched (that was changed) then skip # applying F again for i, e in enumerate(args): if rv == e and e != newargs[i]: return rv rv = F(rv) return rv mapping = {} # changes that took place def rec_replace(expr): result = _query(expr) if result or result == {}: v = _value(expr, result) if v is not None and v != expr: if map: mapping[expr] = v expr = v return expr rv = walk(self, rec_replace) return (rv, mapping) if map else rv def find(self, query, group=False): """Find all subexpressions matching a query. """ query = _make_find_query(query) results = list(filter(query, preorder_traversal(self))) if not group: return set(results) else: groups = {} for result in results: if result in groups: groups[result] += 1 else: groups[result] = 1 return groups def count(self, query): """Count the number of matching subexpressions. """ query = _make_find_query(query) return sum(bool(query(sub)) for sub in preorder_traversal(self)) def matches(self, expr, repl_dict={}, old=False): """ Helper method for match() that looks for a match between Wild symbols in self and expressions in expr. Examples ======== >>> from sympy import symbols, Wild, Basic >>> a, b, c = symbols('a b c') >>> x = Wild('x') >>> Basic(a + x, x).matches(Basic(a + b, c)) is None True >>> Basic(a + x, x).matches(Basic(a + b + c, b + c)) {x_: b + c} """ repl_dict = repl_dict.copy() expr = sympify(expr) if not isinstance(expr, self.__class__): return None if self == expr: return repl_dict if len(self.args) != len(expr.args): return None d = repl_dict.copy() for arg, other_arg in zip(self.args, expr.args): if arg == other_arg: continue d = arg.xreplace(d).matches(other_arg, d, old=old) if d is None: return None return d def match(self, pattern, old=False): """ Pattern matching. Wild symbols match all. Return ``None`` when expression (self) does not match with pattern. Otherwise return a dictionary such that:: pattern.xreplace(self.match(pattern)) == self Examples ======== >>> from sympy import Wild, Sum >>> from sympy.abc import x, y >>> p = Wild("p") >>> q = Wild("q") >>> r = Wild("r") >>> e = (x+y)**(x+y) >>> e.match(p**p) {p_: x + y} >>> e.match(p**q) {p_: x + y, q_: x + y} >>> e = (2*x)**2 >>> e.match(p*q**r) {p_: 4, q_: x, r_: 2} >>> (p*q**r).xreplace(e.match(p*q**r)) 4*x**2 Structurally bound symbols are ignored during matching: >>> Sum(x, (x, 1, 2)).match(Sum(y, (y, 1, p))) {p_: 2} But they can be identified if desired: >>> Sum(x, (x, 1, 2)).match(Sum(q, (q, 1, p))) {p_: 2, q_: x} The ``old`` flag will give the old-style pattern matching where expressions and patterns are essentially solved to give the match. Both of the following give None unless ``old=True``: >>> (x - 2).match(p - x, old=True) {p_: 2*x - 2} >>> (2/x).match(p*x, old=True) {p_: 2/x**2} """ from sympy.core.symbol import Wild from sympy.core.function import WildFunction from sympy.utilities.misc import filldedent pattern = sympify(pattern) # match non-bound symbols canonical = lambda x: x if x.is_Symbol else x.as_dummy() m = canonical(pattern).matches(canonical(self), old=old) if m is None: return m wild = pattern.atoms(Wild, WildFunction) # sanity check if set(m) - wild: raise ValueError(filldedent(''' Some `matches` routine did not use a copy of repl_dict and injected unexpected symbols. 
Report this as an error at https://github.com/sympy/sympy/issues''')) # now see if bound symbols were requested bwild = wild - set(m) if not bwild: return m # replace free-Wild symbols in pattern with match result # so they will match but not be in the next match wpat = pattern.xreplace(m) # identify remaining bound wild w = wpat.matches(self, old=old) # add them to m if w: m.update(w) # done return m def count_ops(self, visual=None): """wrapper for count_ops that returns the operation count.""" from sympy import count_ops return count_ops(self, visual) def doit(self, **hints): """Evaluate objects that are not evaluated by default like limits, integrals, sums and products. All objects of this kind will be evaluated recursively, unless some species were excluded via 'hints' or unless the 'deep' hint was set to 'False'. >>> from sympy import Integral >>> from sympy.abc import x >>> 2*Integral(x, x) 2*Integral(x, x) >>> (2*Integral(x, x)).doit() x**2 >>> (2*Integral(x, x)).doit(deep=False) 2*Integral(x, x) """ if hints.get('deep', True): terms = [term.doit(**hints) if isinstance(term, Basic) else term for term in self.args] return self.func(*terms) else: return self def simplify(self, **kwargs): """See the simplify function in sympy.simplify""" from sympy.simplify import simplify return simplify(self, **kwargs) def _eval_rewrite(self, pattern, rule, **hints): if self.is_Atom: if hasattr(self, rule): return getattr(self, rule)() return self if hints.get('deep', True): args = [a._eval_rewrite(pattern, rule, **hints) if isinstance(a, Basic) else a for a in self.args] else: args = self.args if pattern is None or isinstance(self, pattern): if hasattr(self, rule): rewritten = getattr(self, rule)(*args, **hints) if rewritten is not None: return rewritten return self.func(*args) if hints.get('evaluate', True) else self def _eval_derivative_n_times(self, s, n): # This is the default evaluator for derivatives (as called by `diff` # and `Derivative`), it will attempt a loop to derive the expression # `n` times by calling the corresponding `_eval_derivative` method, # while leaving the derivative unevaluated if `n` is symbolic. This # method should be overridden if the object has a closed form for its # symbolic n-th derivative. from sympy import Integer if isinstance(n, (int, Integer)): obj = self for i in range(n): obj2 = obj._eval_derivative(s) if obj == obj2 or obj2 is None: break obj = obj2 return obj2 else: return None def rewrite(self, *args, **hints): """ Rewrite functions in terms of other functions. Rewrites expression containing applications of functions of one kind in terms of functions of different kind. For example you can rewrite trigonometric functions as complex exponentials or combinatorial functions as gamma function. As a pattern this function accepts a list of functions to to rewrite (instances of DefinedFunction class). As rule you can use string or a destination function instance (in this case rewrite() will use the str() function). There is also the possibility to pass hints on how to rewrite the given expressions. For now there is only one such hint defined called 'deep'. When 'deep' is set to False it will forbid functions to rewrite their contents. 
Examples ======== >>> from sympy import sin, exp >>> from sympy.abc import x Unspecified pattern: >>> sin(x).rewrite(exp) -I*(exp(I*x) - exp(-I*x))/2 Pattern as a single function: >>> sin(x).rewrite(sin, exp) -I*(exp(I*x) - exp(-I*x))/2 Pattern as a list of functions: >>> sin(x).rewrite([sin, ], exp) -I*(exp(I*x) - exp(-I*x))/2 """ if not args: return self else: pattern = args[:-1] if isinstance(args[-1], str): rule = '_eval_rewrite_as_' + args[-1] else: # rewrite arg is usually a class but can also be a # singleton (e.g. GoldenRatio) so we check # __name__ or __class__.__name__ clsname = getattr(args[-1], "__name__", None) if clsname is None: clsname = args[-1].__class__.__name__ rule = '_eval_rewrite_as_' + clsname if not pattern: return self._eval_rewrite(None, rule, **hints) else: if iterable(pattern[0]): pattern = pattern[0] pattern = [p for p in pattern if self.has(p)] if pattern: return self._eval_rewrite(tuple(pattern), rule, **hints) else: return self _constructor_postprocessor_mapping = {} # type: ignore @classmethod def _exec_constructor_postprocessors(cls, obj): # WARNING: This API is experimental. # This is an experimental API that introduces constructor # postprosessors for SymPy Core elements. If an argument of a SymPy # expression has a `_constructor_postprocessor_mapping` attribute, it will # be interpreted as a dictionary containing lists of postprocessing # functions for matching expression node names. clsname = obj.__class__.__name__ postprocessors = defaultdict(list) for i in obj.args: try: postprocessor_mappings = ( Basic._constructor_postprocessor_mapping[cls].items() for cls in type(i).mro() if cls in Basic._constructor_postprocessor_mapping ) for k, v in chain.from_iterable(postprocessor_mappings): postprocessors[k].extend([j for j in v if j not in postprocessors[k]]) except TypeError: pass for f in postprocessors.get(clsname, []): obj = f(obj) return obj class Atom(Basic): """ A parent class for atomic things. An atom is an expression with no subexpressions. Examples ======== Symbol, Number, Rational, Integer, ... But not: Add, Mul, Pow, ... """ is_Atom = True __slots__ = () def matches(self, expr, repl_dict={}, old=False): if self == expr: return repl_dict.copy() def xreplace(self, rule, hack2=False): return rule.get(self, self) def doit(self, **hints): return self @classmethod def class_key(cls): return 2, 0, cls.__name__ @cacheit def sort_key(self, order=None): return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One def _eval_simplify(self, **kwargs): return self @property def _sorted_args(self): # this is here as a safeguard against accidentally using _sorted_args # on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args) # since there are no args. So the calling routine should be checking # to see that this property is not called for Atoms. raise AttributeError('Atoms have no args. It might be necessary' ' to make a check for Atoms in the calling code.') def _aresame(a, b): """Return True if a and b are structurally the same, else False. 
Examples ======== In SymPy (as in Python) two numbers compare the same if they have the same underlying base-2 representation even though they may not be the same type: >>> from sympy import S >>> 2.0 == S(2) True >>> 0.5 == S.Half True This routine was written to provide a query for such cases that would give false when the types do not match: >>> from sympy.core.basic import _aresame >>> _aresame(S(2.0), S(2)) False """ from .numbers import Number from .function import AppliedUndef, UndefinedFunction as UndefFunc if isinstance(a, Number) and isinstance(b, Number): return a == b and a.__class__ == b.__class__ for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)): if i != j or type(i) != type(j): if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))): if i.class_key() != j.class_key(): return False else: return False return True def _atomic(e, recursive=False): """Return atom-like quantities as far as substitution is concerned: Derivatives, Functions and Symbols. Don't return any 'atoms' that are inside such quantities unless they also appear outside, too, unless `recursive` is True. Examples ======== >>> from sympy import Derivative, Function, cos >>> from sympy.abc import x, y >>> from sympy.core.basic import _atomic >>> f = Function('f') >>> _atomic(x + y) {x, y} >>> _atomic(x + f(y)) {x, f(y)} >>> _atomic(Derivative(f(x), x) + cos(x) + y) {y, cos(x), Derivative(f(x), x)} """ from sympy import Derivative, Function, Symbol pot = preorder_traversal(e) seen = set() if isinstance(e, Basic): free = getattr(e, "free_symbols", None) if free is None: return {e} else: return set() atoms = set() for p in pot: if p in seen: pot.skip() continue seen.add(p) if isinstance(p, Symbol) and p in free: atoms.add(p) elif isinstance(p, (Derivative, Function)): if not recursive: pot.skip() atoms.add(p) return atoms class preorder_traversal: """ Do a pre-order traversal of a tree. This iterator recursively yields nodes that it has visited in a pre-order fashion. That is, it yields the current node then descends through the tree breadth-first to yield all of a node's children's pre-order traversal. For an expression, the order of the traversal depends on the order of .args, which in many cases can be arbitrary. Parameters ========== node : sympy expression The expression to traverse. keys : (default None) sort key(s) The key(s) used to sort args of Basic objects. When None, args of Basic objects are processed in arbitrary order. If key is defined, it will be passed along to ordered() as the only key(s) to use to sort the arguments; if ``key`` is simply True then the default keys of ordered will be used. Yields ====== subtree : sympy expression All of the subtrees in the tree. Examples ======== >>> from sympy import symbols >>> from sympy.core.basic import preorder_traversal >>> x, y, z = symbols('x y z') The nodes are returned in the order that they are encountered unless key is given; simply passing key=True will guarantee that the traversal is unique. 
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP [z*(x + y), z, x + y, y, x] >>> list(preorder_traversal((x + y)*z, keys=True)) [z*(x + y), z, x + y, x, y] """ def __init__(self, node, keys=None): self._skip_flag = False self._pt = self._preorder_traversal(node, keys) def _preorder_traversal(self, node, keys): yield node if self._skip_flag: self._skip_flag = False return if isinstance(node, Basic): if not keys and hasattr(node, '_argset'): # LatticeOp keeps args as a set. We should use this if we # don't care about the order, to prevent unnecessary sorting. args = node._argset else: args = node.args if keys: if keys != True: args = ordered(args, keys, default=False) else: args = ordered(args) for arg in args: yield from self._preorder_traversal(arg, keys) elif iterable(node): for item in node: yield from self._preorder_traversal(item, keys) def skip(self): """ Skip yielding current node's (last yielded node's) subtrees. Examples ======== >>> from sympy.core import symbols >>> from sympy.core.basic import preorder_traversal >>> x, y, z = symbols('x y z') >>> pt = preorder_traversal((x+y*z)*z) >>> for i in pt: ... print(i) ... if i == x+y*z: ... pt.skip() z*(x + y*z) z x + y*z """ self._skip_flag = True def __next__(self): return next(self._pt) def __iter__(self): return self def _make_find_query(query): """Convert the argument of Basic.find() into a callable""" try: query = _sympify(query) except SympifyError: pass if isinstance(query, type): return lambda expr: isinstance(expr, query) elif isinstance(query, Basic): return lambda expr: expr.match(query) is not None return query
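# Illustrative sketch, not part of the original module: ``Basic.find`` and
# ``Basic.count`` reduce to filtering a preorder traversal of the tree with
# the callable built by ``_make_find_query`` above.
def _find_count_example():
    from sympy import sin, symbols

    x, y = symbols('x y')
    expr = sin(x*y) + sin(x)
    # a type used as the query matches every node that is an instance of it
    found = expr.find(sin)      # {sin(x), sin(x*y)}
    how_many = expr.count(sin)  # 2
    return found, how_many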
from collections import defaultdict from functools import cmp_to_key, reduce from .basic import Basic from .compatibility import is_sequence from .parameters import global_parameters from .logic import _fuzzy_group, fuzzy_or, fuzzy_not from .singleton import S from .operations import AssocOp, AssocOpDispatcher from .cache import cacheit from .numbers import ilcm, igcd from .expr import Expr # Key for sorting commutative args in canonical order _args_sortkey = cmp_to_key(Basic.compare) def _addsort(args): # in-place sorting of args args.sort(key=_args_sortkey) def _unevaluated_Add(*args): """Return a well-formed unevaluated Add: Numbers are collected and put in slot 0 and args are sorted. Use this when args have changed but you still want to return an unevaluated Add. Examples ======== >>> from sympy.core.add import _unevaluated_Add as uAdd >>> from sympy import S, Add >>> from sympy.abc import x, y >>> a = uAdd(*[S(1.0), x, S(2)]) >>> a.args[0] 3.00000000000000 >>> a.args[1] x Beyond the Number being in slot 0, there is no other assurance of order for the arguments since they are hash sorted. So, for testing purposes, output produced by this in some other function can only be tested against the output of this function or as one of several options: >>> opts = (Add(x, y, evaluate=False), Add(y, x, evaluate=False)) >>> a = uAdd(x, y) >>> assert a in opts and a == uAdd(x, y) >>> uAdd(x + 1, x + 2) x + x + 3 """ args = list(args) newargs = [] co = S.Zero while args: a = args.pop() if a.is_Add: # this will keep nesting from building up # so that x + (x + 1) -> x + x + 1 (3 args) args.extend(a.args) elif a.is_Number: co += a else: newargs.append(a) _addsort(newargs) if co: newargs.insert(0, co) return Add._from_args(newargs) class Add(Expr, AssocOp): __slots__ = () is_Add = True _args_type = Expr @classmethod def flatten(cls, seq): """ Takes the sequence "seq" of nested Adds and returns a flatten list. Returns: (commutative_part, noncommutative_part, order_symbols) Applies associativity, all terms are commutable with respect to addition. NB: the removal of 0 is already handled by AssocOp.__new__ See also ======== sympy.core.mul.Mul.flatten """ from sympy.calculus.util import AccumBounds from sympy.matrices.expressions import MatrixExpr from sympy.tensor.tensor import TensExpr rv = None if len(seq) == 2: a, b = seq if b.is_Rational: a, b = b, a if a.is_Rational: if b.is_Mul: rv = [a, b], [], None if rv: if all(s.is_commutative for s in rv[0]): return rv return [], rv[0], None terms = {} # term -> coeff # e.g. x**2 -> 5 for ... + 5*x**2 + ... coeff = S.Zero # coefficient (Number or zoo) to always be in slot 0 # e.g. 3 + ... 
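        # Order terms (e.g. O(x)) are collected separately; once all other
        # terms are known they absorb any ordinary term they contain.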
order_factors = [] extra = [] for o in seq: # O(x) if o.is_Order: if o.expr.is_zero: continue for o1 in order_factors: if o1.contains(o): o = None break if o is None: continue order_factors = [o] + [ o1 for o1 in order_factors if not o.contains(o1)] continue # 3 or NaN elif o.is_Number: if (o is S.NaN or coeff is S.ComplexInfinity and o.is_finite is False) and not extra: # we know for sure the result will be nan return [S.NaN], [], None if coeff.is_Number or isinstance(coeff, AccumBounds): coeff += o if coeff is S.NaN and not extra: # we know for sure the result will be nan return [S.NaN], [], None continue elif isinstance(o, AccumBounds): coeff = o.__add__(coeff) continue elif isinstance(o, MatrixExpr): # can't add 0 to Matrix so make sure coeff is not 0 extra.append(o) continue elif isinstance(o, TensExpr): coeff = o.__add__(coeff) if coeff else o continue elif o is S.ComplexInfinity: if coeff.is_finite is False and not extra: # we know for sure the result will be nan return [S.NaN], [], None coeff = S.ComplexInfinity continue # Add([...]) elif o.is_Add: # NB: here we assume Add is always commutative seq.extend(o.args) # TODO zerocopy? continue # Mul([...]) elif o.is_Mul: c, s = o.as_coeff_Mul() # check for unevaluated Pow, e.g. 2**3 or 2**(-1/2) elif o.is_Pow: b, e = o.as_base_exp() if b.is_Number and (e.is_Integer or (e.is_Rational and e.is_negative)): seq.append(b**e) continue c, s = S.One, o else: # everything else c = S.One s = o # now we have: # o = c*s, where # # c is a Number # s is an expression with number factor extracted # let's collect terms with the same s, so e.g. # 2*x**2 + 3*x**2 -> 5*x**2 if s in terms: terms[s] += c if terms[s] is S.NaN and not extra: # we know for sure the result will be nan return [S.NaN], [], None else: terms[s] = c # now let's construct new args: # [2*x**2, x**3, 7*x**4, pi, ...] newseq = [] noncommutative = False for s, c in terms.items(): # 0*s if c.is_zero: continue # 1*s elif c is S.One: newseq.append(s) # c*s else: if s.is_Mul: # Mul, already keeps its arguments in perfect order. # so we can simply put c in slot0 and go the fast way. cs = s._new_rawargs(*((c,) + s.args)) newseq.append(cs) elif s.is_Add: # we just re-create the unevaluated Mul newseq.append(Mul(c, s, evaluate=False)) else: # alternatively we have to call all Mul's machinery (slow) newseq.append(Mul(c, s)) noncommutative = noncommutative or not s.is_commutative # oo, -oo if coeff is S.Infinity: newseq = [f for f in newseq if not (f.is_extended_nonnegative or f.is_real)] elif coeff is S.NegativeInfinity: newseq = [f for f in newseq if not (f.is_extended_nonpositive or f.is_real)] if coeff is S.ComplexInfinity: # zoo might be # infinite_real + finite_im # finite_real + infinite_im # infinite_real + infinite_im # addition of a finite real or imaginary number won't be able to # change the zoo nature; adding an infinite qualtity would result # in a NaN condition if it had sign opposite of the infinite # portion of zoo, e.g., infinite_real - infinite_real. 
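            # keep only terms that could still interact with zoo: finite
            # terms whose realness is known cannot change the result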
newseq = [c for c in newseq if not (c.is_finite and c.is_extended_real is not None)] # process O(x) if order_factors: newseq2 = [] for t in newseq: for o in order_factors: # x + O(x) -> O(x) if o.contains(t): t = None break # x + O(x**2) -> x + O(x**2) if t is not None: newseq2.append(t) newseq = newseq2 + order_factors # 1 + O(1) -> O(1) for o in order_factors: if o.contains(coeff): coeff = S.Zero break # order args canonically _addsort(newseq) # current code expects coeff to be first if coeff is not S.Zero: newseq.insert(0, coeff) if extra: newseq += extra noncommutative = True # we are done if noncommutative: return [], newseq, None else: return newseq, [], None @classmethod def class_key(cls): """Nice order of classes""" return 3, 1, cls.__name__ def as_coefficients_dict(a): """Return a dictionary mapping terms to their Rational coefficient. Since the dictionary is a defaultdict, inquiries about terms which were not present will return a coefficient of 0. If an expression is not an Add it is considered to have a single term. Examples ======== >>> from sympy.abc import a, x >>> (3*x + a*x + 4).as_coefficients_dict() {1: 4, x: 3, a*x: 1} >>> _[a] 0 >>> (3*a*x).as_coefficients_dict() {a*x: 3} """ d = defaultdict(list) for ai in a.args: c, m = ai.as_coeff_Mul() d[m].append(c) for k, v in d.items(): if len(v) == 1: d[k] = v[0] else: d[k] = Add(*v) di = defaultdict(int) di.update(d) return di @cacheit def as_coeff_add(self, *deps): """ Returns a tuple (coeff, args) where self is treated as an Add and coeff is the Number term and args is a tuple of all other terms. Examples ======== >>> from sympy.abc import x >>> (7 + 3*x).as_coeff_add() (7, (3*x,)) >>> (7*x).as_coeff_add() (0, (7*x,)) """ if deps: from sympy.utilities.iterables import sift l1, l2 = sift(self.args, lambda x: x.has(*deps), binary=True) return self._new_rawargs(*l2), tuple(l1) coeff, notrat = self.args[0].as_coeff_add() if coeff is not S.Zero: return coeff, notrat + self.args[1:] return S.Zero, self.args def as_coeff_Add(self, rational=False, deps=None): """ Efficiently extract the coefficient of a summation. """ coeff, args = self.args[0], self.args[1:] if coeff.is_Number and not rational or coeff.is_Rational: return coeff, self._new_rawargs(*args) return S.Zero, self # Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we # let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See # issue 5524. def _eval_power(self, e): if e.is_Rational and self.is_number: from sympy.core.evalf import pure_complex from sympy.core.mul import _unevaluated_Mul from sympy.core.exprtools import factor_terms from sympy.core.function import expand_multinomial from sympy.functions.elementary.complexes import sign from sympy.functions.elementary.miscellaneous import sqrt ri = pure_complex(self) if ri: r, i = ri if e.q == 2: D = sqrt(r**2 + i**2) if D.is_Rational: # (r, i, D) is a Pythagorean triple root = sqrt(factor_terms((D - r)/2))**e.p return root*expand_multinomial(( # principle value (D + r)/abs(i) + sign(i)*S.ImaginaryUnit)**e.p) elif e == -1: return _unevaluated_Mul( r - i*S.ImaginaryUnit, 1/(r**2 + i**2)) elif e.is_Number and abs(e) != 1: # handle the Float case: (2.0 + 4*x)**e -> 4**e*(0.5 + x)**e c, m = zip(*[i.as_coeff_Mul() for i in self.args]) if any(i.is_Float for i in c): # XXX should this always be done? 
big = -1 for i in c: if abs(i) >= big: big = abs(i) if big > 0 and big != 1: from sympy.functions.elementary.complexes import sign bigs = (big, -big) c = [sign(i) if i in bigs else i/big for i in c] addpow = Add(*[c*m for c, m in zip(c, m)])**e return big**e*addpow @cacheit def _eval_derivative(self, s): return self.func(*[a.diff(s) for a in self.args]) def _eval_nseries(self, x, n, logx, cdir=0): terms = [t.nseries(x, n=n, logx=logx, cdir=cdir) for t in self.args] return self.func(*terms) def _matches_simple(self, expr, repl_dict): # handle (w+3).matches('x+5') -> {w: x+2} coeff, terms = self.as_coeff_add() if len(terms) == 1: return terms[0].matches(expr - coeff, repl_dict) return def matches(self, expr, repl_dict={}, old=False): return self._matches_commutative(expr, repl_dict, old) @staticmethod def _combine_inverse(lhs, rhs): """ Returns lhs - rhs, but treats oo like a symbol so oo - oo returns 0, instead of a nan. """ from sympy.simplify.simplify import signsimp from sympy.core.symbol import Dummy inf = (S.Infinity, S.NegativeInfinity) if lhs.has(*inf) or rhs.has(*inf): oo = Dummy('oo') reps = { S.Infinity: oo, S.NegativeInfinity: -oo} ireps = {v: k for k, v in reps.items()} eq = signsimp(lhs.xreplace(reps) - rhs.xreplace(reps)) if eq.has(oo): eq = eq.replace( lambda x: x.is_Pow and x.base is oo, lambda x: x.base) return eq.xreplace(ireps) else: return signsimp(lhs - rhs) @cacheit def as_two_terms(self): """Return head and tail of self. This is the most efficient way to get the head and tail of an expression. - if you want only the head, use self.args[0]; - if you want to process the arguments of the tail then use self.as_coef_add() which gives the head and a tuple containing the arguments of the tail when treated as an Add. - if you want the coefficient when self is treated as a Mul then use self.as_coeff_mul()[0] >>> from sympy.abc import x, y >>> (3*x - 2*y + 5).as_two_terms() (5, 3*x - 2*y) """ return self.args[0], self._new_rawargs(*self.args[1:]) def as_numer_denom(self): """ Decomposes an expression to its numerator part and its denominator part. 
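
        For an Add, the rational content is first cleared via ``primitive()``
        and the terms are then combined over a common denominator, so a single
        ``(numerator, denominator)`` pair is returned.
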
Examples ======== >>> from sympy.abc import x, y, z >>> (x*y/z).as_numer_denom() (x*y, z) >>> (x*(y + 1)/y**7).as_numer_denom() (x*(y + 1), y**7) See Also ======== sympy.core.expr.Expr.as_numer_denom """ # clear rational denominator content, expr = self.primitive() ncon, dcon = content.as_numer_denom() # collect numerators and denominators of the terms nd = defaultdict(list) for f in expr.args: ni, di = f.as_numer_denom() nd[di].append(ni) # check for quick exit if len(nd) == 1: d, n = nd.popitem() return self.func( *[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d) # sum up the terms having a common denominator for d, n in nd.items(): if len(n) == 1: nd[d] = n[0] else: nd[d] = self.func(*n) # assemble single numerator and denominator denoms, numers = [list(i) for i in zip(*iter(nd.items()))] n, d = self.func(*[Mul(*(denoms[:i] + [numers[i]] + denoms[i + 1:])) for i in range(len(numers))]), Mul(*denoms) return _keep_coeff(ncon, n), _keep_coeff(dcon, d) def _eval_is_polynomial(self, syms): return all(term._eval_is_polynomial(syms) for term in self.args) def _eval_is_rational_function(self, syms): return all(term._eval_is_rational_function(syms) for term in self.args) def _eval_is_meromorphic(self, x, a): return _fuzzy_group((arg.is_meromorphic(x, a) for arg in self.args), quick_exit=True) def _eval_is_algebraic_expr(self, syms): return all(term._eval_is_algebraic_expr(syms) for term in self.args) # assumption methods _eval_is_real = lambda self: _fuzzy_group( (a.is_real for a in self.args), quick_exit=True) _eval_is_extended_real = lambda self: _fuzzy_group( (a.is_extended_real for a in self.args), quick_exit=True) _eval_is_complex = lambda self: _fuzzy_group( (a.is_complex for a in self.args), quick_exit=True) _eval_is_antihermitian = lambda self: _fuzzy_group( (a.is_antihermitian for a in self.args), quick_exit=True) _eval_is_finite = lambda self: _fuzzy_group( (a.is_finite for a in self.args), quick_exit=True) _eval_is_hermitian = lambda self: _fuzzy_group( (a.is_hermitian for a in self.args), quick_exit=True) _eval_is_integer = lambda self: _fuzzy_group( (a.is_integer for a in self.args), quick_exit=True) _eval_is_rational = lambda self: _fuzzy_group( (a.is_rational for a in self.args), quick_exit=True) _eval_is_algebraic = lambda self: _fuzzy_group( (a.is_algebraic for a in self.args), quick_exit=True) _eval_is_commutative = lambda self: _fuzzy_group( a.is_commutative for a in self.args) def _eval_is_infinite(self): sawinf = False for a in self.args: ainf = a.is_infinite if ainf is None: return None elif ainf is True: # infinite+infinite might not be infinite if sawinf is True: return None sawinf = True return sawinf def _eval_is_imaginary(self): nz = [] im_I = [] for a in self.args: if a.is_extended_real: if a.is_zero: pass elif a.is_zero is False: nz.append(a) else: return elif a.is_imaginary: im_I.append(a*S.ImaginaryUnit) elif (S.ImaginaryUnit*a).is_extended_real: im_I.append(a*S.ImaginaryUnit) else: return b = self.func(*nz) if b.is_zero: return fuzzy_not(self.func(*im_I).is_zero) elif b.is_zero is False: return False def _eval_is_zero(self): if self.is_commutative is False: # issue 10528: there is no way to know if a nc symbol # is zero or not return nz = [] z = 0 im_or_z = False im = False for a in self.args: if a.is_extended_real: if a.is_zero: z += 1 elif a.is_zero is False: nz.append(a) else: return elif a.is_imaginary: im = True elif (S.ImaginaryUnit*a).is_extended_real: im_or_z = True else: return if z == len(self.args): return True if len(nz) == 0 or len(nz) == 
len(self.args): return None b = self.func(*nz) if b.is_zero: if not im_or_z and not im: return True if im and not im_or_z: return False if b.is_zero is False: return False def _eval_is_odd(self): l = [f for f in self.args if not (f.is_even is True)] if not l: return False if l[0].is_odd: return self._new_rawargs(*l[1:]).is_even def _eval_is_irrational(self): for t in self.args: a = t.is_irrational if a: others = list(self.args) others.remove(t) if all(x.is_rational is True for x in others): return True return None if a is None: return return False def _eval_is_extended_positive(self): from sympy.core.exprtools import _monotonic_sign if self.is_number: return super()._eval_is_extended_positive() c, a = self.as_coeff_Add() if not c.is_zero: v = _monotonic_sign(a) if v is not None: s = v + c if s != self and s.is_extended_positive and a.is_extended_nonnegative: return True if len(self.free_symbols) == 1: v = _monotonic_sign(self) if v is not None and v != self and v.is_extended_positive: return True pos = nonneg = nonpos = unknown_sign = False saw_INF = set() args = [a for a in self.args if not a.is_zero] if not args: return False for a in args: ispos = a.is_extended_positive infinite = a.is_infinite if infinite: saw_INF.add(fuzzy_or((ispos, a.is_extended_nonnegative))) if True in saw_INF and False in saw_INF: return if ispos: pos = True continue elif a.is_extended_nonnegative: nonneg = True continue elif a.is_extended_nonpositive: nonpos = True continue if infinite is None: return unknown_sign = True if saw_INF: if len(saw_INF) > 1: return return saw_INF.pop() elif unknown_sign: return elif not nonpos and not nonneg and pos: return True elif not nonpos and pos: return True elif not pos and not nonneg: return False def _eval_is_extended_nonnegative(self): from sympy.core.exprtools import _monotonic_sign if not self.is_number: c, a = self.as_coeff_Add() if not c.is_zero and a.is_extended_nonnegative: v = _monotonic_sign(a) if v is not None: s = v + c if s != self and s.is_extended_nonnegative: return True if len(self.free_symbols) == 1: v = _monotonic_sign(self) if v is not None and v != self and v.is_extended_nonnegative: return True def _eval_is_extended_nonpositive(self): from sympy.core.exprtools import _monotonic_sign if not self.is_number: c, a = self.as_coeff_Add() if not c.is_zero and a.is_extended_nonpositive: v = _monotonic_sign(a) if v is not None: s = v + c if s != self and s.is_extended_nonpositive: return True if len(self.free_symbols) == 1: v = _monotonic_sign(self) if v is not None and v != self and v.is_extended_nonpositive: return True def _eval_is_extended_negative(self): from sympy.core.exprtools import _monotonic_sign if self.is_number: return super()._eval_is_extended_negative() c, a = self.as_coeff_Add() if not c.is_zero: v = _monotonic_sign(a) if v is not None: s = v + c if s != self and s.is_extended_negative and a.is_extended_nonpositive: return True if len(self.free_symbols) == 1: v = _monotonic_sign(self) if v is not None and v != self and v.is_extended_negative: return True neg = nonpos = nonneg = unknown_sign = False saw_INF = set() args = [a for a in self.args if not a.is_zero] if not args: return False for a in args: isneg = a.is_extended_negative infinite = a.is_infinite if infinite: saw_INF.add(fuzzy_or((isneg, a.is_extended_nonpositive))) if True in saw_INF and False in saw_INF: return if isneg: neg = True continue elif a.is_extended_nonpositive: nonpos = True continue elif a.is_extended_nonnegative: nonneg = True continue if infinite is None: return 
unknown_sign = True if saw_INF: if len(saw_INF) > 1: return return saw_INF.pop() elif unknown_sign: return elif not nonneg and not nonpos and neg: return True elif not nonneg and neg: return True elif not neg and not nonpos: return False def _eval_subs(self, old, new): if not old.is_Add: if old is S.Infinity and -old in self.args: # foo - oo is foo + (-oo) internally return self.xreplace({-old: -new}) return None coeff_self, terms_self = self.as_coeff_Add() coeff_old, terms_old = old.as_coeff_Add() if coeff_self.is_Rational and coeff_old.is_Rational: if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y return self.func(new, coeff_self, -coeff_old) if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y return self.func(-new, coeff_self, coeff_old) if coeff_self.is_Rational and coeff_old.is_Rational \ or coeff_self == coeff_old: args_old, args_self = self.func.make_args( terms_old), self.func.make_args(terms_self) if len(args_old) < len(args_self): # (a+b+c).subs(b+c,x) -> a+x self_set = set(args_self) old_set = set(args_old) if old_set < self_set: ret_set = self_set - old_set return self.func(new, coeff_self, -coeff_old, *[s._subs(old, new) for s in ret_set]) args_old = self.func.make_args( -terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d old_set = set(args_old) if old_set < self_set: ret_set = self_set - old_set return self.func(-new, coeff_self, coeff_old, *[s._subs(old, new) for s in ret_set]) def removeO(self): args = [a for a in self.args if not a.is_Order] return self._new_rawargs(*args) def getO(self): args = [a for a in self.args if a.is_Order] if args: return self._new_rawargs(*args) @cacheit def extract_leading_order(self, symbols, point=None): """ Returns the leading term and its order. Examples ======== >>> from sympy.abc import x >>> (x + 1 + 1/x**5).extract_leading_order(x) ((x**(-5), O(x**(-5))),) >>> (1 + x).extract_leading_order(x) ((1, O(1)),) >>> (x + x**2).extract_leading_order(x) ((x, O(x)),) """ from sympy import Order lst = [] symbols = list(symbols if is_sequence(symbols) else [symbols]) if not point: point = [0]*len(symbols) seq = [(f, Order(f, *zip(symbols, point))) for f in self.args] for ef, of in seq: for e, o in lst: if o.contains(of) and o != of: of = None break if of is None: continue new_lst = [(ef, of)] for e, o in lst: if of.contains(o) and o != of: continue new_lst.append((e, o)) lst = new_lst return tuple(lst) def as_real_imag(self, deep=True, **hints): """ returns a tuple representing a complex number Examples ======== >>> from sympy import I >>> (7 + 9*I).as_real_imag() (7, 9) >>> ((1 + I)/(1 - I)).as_real_imag() (0, 1) >>> ((1 + 2*I)*(1 + 3*I)).as_real_imag() (-5, 5) """ sargs = self.args re_part, im_part = [], [] for term in sargs: re, im = term.as_real_imag(deep=deep) re_part.append(re) im_part.append(im) return (self.func(*re_part), self.func(*im_part)) def _eval_as_leading_term(self, x, cdir=0): from sympy import expand_mul, Order old = self expr = expand_mul(self) if not expr.is_Add: return expr.as_leading_term(x, cdir=cdir) infinite = [t for t in expr.args if t.is_infinite] leading_terms = [t.as_leading_term(x, cdir=cdir) for t in expr.args] min, new_expr = Order(0), 0 try: for term in leading_terms: order = Order(term, x) if not min or order not in min: min = order new_expr = term elif min in order: new_expr += term except TypeError: return expr new_expr=new_expr.together() if new_expr.is_Add: new_expr = new_expr.simplify() if not new_expr: # simple leading term analysis gave us cancelled terms but we have to send # back a 
term, so compute the leading term (via series) n0 = min.getn() res = Order(1) incr = S.One while res.is_Order: res = old._eval_nseries(x, n=n0+incr, logx=None, cdir=cdir).cancel().powsimp().trigsimp() incr *= 2 return res.as_leading_term(x, cdir=cdir) elif new_expr is S.NaN: return old.func._from_args(infinite) else: return new_expr def _eval_adjoint(self): return self.func(*[t.adjoint() for t in self.args]) def _eval_conjugate(self): return self.func(*[t.conjugate() for t in self.args]) def _eval_transpose(self): return self.func(*[t.transpose() for t in self.args]) def _sage_(self): s = 0 for x in self.args: s += x._sage_() return s def primitive(self): """ Return ``(R, self/R)`` where ``R``` is the Rational GCD of ``self```. ``R`` is collected only from the leading coefficient of each term. Examples ======== >>> from sympy.abc import x, y >>> (2*x + 4*y).primitive() (2, x + 2*y) >>> (2*x/3 + 4*y/9).primitive() (2/9, 3*x + 2*y) >>> (2*x/3 + 4.2*y).primitive() (1/3, 2*x + 12.6*y) No subprocessing of term factors is performed: >>> ((2 + 2*x)*x + 2).primitive() (1, x*(2*x + 2) + 2) Recursive processing can be done with the ``as_content_primitive()`` method: >>> ((2 + 2*x)*x + 2).as_content_primitive() (2, x*(x + 1) + 1) See also: primitive() function in polytools.py """ terms = [] inf = False for a in self.args: c, m = a.as_coeff_Mul() if not c.is_Rational: c = S.One m = a inf = inf or m is S.ComplexInfinity terms.append((c.p, c.q, m)) if not inf: ngcd = reduce(igcd, [t[0] for t in terms], 0) dlcm = reduce(ilcm, [t[1] for t in terms], 1) else: ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0) dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1) if ngcd == dlcm == 1: return S.One, self if not inf: for i, (p, q, term) in enumerate(terms): terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term) else: for i, (p, q, term) in enumerate(terms): if q: terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term) else: terms[i] = _keep_coeff(Rational(p, q), term) # we don't need a complete re-flattening since no new terms will join # so we just use the same sort as is used in Add.flatten. When the # coefficient changes, the ordering of terms may change, e.g. # (3*x, 6*y) -> (2*y, x) # # We do need to make sure that term[0] stays in position 0, however. # if terms[0].is_Number or terms[0] is S.ComplexInfinity: c = terms.pop(0) else: c = None _addsort(terms) if c: terms.insert(0, c) return Rational(ngcd, dlcm), self._new_rawargs(*terms) def as_content_primitive(self, radical=False, clear=True): """Return the tuple (R, self/R) where R is the positive Rational extracted from self. If radical is True (default is False) then common radicals will be removed and included as a factor of the primitive expression. Examples ======== >>> from sympy import sqrt >>> (3 + 3*sqrt(2)).as_content_primitive() (3, 1 + sqrt(2)) Radical content can also be factored out of the primitive: >>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True) (2, sqrt(2)*(1 + 2*sqrt(5))) See docstring of Expr.as_content_primitive for more examples. 
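
        The ``clear`` flag works as described for ``Expr.as_content_primitive``;
        a sketch of the assumed behaviour (consistent with the code below):

        >>> from sympy.abc import x
        >>> (x/2 + 1).as_content_primitive(clear=False)  # doctest: +SKIP
        (1, x/2 + 1)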
""" con, prim = self.func(*[_keep_coeff(*a.as_content_primitive( radical=radical, clear=clear)) for a in self.args]).primitive() if not clear and not con.is_Integer and prim.is_Add: con, d = con.as_numer_denom() _p = prim/d if any(a.as_coeff_Mul()[0].is_Integer for a in _p.args): prim = _p else: con /= d if radical and prim.is_Add: # look for common radicals that can be removed args = prim.args rads = [] common_q = None for m in args: term_rads = defaultdict(list) for ai in Mul.make_args(m): if ai.is_Pow: b, e = ai.as_base_exp() if e.is_Rational and b.is_Integer: term_rads[e.q].append(abs(int(b))**e.p) if not term_rads: break if common_q is None: common_q = set(term_rads.keys()) else: common_q = common_q & set(term_rads.keys()) if not common_q: break rads.append(term_rads) else: # process rads # keep only those in common_q for r in rads: for q in list(r.keys()): if q not in common_q: r.pop(q) for q in r: r[q] = prod(r[q]) # find the gcd of bases for each q G = [] for q in common_q: g = reduce(igcd, [r[q] for r in rads], 0) if g != 1: G.append(g**Rational(1, q)) if G: G = Mul(*G) args = [ai/G for ai in args] prim = G*prim.func(*args) return con, prim @property def _sorted_args(self): from sympy.core.compatibility import default_sort_key return tuple(sorted(self.args, key=default_sort_key)) def _eval_difference_delta(self, n, step): from sympy.series.limitseq import difference_delta as dd return self.func(*[dd(a, n, step) for a in self.args]) @property def _mpc_(self): """ Convert self to an mpmath mpc if possible """ from sympy.core.numbers import I, Float re_part, rest = self.as_coeff_Add() im_part, imag_unit = rest.as_coeff_Mul() if not imag_unit == I: # ValueError may seem more reasonable but since it's a @property, # we need to use AttributeError to keep from confusing things like # hasattr. raise AttributeError("Cannot convert Add to mpc. Must be of the form Number + Number*I") return (Float(re_part)._mpf_, Float(im_part)._mpf_) def __neg__(self): if not global_parameters.distribute: return super().__neg__() return Add(*[-i for i in self.args]) add = AssocOpDispatcher('add') from .mul import Mul, _keep_coeff, prod from sympy.core.numbers import Rational
750d89b4699d21b439b893d82d329760331e9e572010c5174fee3fd6eda3b300
from typing import Tuple as tTuple from collections.abc import Iterable from functools import reduce from .sympify import sympify, _sympify, SympifyError from .basic import Basic, Atom from .singleton import S from .evalf import EvalfMixin, pure_complex from .decorators import call_highest_priority, sympify_method_args, sympify_return from .cache import cacheit from .compatibility import as_int, default_sort_key from sympy.utilities.misc import func_name from mpmath.libmp import mpf_log, prec_to_dps from collections import defaultdict @sympify_method_args class Expr(Basic, EvalfMixin): """ Base class for algebraic expressions. Explanation =========== Everything that requires arithmetic operations to be defined should subclass this class, instead of Basic (which should be used only for argument storage and expression manipulation, i.e. pattern matching, substitutions, etc). If you want to override the comparisons of expressions: Should use _eval_is_ge for inequality, or _eval_is_eq, with multiple dispatch. _eval_is_ge return true if x >= y, false if x < y, and None if the two types are not comparable or the comparison is indeterminate See Also ======== sympy.core.basic.Basic """ __slots__ = () # type: tTuple[str, ...] is_scalar = True # self derivative is 1 @property def _diff_wrt(self): """Return True if one can differentiate with respect to this object, else False. Explanation =========== Subclasses such as Symbol, Function and Derivative return True to enable derivatives wrt them. The implementation in Derivative separates the Symbol and non-Symbol (_diff_wrt=True) variables and temporarily converts the non-Symbols into Symbols when performing the differentiation. By default, any object deriving from Expr will behave like a scalar with self.diff(self) == 1. If this is not desired then the object must also set `is_scalar = False` or else define an _eval_derivative routine. Note, see the docstring of Derivative for how this should work mathematically. In particular, note that expr.subs(yourclass, Symbol) should be well-defined on a structural level, or this will lead to inconsistent results. Examples ======== >>> from sympy import Expr >>> e = Expr() >>> e._diff_wrt False >>> class MyScalar(Expr): ... _diff_wrt = True ... >>> MyScalar().diff(MyScalar()) 1 >>> class MySymbol(Expr): ... _diff_wrt = True ... is_scalar = False ... >>> MySymbol().diff(MySymbol()) Derivative(MySymbol(), MySymbol()) """ return False @cacheit def sort_key(self, order=None): coeff, expr = self.as_coeff_Mul() if expr.is_Pow: expr, exp = expr.args else: expr, exp = expr, S.One if expr.is_Dummy: args = (expr.sort_key(),) elif expr.is_Atom: args = (str(expr),) else: if expr.is_Add: args = expr.as_ordered_terms(order=order) elif expr.is_Mul: args = expr.as_ordered_factors(order=order) else: args = expr.args args = tuple( [ default_sort_key(arg, order=order) for arg in args ]) args = (len(args), tuple(args)) exp = exp.sort_key(order=order) return expr.class_key(), args, exp, coeff def __hash__(self) -> int: # hash cannot be cached using cache_it because infinite recurrence # occurs as hash is needed for setting cache dictionary keys h = self._mhash if h is None: h = hash((type(self).__name__,) + self._hashable_content()) self._mhash = h return h def _hashable_content(self): """Return a tuple of information about self that can be used to compute the hash. If a class defines additional attributes, like ``name`` in Symbol, then this method should be updated accordingly to return such relevant attributes. 
Defining more than _hashable_content is necessary if __eq__ has been defined by a class. See note about this in Basic.__eq__.""" return self._args def __eq__(self, other): try: other = _sympify(other) if not isinstance(other, Expr): return False except (SympifyError, SyntaxError): return False # check for pure number expr if not (self.is_Number and other.is_Number) and ( type(self) != type(other)): return False a, b = self._hashable_content(), other._hashable_content() if a != b: return False # check number *in* an expression for a, b in zip(a, b): if not isinstance(a, Expr): continue if a.is_Number and type(a) != type(b): return False return True # *************** # * Arithmetics * # *************** # Expr and its sublcasses use _op_priority to determine which object # passed to a binary special method (__mul__, etc.) will handle the # operation. In general, the 'call_highest_priority' decorator will choose # the object with the highest _op_priority to handle the call. # Custom subclasses that want to define their own binary special methods # should set an _op_priority value that is higher than the default. # # **NOTE**: # This is a temporary fix, and will eventually be replaced with # something better and more powerful. See issue 5510. _op_priority = 10.0 @property def _add_handler(self): return Add @property def _mul_handler(self): return Mul def __pos__(self): return self def __neg__(self): # Mul has its own __neg__ routine, so we just # create a 2-args Mul with the -1 in the canonical # slot 0. c = self.is_commutative return Mul._from_args((S.NegativeOne, self), c) def __abs__(self): from sympy import Abs return Abs(self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__radd__') def __add__(self, other): return Add(self, other) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__add__') def __radd__(self, other): return Add(other, self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rsub__') def __sub__(self, other): return Add(self, -other) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__sub__') def __rsub__(self, other): return Add(other, -self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rmul__') def __mul__(self, other): return Mul(self, other) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__mul__') def __rmul__(self, other): return Mul(other, self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rpow__') def _pow(self, other): return Pow(self, other) def __pow__(self, other, mod=None): if mod is None: return self._pow(other) try: _self, other, mod = as_int(self), as_int(other), as_int(mod) if other >= 0: return pow(_self, other, mod) else: from sympy.core.numbers import mod_inverse return mod_inverse(pow(_self, -other, mod), mod) except ValueError: power = self._pow(other) try: return power%mod except TypeError: return NotImplemented @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__pow__') def __rpow__(self, other): return Pow(other, self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rtruediv__') def __truediv__(self, other): denom = Pow(other, S.NegativeOne) if self is S.One: return denom else: return Mul(self, denom) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__truediv__') def __rtruediv__(self, other): denom = Pow(self, S.NegativeOne) if other is S.One: return denom 
else: return Mul(other, denom) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rmod__') def __mod__(self, other): return Mod(self, other) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__mod__') def __rmod__(self, other): return Mod(other, self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rfloordiv__') def __floordiv__(self, other): from sympy.functions.elementary.integers import floor return floor(self / other) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__floordiv__') def __rfloordiv__(self, other): from sympy.functions.elementary.integers import floor return floor(other / self) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__rdivmod__') def __divmod__(self, other): from sympy.functions.elementary.integers import floor return floor(self / other), Mod(self, other) @sympify_return([('other', 'Expr')], NotImplemented) @call_highest_priority('__divmod__') def __rdivmod__(self, other): from sympy.functions.elementary.integers import floor return floor(other / self), Mod(other, self) def __int__(self): # Although we only need to round to the units position, we'll # get one more digit so the extra testing below can be avoided # unless the rounded value rounded to an integer, e.g. if an # expression were equal to 1.9 and we rounded to the unit position # we would get a 2 and would not know if this rounded up or not # without doing a test (as done below). But if we keep an extra # digit we know that 1.9 is not the same as 1 and there is no # need for further testing: our int value is correct. If the value # were 1.99, however, this would round to 2.0 and our int value is # off by one. So...if our round value is the same as the int value # (regardless of how much extra work we do to calculate extra decimal # places) we need to test whether we are off by one. from sympy import Dummy if not self.is_number: raise TypeError("can't convert symbols to int") r = self.round(2) if not r.is_Number: raise TypeError("can't convert complex to int") if r in (S.NaN, S.Infinity, S.NegativeInfinity): raise TypeError("can't convert %s to int" % r) i = int(r) if not i: return 0 # off-by-one check if i == r and not (self - i).equals(0): isign = 1 if i > 0 else -1 x = Dummy() # in the following (self - i).evalf(2) will not always work while # (self - r).evalf(2) and the use of subs does; if the test that # was added when this comment was added passes, it might be safe # to simply use sign to compute this rather than doing this by hand: diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1 if diff_sign != isign: i -= isign return i def __float__(self): # Don't bother testing if it's a number; if it's not this is going # to fail, and if it is we still need to check that it evalf'ed to # a number. 
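        # e.g. float(sqrt(2)) yields 1.4142135623730951, while float(I) falls
        # through to the complex branch below and raises TypeError.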
result = self.evalf() if result.is_Number: return float(result) if result.is_number and result.as_real_imag()[1]: raise TypeError("can't convert complex to float") raise TypeError("can't convert expression to float") def __complex__(self): result = self.evalf() re, im = result.as_real_imag() return complex(float(re), float(im)) @sympify_return([('other', 'Expr')], NotImplemented) def __ge__(self, other): from .relational import GreaterThan return GreaterThan(self, other) @sympify_return([('other', 'Expr')], NotImplemented) def __le__(self, other): from .relational import LessThan return LessThan(self, other) @sympify_return([('other', 'Expr')], NotImplemented) def __gt__(self, other): from .relational import StrictGreaterThan return StrictGreaterThan(self, other) @sympify_return([('other', 'Expr')], NotImplemented) def __lt__(self, other): from .relational import StrictLessThan return StrictLessThan(self, other) def __trunc__(self): if not self.is_number: raise TypeError("can't truncate symbols and expressions") else: return Integer(self) @staticmethod def _from_mpmath(x, prec): from sympy import Float if hasattr(x, "_mpf_"): return Float._new(x._mpf_, prec) elif hasattr(x, "_mpc_"): re, im = x._mpc_ re = Float._new(re, prec) im = Float._new(im, prec)*S.ImaginaryUnit return re + im else: raise TypeError("expected mpmath number (mpf or mpc)") @property def is_number(self): """Returns True if ``self`` has no free symbols and no undefined functions (AppliedUndef, to be precise). It will be faster than ``if not self.free_symbols``, however, since ``is_number`` will fail as soon as it hits a free symbol or undefined function. Examples ======== >>> from sympy import Integral, cos, sin, pi >>> from sympy.core.function import Function >>> from sympy.abc import x >>> f = Function('f') >>> x.is_number False >>> f(1).is_number False >>> (2*x).is_number False >>> (2 + Integral(2, x)).is_number False >>> (2 + Integral(2, (x, 1, 2))).is_number True Not all numbers are Numbers in the SymPy sense: >>> pi.is_number, pi.is_Number (True, False) If something is a number it should evaluate to a number with real and imaginary parts that are Numbers; the result may not be comparable, however, since the real and/or imaginary part of the result may not have precision. >>> cos(1).is_number and cos(1).is_comparable True >>> z = cos(1)**2 + sin(1)**2 - 1 >>> z.is_number True >>> z.is_comparable False See Also ======== sympy.core.basic.Basic.is_comparable """ return all(obj.is_number for obj in self.args) def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1): """Return self evaluated, if possible, replacing free symbols with random complex values, if necessary. Explanation =========== The random complex value for each free symbol is generated by the random_complex_number routine giving real and imaginary parts in the range given by the re_min, re_max, im_min, and im_max values. The returned value is evaluated to a precision of n (if given) else the maximum of 15 and the precision needed to get more than 1 digit of precision. If the expression could not be evaluated to a number, or could not be evaluated to more than 1 digit of precision, then None is returned. 
Examples ======== >>> from sympy import sqrt >>> from sympy.abc import x, y >>> x._random() # doctest: +SKIP 0.0392918155679172 + 0.916050214307199*I >>> x._random(2) # doctest: +SKIP -0.77 - 0.87*I >>> (x + y/2)._random(2) # doctest: +SKIP -0.57 + 0.16*I >>> sqrt(2)._random(2) 1.4 See Also ======== sympy.testing.randtest.random_complex_number """ free = self.free_symbols prec = 1 if free: from sympy.testing.randtest import random_complex_number a, c, b, d = re_min, re_max, im_min, im_max reps = dict(list(zip(free, [random_complex_number(a, b, c, d, rational=True) for zi in free]))) try: nmag = abs(self.evalf(2, subs=reps)) except (ValueError, TypeError): # if an out of range value resulted in evalf problems # then return None -- XXX is there a way to know how to # select a good random number for a given expression? # e.g. when calculating n! negative values for n should not # be used return None else: reps = {} nmag = abs(self.evalf(2)) if not hasattr(nmag, '_prec'): # e.g. exp_polar(2*I*pi) doesn't evaluate but is_number is True return None if nmag._prec == 1: # increase the precision up to the default maximum # precision to see if we can get any significance from mpmath.libmp.libintmath import giant_steps from sympy.core.evalf import DEFAULT_MAXPREC as target # evaluate for prec in giant_steps(2, target): nmag = abs(self.evalf(prec, subs=reps)) if nmag._prec != 1: break if nmag._prec != 1: if n is None: n = max(prec, 15) return self.evalf(n, subs=reps) # never got any significance return None def is_constant(self, *wrt, **flags): """Return True if self is constant, False if not, or None if the constancy could not be determined conclusively. Explanation =========== If an expression has no free symbols then it is a constant. If there are free symbols it is possible that the expression is a constant, perhaps (but not necessarily) zero. To test such expressions, a few strategies are tried: 1) numerical evaluation at two random points. If two such evaluations give two different values and the values have a precision greater than 1 then self is not constant. If the evaluations agree or could not be obtained with any precision, no decision is made. The numerical testing is done only if ``wrt`` is different than the free symbols. 2) differentiation with respect to variables in 'wrt' (or all free symbols if omitted) to see if the expression is constant or not. This will not always lead to an expression that is zero even though an expression is constant (see added test in test_expr.py). If all derivatives are zero then self is constant with respect to the given symbols. 3) finding out zeros of denominator expression with free_symbols. It won't be constant if there are zeros. It gives more negative answers for expression that are not constant. If neither evaluation nor differentiation can prove the expression is constant, None is returned unless two numerical values happened to be the same and the flag ``failing_number`` is True -- in that case the numerical value will be returned. If flag simplify=False is passed, self will not be simplified; the default is True since self should be simplified before testing. 
Examples ======== >>> from sympy import cos, sin, Sum, S, pi >>> from sympy.abc import a, n, x, y >>> x.is_constant() False >>> S(2).is_constant() True >>> Sum(x, (x, 1, 10)).is_constant() True >>> Sum(x, (x, 1, n)).is_constant() False >>> Sum(x, (x, 1, n)).is_constant(y) True >>> Sum(x, (x, 1, n)).is_constant(n) False >>> Sum(x, (x, 1, n)).is_constant(x) True >>> eq = a*cos(x)**2 + a*sin(x)**2 - a >>> eq.is_constant() True >>> eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0 True >>> (0**x).is_constant() False >>> x.is_constant() False >>> (x**x).is_constant() False >>> one = cos(x)**2 + sin(x)**2 >>> one.is_constant() True >>> ((one - 1)**(x + 1)).is_constant() in (True, False) # could be 0 or 1 True """ def check_denominator_zeros(expression): from sympy.solvers.solvers import denoms retNone = False for den in denoms(expression): z = den.is_zero if z is True: return True if z is None: retNone = True if retNone: return None return False simplify = flags.get('simplify', True) if self.is_number: return True free = self.free_symbols if not free: return True # assume f(1) is some constant # if we are only interested in some symbols and they are not in the # free symbols then this expression is constant wrt those symbols wrt = set(wrt) if wrt and not wrt & free: return True wrt = wrt or free # simplify unless this has already been done expr = self if simplify: expr = expr.simplify() # is_zero should be a quick assumptions check; it can be wrong for # numbers (see test_is_not_constant test), giving False when it # shouldn't, but hopefully it will never give True unless it is sure. if expr.is_zero: return True # try numerical evaluation to see if we get two different values failing_number = None if wrt == free: # try 0 (for a) and 1 (for b) try: a = expr.subs(list(zip(free, [0]*len(free))), simultaneous=True) if a is S.NaN: # evaluation may succeed when substitution fails a = expr._random(None, 0, 0, 0, 0) except ZeroDivisionError: a = None if a is not None and a is not S.NaN: try: b = expr.subs(list(zip(free, [1]*len(free))), simultaneous=True) if b is S.NaN: # evaluation may succeed when substitution fails b = expr._random(None, 1, 0, 1, 0) except ZeroDivisionError: b = None if b is not None and b is not S.NaN and b.equals(a) is False: return False # try random real b = expr._random(None, -1, 0, 1, 0) if b is not None and b is not S.NaN and b.equals(a) is False: return False # try random complex b = expr._random() if b is not None and b is not S.NaN: if b.equals(a) is False: return False failing_number = a if a.is_number else b # now we will test each wrt symbol (or all free symbols) to see if the # expression depends on them or not using differentiation. This is # not sufficient for all expressions, however, so we don't return # False if we get a derivative other than 0 with free symbols. for w in wrt: deriv = expr.diff(w) if simplify: deriv = deriv.simplify() if deriv != 0: if not (pure_complex(deriv, or_real=True)): if flags.get('failing_number', False): return failing_number elif deriv.free_symbols: # dead line provided _random returns None in such cases return None return False cd = check_denominator_zeros(self) if cd is True: return False elif cd is None: return None return True def equals(self, other, failing_expression=False): """Return True if self == other, False if it doesn't, or None. If failing_expression is True then the expression which did not simplify to a 0 will be returned instead of None. 
Explanation =========== If ``self`` is a Number (or complex number) that is not zero, then the result is False. If ``self`` is a number and has not evaluated to zero, evalf will be used to test whether the expression evaluates to zero. If it does so and the result has significance (i.e. the precision is either -1, for a Rational result, or is greater than 1) then the evalf value will be used to return True or False. """ from sympy.simplify.simplify import nsimplify, simplify from sympy.solvers.solvers import solve from sympy.polys.polyerrors import NotAlgebraic from sympy.polys.numberfields import minimal_polynomial other = sympify(other) if self == other: return True # they aren't the same so see if we can make the difference 0; # don't worry about doing simplification steps one at a time # because if the expression ever goes to 0 then the subsequent # simplification steps that are done will be very fast. diff = factor_terms(simplify(self - other), radical=True) if not diff: return True if not diff.has(Add, Mod): # if there is no expanding to be done after simplifying # then this can't be a zero return False constant = diff.is_constant(simplify=False, failing_number=True) if constant is False: return False if not diff.is_number: if constant is None: # e.g. unless the right simplification is done, a symbolic # zero is possible (see expression of issue 6829: without # simplification constant will be None). return if constant is True: # this gives a number whether there are free symbols or not ndiff = diff._random() # is_comparable will work whether the result is real # or complex; it could be None, however. if ndiff and ndiff.is_comparable: return False # sometimes we can use a simplified result to give a clue as to # what the expression should be; if the expression is *not* zero # then we should have been able to compute that and so now # we can just consider the cases where the approximation appears # to be zero -- we try to prove it via minimal_polynomial. # # removed # ns = nsimplify(diff) # if diff.is_number and (not ns or ns == diff): # # The thought was that if it nsimplifies to 0 that's a sure sign # to try the following to prove it; or if it changed but wasn't # zero that might be a sign that it's not going to be easy to # prove. But tests seem to be working without that logic. # if diff.is_number: # try to prove via self-consistency surds = [s for s in diff.atoms(Pow) if s.args[0].is_Integer] # it seems to work better to try big ones first surds.sort(key=lambda x: -x.args[0]) for s in surds: try: # simplify is False here -- this expression has already # been identified as being hard to identify as zero; # we will handle the checking ourselves using nsimplify # to see if we are in the right ballpark or not and if so # *then* the simplification will be attempted. 
sol = solve(diff, s, simplify=False) if sol: if s in sol: # the self-consistent result is present return True if all(si.is_Integer for si in sol): # perfect powers are removed at instantiation # so surd s cannot be an integer return False if all(i.is_algebraic is False for i in sol): # a surd is algebraic return False if any(si in surds for si in sol): # it wasn't equal to s but it is in surds # and different surds are not equal return False if any(nsimplify(s - si) == 0 and simplify(s - si) == 0 for si in sol): return True if s.is_real: if any(nsimplify(si, [s]) == s and simplify(si) == s for si in sol): return True except NotImplementedError: pass # try to prove with minimal_polynomial but know when # *not* to use this or else it can take a long time. e.g. issue 8354 if True: # change True to condition that assures non-hang try: mp = minimal_polynomial(diff) if mp.is_Symbol: return True return False except (NotAlgebraic, NotImplementedError): pass # diff has not simplified to zero; constant is either None, True # or the number with significance (is_comparable) that was randomly # calculated twice as the same value. if constant not in (True, None) and constant != 0: return False if failing_expression: return diff return None def _eval_is_positive(self): finite = self.is_finite if finite is False: return False extended_positive = self.is_extended_positive if finite is True: return extended_positive if extended_positive is False: return False def _eval_is_negative(self): finite = self.is_finite if finite is False: return False extended_negative = self.is_extended_negative if finite is True: return extended_negative if extended_negative is False: return False def _eval_is_extended_positive_negative(self, positive): from sympy.polys.numberfields import minimal_polynomial from sympy.polys.polyerrors import NotAlgebraic if self.is_number: if self.is_extended_real is False: return False # check to see that we can get a value try: n2 = self._eval_evalf(2) # XXX: This shouldn't be caught here # Catches ValueError: hypsum() failed to converge to the requested # 34 bits of accuracy except ValueError: return None if n2 is None: return None if getattr(n2, '_prec', 1) == 1: # no significance return None if n2 is S.NaN: return None r, i = self.evalf(2).as_real_imag() if not i.is_Number or not r.is_Number: return False if r._prec != 1 and i._prec != 1: return bool(not i and ((r > 0) if positive else (r < 0))) elif r._prec == 1 and (not i or i._prec == 1) and \ self.is_algebraic and not self.has(Function): try: if minimal_polynomial(self).is_Symbol: return False except (NotAlgebraic, NotImplementedError): pass def _eval_is_extended_positive(self): return self._eval_is_extended_positive_negative(positive=True) def _eval_is_extended_negative(self): return self._eval_is_extended_positive_negative(positive=False) def _eval_interval(self, x, a, b): """ Returns evaluation over an interval. For most functions this is: self.subs(x, b) - self.subs(x, a), possibly using limit() if NaN is returned from subs, or if singularities are found between a and b. If b or a is None, it only evaluates -self.subs(x, a) or self.subs(b, x), respectively. 
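
        A rough sketch of the intended result (private helper, output assumed):

        >>> from sympy.abc import x
        >>> (x**2)._eval_interval(x, 1, 3)  # doctest: +SKIP
        8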
""" from sympy.series import limit, Limit from sympy.solvers.solveset import solveset from sympy.sets.sets import Interval from sympy.functions.elementary.exponential import log from sympy.calculus.util import AccumBounds if (a is None and b is None): raise ValueError('Both interval ends cannot be None.') def _eval_endpoint(left): c = a if left else b if c is None: return 0 else: C = self.subs(x, c) if C.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity, AccumBounds): if (a < b) != False: C = limit(self, x, c, "+" if left else "-") else: C = limit(self, x, c, "-" if left else "+") if isinstance(C, Limit): raise NotImplementedError("Could not compute limit") return C if a == b: return 0 A = _eval_endpoint(left=True) if A is S.NaN: return A B = _eval_endpoint(left=False) if (a and b) is None: return B - A value = B - A if a.is_comparable and b.is_comparable: if a < b: domain = Interval(a, b) else: domain = Interval(b, a) # check the singularities of self within the interval # if singularities is a ConditionSet (not iterable), catch the exception and pass singularities = solveset(self.cancel().as_numer_denom()[1], x, domain=domain) for logterm in self.atoms(log): singularities = singularities | solveset(logterm.args[0], x, domain=domain) try: for s in singularities: if value is S.NaN: # no need to keep adding, it will stay NaN break if not s.is_comparable: continue if (a < s) == (s < b) == True: value += -limit(self, x, s, "+") + limit(self, x, s, "-") elif (b < s) == (s < a) == True: value += limit(self, x, s, "+") - limit(self, x, s, "-") except TypeError: pass return value def _eval_power(self, other): # subclass to compute self**other for cases when # other is not NaN, 0, or 1 return None def _eval_conjugate(self): if self.is_extended_real: return self elif self.is_imaginary: return -self def conjugate(self): """Returns the complex conjugate of 'self'.""" from sympy.functions.elementary.complexes import conjugate as c return c(self) def dir(self, x, cdir): from sympy import log minexp = S.Zero if self.is_zero: return S.Zero arg = self while arg: minexp += S.One arg = arg.diff(x) coeff = arg.subs(x, 0) if coeff in (S.NaN, S.ComplexInfinity): try: coeff, _ = arg.leadterm(x) if coeff.has(log(x)): raise ValueError() except ValueError: coeff = arg.limit(x, 0) if coeff != S.Zero: break return coeff*cdir**minexp def _eval_transpose(self): from sympy.functions.elementary.complexes import conjugate if (self.is_complex or self.is_infinite): return self elif self.is_hermitian: return conjugate(self) elif self.is_antihermitian: return -conjugate(self) def transpose(self): from sympy.functions.elementary.complexes import transpose return transpose(self) def _eval_adjoint(self): from sympy.functions.elementary.complexes import conjugate, transpose if self.is_hermitian: return self elif self.is_antihermitian: return -self obj = self._eval_conjugate() if obj is not None: return transpose(obj) obj = self._eval_transpose() if obj is not None: return conjugate(obj) def adjoint(self): from sympy.functions.elementary.complexes import adjoint return adjoint(self) @classmethod def _parse_order(cls, order): """Parse and configure the ordering of terms. 
""" from sympy.polys.orderings import monomial_key startswith = getattr(order, "startswith", None) if startswith is None: reverse = False else: reverse = startswith('rev-') if reverse: order = order[4:] monom_key = monomial_key(order) def neg(monom): result = [] for m in monom: if isinstance(m, tuple): result.append(neg(m)) else: result.append(-m) return tuple(result) def key(term): _, ((re, im), monom, ncpart) = term monom = neg(monom_key(monom)) ncpart = tuple([e.sort_key(order=order) for e in ncpart]) coeff = ((bool(im), im), (re, im)) return monom, ncpart, coeff return key, reverse def as_ordered_factors(self, order=None): """Return list of ordered factors (if Mul) else [self].""" return [self] def as_poly(self, *gens, **args): """Converts ``self`` to a polynomial or returns ``None``. Explanation =========== >>> from sympy import sin >>> from sympy.abc import x, y >>> print((x**2 + x*y).as_poly()) Poly(x**2 + x*y, x, y, domain='ZZ') >>> print((x**2 + x*y).as_poly(x, y)) Poly(x**2 + x*y, x, y, domain='ZZ') >>> print((x**2 + sin(y)).as_poly(x, y)) None """ from sympy.polys import Poly, PolynomialError try: poly = Poly(self, *gens, **args) if not poly.is_Poly: return None else: return poly except PolynomialError: return None def as_ordered_terms(self, order=None, data=False): """ Transform an expression to an ordered list of terms. Examples ======== >>> from sympy import sin, cos >>> from sympy.abc import x >>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms() [sin(x)**2*cos(x), sin(x)**2, 1] """ from .numbers import Number, NumberSymbol if order is None and self.is_Add: # Spot the special case of Add(Number, Mul(Number, expr)) with the # first number positive and thhe second number nagative key = lambda x:not isinstance(x, (Number, NumberSymbol)) add_args = sorted(Add.make_args(self), key=key) if (len(add_args) == 2 and isinstance(add_args[0], (Number, NumberSymbol)) and isinstance(add_args[1], Mul)): mul_args = sorted(Mul.make_args(add_args[1]), key=key) if (len(mul_args) == 2 and isinstance(mul_args[0], Number) and add_args[0].is_positive and mul_args[0].is_negative): return add_args key, reverse = self._parse_order(order) terms, gens = self.as_terms() if not any(term.is_Order for term, _ in terms): ordered = sorted(terms, key=key, reverse=reverse) else: _terms, _order = [], [] for term, repr in terms: if not term.is_Order: _terms.append((term, repr)) else: _order.append((term, repr)) ordered = sorted(_terms, key=key, reverse=True) \ + sorted(_order, key=key, reverse=True) if data: return ordered, gens else: return [term for term, _ in ordered] def as_terms(self): """Transform an expression to a list of terms. 
""" from .add import Add from .mul import Mul from .exprtools import decompose_power gens, terms = set(), [] for term in Add.make_args(self): coeff, _term = term.as_coeff_Mul() coeff = complex(coeff) cpart, ncpart = {}, [] if _term is not S.One: for factor in Mul.make_args(_term): if factor.is_number: try: coeff *= complex(factor) except (TypeError, ValueError): pass else: continue if factor.is_commutative: base, exp = decompose_power(factor) cpart[base] = exp gens.add(base) else: ncpart.append(factor) coeff = coeff.real, coeff.imag ncpart = tuple(ncpart) terms.append((term, (coeff, cpart, ncpart))) gens = sorted(gens, key=default_sort_key) k, indices = len(gens), {} for i, g in enumerate(gens): indices[g] = i result = [] for term, (coeff, cpart, ncpart) in terms: monom = [0]*k for base, exp in cpart.items(): monom[indices[base]] = exp result.append((term, (coeff, tuple(monom), ncpart))) return result, gens def removeO(self): """Removes the additive O(..) symbol if there is one""" return self def getO(self): """Returns the additive O(..) symbol if there is one, else None.""" return None def getn(self): """ Returns the order of the expression. Explanation =========== The order is determined either from the O(...) term. If there is no O(...) term, it returns None. Examples ======== >>> from sympy import O >>> from sympy.abc import x >>> (1 + x + O(x**2)).getn() 2 >>> (1 + x).getn() """ from sympy import Dummy, Symbol o = self.getO() if o is None: return None elif o.is_Order: o = o.expr if o is S.One: return S.Zero if o.is_Symbol: return S.One if o.is_Pow: return o.args[1] if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n for oi in o.args: if oi.is_Symbol: return S.One if oi.is_Pow: syms = oi.atoms(Symbol) if len(syms) == 1: x = syms.pop() oi = oi.subs(x, Dummy('x', positive=True)) if oi.base.is_Symbol and oi.exp.is_Rational: return abs(oi.exp) raise NotImplementedError('not sure of order of %s' % o) def count_ops(self, visual=None): """wrapper for count_ops that returns the operation count.""" from .function import count_ops return count_ops(self, visual) def args_cnc(self, cset=False, warn=True, split_1=True): """Return [commutative factors, non-commutative factors] of self. Explanation =========== self is treated as a Mul and the ordering of the factors is maintained. If ``cset`` is True the commutative factors will be returned in a set. If there were repeated factors (as may happen with an unevaluated Mul) then an error will be raised unless it is explicitly suppressed by setting ``warn`` to False. Note: -1 is always separated from a Number unless split_1 is False. 
Examples ======== >>> from sympy import symbols, oo >>> A, B = symbols('A B', commutative=0) >>> x, y = symbols('x y') >>> (-2*x*y).args_cnc() [[-1, 2, x, y], []] >>> (-2.5*x).args_cnc() [[-1, 2.5, x], []] >>> (-2*x*A*B*y).args_cnc() [[-1, 2, x, y], [A, B]] >>> (-2*x*A*B*y).args_cnc(split_1=False) [[-2, x, y], [A, B]] >>> (-2*x*y).args_cnc(cset=True) [{-1, 2, x, y}, []] The arg is always treated as a Mul: >>> (-2 + x + A).args_cnc() [[], [x - 2 + A]] >>> (-oo).args_cnc() # -oo is a singleton [[-1, oo], []] """ if self.is_Mul: args = list(self.args) else: args = [self] for i, mi in enumerate(args): if not mi.is_commutative: c = args[:i] nc = args[i:] break else: c = args nc = [] if c and split_1 and ( c[0].is_Number and c[0].is_extended_negative and c[0] is not S.NegativeOne): c[:1] = [S.NegativeOne, -c[0]] if cset: clen = len(c) c = set(c) if clen and warn and len(c) != clen: raise ValueError('repeated commutative arguments: %s' % [ci for ci in c if list(self.args).count(ci) > 1]) return [c, nc] def coeff(self, x, n=1, right=False): """ Returns the coefficient from the term(s) containing ``x**n``. If ``n`` is zero then all terms independent of ``x`` will be returned. Explanation =========== When ``x`` is noncommutative, the coefficient to the left (default) or right of ``x`` can be returned. The keyword 'right' is ignored when ``x`` is commutative. Examples ======== >>> from sympy import symbols >>> from sympy.abc import x, y, z You can select terms that have an explicit negative in front of them: >>> (-x + 2*y).coeff(-1) x >>> (x - 2*y).coeff(-1) 2*y You can select terms with no Rational coefficient: >>> (x + 2*y).coeff(1) x >>> (3 + 2*x + 4*x**2).coeff(1) 0 You can select terms independent of x by making n=0; in this case expr.as_independent(x)[0] is returned (and 0 will be returned instead of None): >>> (3 + 2*x + 4*x**2).coeff(x, 0) 3 >>> eq = ((x + 1)**3).expand() + 1 >>> eq x**3 + 3*x**2 + 3*x + 2 >>> [eq.coeff(x, i) for i in reversed(range(4))] [1, 3, 3, 2] >>> eq -= 2 >>> [eq.coeff(x, i) for i in reversed(range(4))] [1, 3, 3, 0] You can select terms that have a numerical term in front of them: >>> (-x - 2*y).coeff(2) -y >>> from sympy import sqrt >>> (x + sqrt(2)*x).coeff(sqrt(2)) x The matching is exact: >>> (3 + 2*x + 4*x**2).coeff(x) 2 >>> (3 + 2*x + 4*x**2).coeff(x**2) 4 >>> (3 + 2*x + 4*x**2).coeff(x**3) 0 >>> (z*(x + y)**2).coeff((x + y)**2) z >>> (z*(x + y)**2).coeff(x + y) 0 In addition, no factoring is done, so 1 + z*(1 + y) is not obtained from the following: >>> (x + z*(x + x*y)).coeff(x) 1 If such factoring is desired, factor_terms can be used first: >>> from sympy import factor_terms >>> factor_terms(x + z*(x + x*y)).coeff(x) z*(y + 1) + 1 >>> n, m, o = symbols('n m o', commutative=False) >>> n.coeff(n) 1 >>> (3*n).coeff(n) 3 >>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m 1 + m >>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m m If there is more than one possible coefficient 0 is returned: >>> (n*m + m*n).coeff(n) 0 If there is only one possible coefficient, it is returned: >>> (n*m + x*m*n).coeff(m*n) x >>> (n*m + x*m*n).coeff(m*n, right=1) 1 See Also ======== as_coefficient: separate the expression into a coefficient and factor as_coeff_Add: separate the additive constant from an expression as_coeff_Mul: separate the multiplicative constant from an expression as_independent: separate x-dependent terms/factors from others sympy.polys.polytools.Poly.coeff_monomial: efficiently find the single coefficient of a monomial in Poly sympy.polys.polytools.Poly.nth: like 
coeff_monomial but powers of monomial terms are used """ x = sympify(x) if not isinstance(x, Basic): return S.Zero n = as_int(n) if not x: return S.Zero if x == self: if n == 1: return S.One return S.Zero if x is S.One: co = [a for a in Add.make_args(self) if a.as_coeff_Mul()[0] is S.One] if not co: return S.Zero return Add(*co) if n == 0: if x.is_Add and self.is_Add: c = self.coeff(x, right=right) if not c: return S.Zero if not right: return self - Add(*[a*x for a in Add.make_args(c)]) return self - Add(*[x*a for a in Add.make_args(c)]) return self.as_independent(x, as_Add=True)[0] # continue with the full method, looking for this power of x: x = x**n def incommon(l1, l2): if not l1 or not l2: return [] n = min(len(l1), len(l2)) for i in range(n): if l1[i] != l2[i]: return l1[:i] return l1[:] def find(l, sub, first=True): """ Find where list sub appears in list l. When ``first`` is True the first occurrence from the left is returned, else the last occurrence is returned. Return None if sub is not in l. Examples ======== >> l = range(5)*2 >> find(l, [2, 3]) 2 >> find(l, [2, 3], first=0) 7 >> find(l, [2, 4]) None """ if not sub or not l or len(sub) > len(l): return None n = len(sub) if not first: l.reverse() sub.reverse() for i in range(0, len(l) - n + 1): if all(l[i + j] == sub[j] for j in range(n)): break else: i = None if not first: l.reverse() sub.reverse() if i is not None and not first: i = len(l) - (i + n) return i co = [] args = Add.make_args(self) self_c = self.is_commutative x_c = x.is_commutative if self_c and not x_c: return S.Zero one_c = self_c or x_c xargs, nx = x.args_cnc(cset=True, warn=bool(not x_c)) # find the parts that pass the commutative terms for a in args: margs, nc = a.args_cnc(cset=True, warn=bool(not self_c)) if nc is None: nc = [] if len(xargs) > len(margs): continue resid = margs.difference(xargs) if len(resid) + len(xargs) == len(margs): if one_c: co.append(Mul(*(list(resid) + nc))) else: co.append((resid, nc)) if one_c: if co == []: return S.Zero elif co: return Add(*co) else: # both nc # now check the non-comm parts if not co: return S.Zero if all(n == co[0][1] for r, n in co): ii = find(co[0][1], nx, right) if ii is not None: if not right: return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii])) else: return Mul(*co[0][1][ii + len(nx):]) beg = reduce(incommon, (n[1] for n in co)) if beg: ii = find(beg, nx, right) if ii is not None: if not right: gcdc = co[0][0] for i in range(1, len(co)): gcdc = gcdc.intersection(co[i][0]) if not gcdc: break return Mul(*(list(gcdc) + beg[:ii])) else: m = ii + len(nx) return Add(*[Mul(*(list(r) + n[m:])) for r, n in co]) end = list(reversed( reduce(incommon, (list(reversed(n[1])) for n in co)))) if end: ii = find(end, nx, right) if ii is not None: if not right: return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co]) else: return Mul(*end[ii + len(nx):]) # look for single match hit = None for i, (r, n) in enumerate(co): ii = find(n, nx, right) if ii is not None: if not hit: hit = ii, r, n else: break else: if hit: ii, r, n = hit if not right: return Mul(*(list(r) + n[:ii])) else: return Mul(*n[ii + len(nx):]) return S.Zero def as_expr(self, *gens): """ Convert a polynomial to a SymPy expression. Examples ======== >>> from sympy import sin >>> from sympy.abc import x, y >>> f = (x**2 + x*y).as_poly(x, y) >>> f.as_expr() x**2 + x*y >>> sin(x).as_expr() sin(x) """ return self def as_coefficient(self, expr): """ Extracts symbolic coefficient at the given expression. 
In other words, this functions separates 'self' into the product of 'expr' and 'expr'-free coefficient. If such separation is not possible it will return None. Examples ======== >>> from sympy import E, pi, sin, I, Poly >>> from sympy.abc import x >>> E.as_coefficient(E) 1 >>> (2*E).as_coefficient(E) 2 >>> (2*sin(E)*E).as_coefficient(E) Two terms have E in them so a sum is returned. (If one were desiring the coefficient of the term exactly matching E then the constant from the returned expression could be selected. Or, for greater precision, a method of Poly can be used to indicate the desired term from which the coefficient is desired.) >>> (2*E + x*E).as_coefficient(E) x + 2 >>> _.args[0] # just want the exact match 2 >>> p = Poly(2*E + x*E); p Poly(x*E + 2*E, x, E, domain='ZZ') >>> p.coeff_monomial(E) 2 >>> p.nth(0, 1) 2 Since the following cannot be written as a product containing E as a factor, None is returned. (If the coefficient ``2*x`` is desired then the ``coeff`` method should be used.) >>> (2*E*x + x).as_coefficient(E) >>> (2*E*x + x).coeff(E) 2*x >>> (E*(x + 1) + x).as_coefficient(E) >>> (2*pi*I).as_coefficient(pi*I) 2 >>> (2*I).as_coefficient(pi*I) See Also ======== coeff: return sum of terms have a given factor as_coeff_Add: separate the additive constant from an expression as_coeff_Mul: separate the multiplicative constant from an expression as_independent: separate x-dependent terms/factors from others sympy.polys.polytools.Poly.coeff_monomial: efficiently find the single coefficient of a monomial in Poly sympy.polys.polytools.Poly.nth: like coeff_monomial but powers of monomial terms are used """ r = self.extract_multiplicatively(expr) if r and not r.has(expr): return r def as_independent(self, *deps, **hint): """ A mostly naive separation of a Mul or Add into arguments that are not are dependent on deps. To obtain as complete a separation of variables as possible, use a separation method first, e.g.: * separatevars() to change Mul, Add and Pow (including exp) into Mul * .expand(mul=True) to change Add or Mul into Add * .expand(log=True) to change log expr into an Add The only non-naive thing that is done here is to respect noncommutative ordering of variables and to always return (0, 0) for `self` of zero regardless of hints. For nonzero `self`, the returned tuple (i, d) has the following interpretation: * i will has no variable that appears in deps * d will either have terms that contain variables that are in deps, or be equal to 0 (when self is an Add) or 1 (when self is a Mul) * if self is an Add then self = i + d * if self is a Mul then self = i*d * otherwise (self, S.One) or (S.One, self) is returned. 
To force the expression to be treated as an Add, use the hint as_Add=True Examples ======== -- self is an Add >>> from sympy import sin, cos, exp >>> from sympy.abc import x, y, z >>> (x + x*y).as_independent(x) (0, x*y + x) >>> (x + x*y).as_independent(y) (x, x*y) >>> (2*x*sin(x) + y + x + z).as_independent(x) (y + z, 2*x*sin(x) + x) >>> (2*x*sin(x) + y + x + z).as_independent(x, y) (z, 2*x*sin(x) + x + y) -- self is a Mul >>> (x*sin(x)*cos(y)).as_independent(x) (cos(y), x*sin(x)) non-commutative terms cannot always be separated out when self is a Mul >>> from sympy import symbols >>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False) >>> (n1 + n1*n2).as_independent(n2) (n1, n1*n2) >>> (n2*n1 + n1*n2).as_independent(n2) (0, n1*n2 + n2*n1) >>> (n1*n2*n3).as_independent(n1) (1, n1*n2*n3) >>> (n1*n2*n3).as_independent(n2) (n1, n2*n3) >>> ((x-n1)*(x-y)).as_independent(x) (1, (x - y)*(x - n1)) -- self is anything else: >>> (sin(x)).as_independent(x) (1, sin(x)) >>> (sin(x)).as_independent(y) (sin(x), 1) >>> exp(x+y).as_independent(x) (1, exp(x + y)) -- force self to be treated as an Add: >>> (3*x).as_independent(x, as_Add=True) (0, 3*x) -- force self to be treated as a Mul: >>> (3+x).as_independent(x, as_Add=False) (1, x + 3) >>> (-3+x).as_independent(x, as_Add=False) (1, x - 3) Note how the below differs from the above in making the constant on the dep term positive. >>> (y*(-3+x)).as_independent(x) (y, x - 3) -- use .as_independent() for true independence testing instead of .has(). The former considers only symbols in the free symbols while the latter considers all symbols >>> from sympy import Integral >>> I = Integral(x, (x, 1, 2)) >>> I.has(x) True >>> x in I.free_symbols False >>> I.as_independent(x) == (I, 1) True >>> (I + x).as_independent(x) == (I, x) True Note: when trying to get independent terms, a separation method might need to be used first. 
In this case, it is important to keep track of what you send to this routine so you know how to interpret the returned values >>> from sympy import separatevars, log >>> separatevars(exp(x+y)).as_independent(x) (exp(y), exp(x)) >>> (x + x*y).as_independent(y) (x, x*y) >>> separatevars(x + x*y).as_independent(y) (x, y + 1) >>> (x*(1 + y)).as_independent(y) (x, y + 1) >>> (x*(1 + y)).expand(mul=True).as_independent(y) (x, x*y) >>> a, b=symbols('a b', positive=True) >>> (log(a*b).expand(log=True)).as_independent(b) (log(a), log(b)) See Also ======== .separatevars(), .expand(log=True), sympy.core.add.Add.as_two_terms(), sympy.core.mul.Mul.as_two_terms(), .as_coeff_add(), .as_coeff_mul() """ from .symbol import Symbol from .add import _unevaluated_Add from .mul import _unevaluated_Mul from sympy.utilities.iterables import sift if self.is_zero: return S.Zero, S.Zero func = self.func if hint.get('as_Add', isinstance(self, Add) ): want = Add else: want = Mul # sift out deps into symbolic and other and ignore # all symbols but those that are in the free symbols sym = set() other = [] for d in deps: if isinstance(d, Symbol): # Symbol.is_Symbol is True sym.add(d) else: other.append(d) def has(e): """return the standard has() if there are no literal symbols, else check to see that symbol-deps are in the free symbols.""" has_other = e.has(*other) if not sym: return has_other return has_other or e.has(*(e.free_symbols & sym)) if (want is not func or func is not Add and func is not Mul): if has(self): return (want.identity, self) else: return (self, want.identity) else: if func is Add: args = list(self.args) else: args, nc = self.args_cnc() d = sift(args, lambda x: has(x)) depend = d[True] indep = d[False] if func is Add: # all terms were treated as commutative return (Add(*indep), _unevaluated_Add(*depend)) else: # handle noncommutative by stopping at first dependent term for i, n in enumerate(nc): if has(n): depend.extend(nc[i:]) break indep.append(n) return Mul(*indep), ( Mul(*depend, evaluate=False) if nc else _unevaluated_Mul(*depend)) def as_real_imag(self, deep=True, **hints): """Performs complex expansion on 'self' and returns a tuple containing collected both real and imaginary parts. This method can't be confused with re() and im() functions, which does not perform complex expansion at evaluation. However it is possible to expand both re() and im() functions and get exactly the same results as with a single call to this function. >>> from sympy import symbols, I >>> x, y = symbols('x,y', real=True) >>> (x + y*I).as_real_imag() (x, y) >>> from sympy.abc import z, w >>> (z + w*I).as_real_imag() (re(z) - im(w), re(w) + im(z)) """ from sympy import im, re if hints.get('ignore') == self: return None else: return (re(self), im(self)) def as_powers_dict(self): """Return self as a dictionary of factors with each factor being treated as a power. The keys are the bases of the factors and the values, the corresponding exponents. The resulting dictionary should be used with caution if the expression is a Mul and contains non- commutative factors since the order that they appeared will be lost in the dictionary. See Also ======== as_ordered_factors: An alternative for noncommutative applications, returning an ordered list of factors. args_cnc: Similar to as_ordered_factors, but guarantees separation of commutative and noncommutative factors. 
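A minimal illustration of the mapping described above (only an exponent
lookup is shown here, since the exact container type returned is an
implementation detail):

>>> from sympy.abc import x
>>> (x**2).as_powers_dict()[x]
2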
""" d = defaultdict(int) d.update(dict([self.as_base_exp()])) return d def as_coefficients_dict(self): """Return a dictionary mapping terms to their Rational coefficient. Since the dictionary is a defaultdict, inquiries about terms which were not present will return a coefficient of 0. If an expression is not an Add it is considered to have a single term. Examples ======== >>> from sympy.abc import a, x >>> (3*x + a*x + 4).as_coefficients_dict() {1: 4, x: 3, a*x: 1} >>> _[a] 0 >>> (3*a*x).as_coefficients_dict() {a*x: 3} """ c, m = self.as_coeff_Mul() if not c.is_Rational: c = S.One m = self d = defaultdict(int) d.update({m: c}) return d def as_base_exp(self): # a -> b ** e return self, S.One def as_coeff_mul(self, *deps, **kwargs): """Return the tuple (c, args) where self is written as a Mul, ``m``. c should be a Rational multiplied by any factors of the Mul that are independent of deps. args should be a tuple of all other factors of m; args is empty if self is a Number or if self is independent of deps (when given). This should be used when you don't know if self is a Mul or not but you want to treat self as a Mul or if you want to process the individual arguments of the tail of self as a Mul. - if you know self is a Mul and want only the head, use self.args[0]; - if you don't want to process the arguments of the tail but need the tail then use self.as_two_terms() which gives the head and tail; - if you want to split self into an independent and dependent parts use ``self.as_independent(*deps)`` >>> from sympy import S >>> from sympy.abc import x, y >>> (S(3)).as_coeff_mul() (3, ()) >>> (3*x*y).as_coeff_mul() (3, (x, y)) >>> (3*x*y).as_coeff_mul(x) (3*y, (x,)) >>> (3*y).as_coeff_mul(x) (3*y, ()) """ if deps: if not self.has(*deps): return self, tuple() return S.One, (self,) def as_coeff_add(self, *deps): """Return the tuple (c, args) where self is written as an Add, ``a``. c should be a Rational added to any terms of the Add that are independent of deps. args should be a tuple of all other terms of ``a``; args is empty if self is a Number or if self is independent of deps (when given). This should be used when you don't know if self is an Add or not but you want to treat self as an Add or if you want to process the individual arguments of the tail of self as an Add. - if you know self is an Add and want only the head, use self.args[0]; - if you don't want to process the arguments of the tail but need the tail then use self.as_two_terms() which gives the head and tail. - if you want to split self into an independent and dependent parts use ``self.as_independent(*deps)`` >>> from sympy import S >>> from sympy.abc import x, y >>> (S(3)).as_coeff_add() (3, ()) >>> (3 + x).as_coeff_add() (3, (x,)) >>> (3 + x + y).as_coeff_add(x) (y + 3, (x,)) >>> (3 + y).as_coeff_add(x) (y + 3, ()) """ if deps: if not self.has(*deps): return self, tuple() return S.Zero, (self,) def primitive(self): """Return the positive Rational that can be extracted non-recursively from every term of self (i.e., self is treated like an Add). This is like the as_coeff_Mul() method but primitive always extracts a positive Rational (never a negative or a Float). 
Examples ======== >>> from sympy.abc import x >>> (3*(x + 1)**2).primitive() (3, (x + 1)**2) >>> a = (6*x + 2); a.primitive() (2, 3*x + 1) >>> b = (x/2 + 3); b.primitive() (1/2, x + 6) >>> (a*b).primitive() == (1, a*b) True """ if not self: return S.One, S.Zero c, r = self.as_coeff_Mul(rational=True) if c.is_negative: c, r = -c, -r return c, r def as_content_primitive(self, radical=False, clear=True): """This method should recursively remove a Rational from all arguments and return that (content) and the new self (primitive). The content should always be positive and ``Mul(*foo.as_content_primitive()) == foo``. The primitive need not be in canonical form and should try to preserve the underlying structure if possible (i.e. expand_mul should not be applied to self). Examples ======== >>> from sympy import sqrt >>> from sympy.abc import x, y, z >>> eq = 2 + 2*x + 2*y*(3 + 3*y) The as_content_primitive function is recursive and retains structure: >>> eq.as_content_primitive() (2, x + 3*y*(y + 1) + 1) Integer powers will have Rationals extracted from the base: >>> ((2 + 6*x)**2).as_content_primitive() (4, (3*x + 1)**2) >>> ((2 + 6*x)**(2*y)).as_content_primitive() (1, (2*(3*x + 1))**(2*y)) Terms may end up joining once their as_content_primitives are added: >>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive() (11, x*(y + 1)) >>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive() (9, x*(y + 1)) >>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive() (1, 6.0*x*(y + 1) + 3*z*(y + 1)) >>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive() (121, x**2*(y + 1)**2) >>> ((x*(1 + y) + 0.4*x*(3 + 3*y))**2).as_content_primitive() (1, 4.84*x**2*(y + 1)**2) Radical content can also be factored out of the primitive: >>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True) (2, sqrt(2)*(1 + 2*sqrt(5))) If clear=False (default is True) then content will not be removed from an Add if it can be distributed to leave one or more terms with integer coefficients. >>> (x/2 + y).as_content_primitive() (1/2, x + 2*y) >>> (x/2 + y).as_content_primitive(clear=False) (1, x/2 + y) """ return S.One, self def as_numer_denom(self): """ expression -> a/b -> a, b This is just a stub that should be defined by an object's class methods to get anything else. See Also ======== normal: return a/b instead of a, b """ return self, S.One def normal(self): from .mul import _unevaluated_Mul n, d = self.as_numer_denom() if d is S.One: return n if d.is_Number: return _unevaluated_Mul(n, 1/d) else: return n/d def extract_multiplicatively(self, c): """Return None if it's not possible to make self in the form c * something in a nice way, i.e. preserving the properties of arguments of self. 
Examples ======== >>> from sympy import symbols, Rational >>> x, y = symbols('x,y', real=True) >>> ((x*y)**3).extract_multiplicatively(x**2 * y) x*y**2 >>> ((x*y)**3).extract_multiplicatively(x**4 * y) >>> (2*x).extract_multiplicatively(2) x >>> (2*x).extract_multiplicatively(3) >>> (Rational(1, 2)*x).extract_multiplicatively(3) x/6 """ from .add import _unevaluated_Add c = sympify(c) if self is S.NaN: return None if c is S.One: return self elif c == self: return S.One if c.is_Add: cc, pc = c.primitive() if cc is not S.One: c = Mul(cc, pc, evaluate=False) if c.is_Mul: a, b = c.as_two_terms() x = self.extract_multiplicatively(a) if x is not None: return x.extract_multiplicatively(b) else: return x quotient = self / c if self.is_Number: if self is S.Infinity: if c.is_positive: return S.Infinity elif self is S.NegativeInfinity: if c.is_negative: return S.Infinity elif c.is_positive: return S.NegativeInfinity elif self is S.ComplexInfinity: if not c.is_zero: return S.ComplexInfinity elif self.is_Integer: if not quotient.is_Integer: return None elif self.is_positive and quotient.is_negative: return None else: return quotient elif self.is_Rational: if not quotient.is_Rational: return None elif self.is_positive and quotient.is_negative: return None else: return quotient elif self.is_Float: if not quotient.is_Float: return None elif self.is_positive and quotient.is_negative: return None else: return quotient elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit: if quotient.is_Mul and len(quotient.args) == 2: if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self: return quotient elif quotient.is_Integer and c.is_Number: return quotient elif self.is_Add: cs, ps = self.primitive() # assert cs >= 1 if c.is_Number and c is not S.NegativeOne: # assert c != 1 (handled at top) if cs is not S.One: if c.is_negative: xc = -(cs.extract_multiplicatively(-c)) else: xc = cs.extract_multiplicatively(c) if xc is not None: return xc*ps # rely on 2-arg Mul to restore Add return # |c| != 1 can only be extracted from cs if c == ps: return cs # check args of ps newargs = [] for arg in ps.args: newarg = arg.extract_multiplicatively(c) if newarg is None: return # all or nothing newargs.append(newarg) if cs is not S.One: args = [cs*t for t in newargs] # args may be in different order return _unevaluated_Add(*args) else: return Add._from_args(newargs) elif self.is_Mul: args = list(self.args) for i, arg in enumerate(args): newarg = arg.extract_multiplicatively(c) if newarg is not None: args[i] = newarg return Mul(*args) elif self.is_Pow: if c.is_Pow and c.base == self.base: new_exp = self.exp.extract_additively(c.exp) if new_exp is not None: return self.base ** (new_exp) elif c == self.base: new_exp = self.exp.extract_additively(1) if new_exp is not None: return self.base ** (new_exp) def extract_additively(self, c): """Return self - c if it's possible to subtract c from self and make all matching coefficients move towards zero, else return None. 
Examples ======== >>> from sympy.abc import x, y >>> e = 2*x + 3 >>> e.extract_additively(x + 1) x + 2 >>> e.extract_additively(3*x) >>> e.extract_additively(4) >>> (y*(x + 1)).extract_additively(x + 1) >>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1) (x + 1)*(x + 2*y) + 3 Sometimes auto-expansion will return a less simplified result than desired; gcd_terms might be used in such cases: >>> from sympy import gcd_terms >>> (4*x*(y + 1) + y).extract_additively(x) 4*x*(y + 1) + x*(4*y + 3) - x*(4*y + 4) + y >>> gcd_terms(_) x*(4*y + 3) + y See Also ======== extract_multiplicatively coeff as_coefficient """ c = sympify(c) if self is S.NaN: return None if c.is_zero: return self elif c == self: return S.Zero elif self == S.Zero: return None if self.is_Number: if not c.is_Number: return None co = self diff = co - c # XXX should we match types? i.e should 3 - .1 succeed? if (co > 0 and diff > 0 and diff < co or co < 0 and diff < 0 and diff > co): return diff return None if c.is_Number: co, t = self.as_coeff_Add() xa = co.extract_additively(c) if xa is None: return None return xa + t # handle the args[0].is_Number case separately # since we will have trouble looking for the coeff of # a number. if c.is_Add and c.args[0].is_Number: # whole term as a term factor co = self.coeff(c) xa0 = (co.extract_additively(1) or 0)*c if xa0: diff = self - co*c return (xa0 + (diff.extract_additively(c) or diff)) or None # term-wise h, t = c.as_coeff_Add() sh, st = self.as_coeff_Add() xa = sh.extract_additively(h) if xa is None: return None xa2 = st.extract_additively(t) if xa2 is None: return None return xa + xa2 # whole term as a term factor co = self.coeff(c) xa0 = (co.extract_additively(1) or 0)*c if xa0: diff = self - co*c return (xa0 + (diff.extract_additively(c) or diff)) or None # term-wise coeffs = [] for a in Add.make_args(c): ac, at = a.as_coeff_Mul() co = self.coeff(at) if not co: return None coc, cot = co.as_coeff_Add() xa = coc.extract_additively(ac) if xa is None: return None self -= co*at coeffs.append((cot + xa)*at) coeffs.append(self) return Add(*coeffs) @property def expr_free_symbols(self): """ Like ``free_symbols``, but returns the free symbols only if they are contained in an expression node. Examples ======== >>> from sympy.abc import x, y >>> (x + y).expr_free_symbols {x, y} If the expression is contained in a non-expression object, don't return the free symbols. Compare: >>> from sympy import Tuple >>> t = Tuple(x + y) >>> t.expr_free_symbols set() >>> t.free_symbols {x, y} """ return {j for i in self.args for j in i.expr_free_symbols} def could_extract_minus_sign(self): """Return True if self is not in a canonical form with respect to its sign. For most expressions, e, there will be a difference in e and -e. When there is, True will be returned for one and False for the other; False will be returned if there is no difference. Examples ======== >>> from sympy.abc import x, y >>> e = x - y >>> {i.could_extract_minus_sign() for i in (e, -e)} {False, True} """ negative_self = -self if self == negative_self: return False # e.g. 
zoo*x == -zoo*x self_has_minus = (self.extract_multiplicatively(-1) is not None) negative_self_has_minus = ( (negative_self).extract_multiplicatively(-1) is not None) if self_has_minus != negative_self_has_minus: return self_has_minus else: if self.is_Add: # We choose the one with less arguments with minus signs all_args = len(self.args) negative_args = len([False for arg in self.args if arg.could_extract_minus_sign()]) positive_args = all_args - negative_args if positive_args > negative_args: return False elif positive_args < negative_args: return True elif self.is_Mul: # We choose the one with an odd number of minus signs num, den = self.as_numer_denom() args = Mul.make_args(num) + Mul.make_args(den) arg_signs = [arg.could_extract_minus_sign() for arg in args] negative_args = list(filter(None, arg_signs)) return len(negative_args) % 2 == 1 # As a last resort, we choose the one with greater value of .sort_key() return bool(self.sort_key() < negative_self.sort_key()) def extract_branch_factor(self, allow_half=False): """ Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way. Return (z, n). >>> from sympy import exp_polar, I, pi >>> from sympy.abc import x, y >>> exp_polar(I*pi).extract_branch_factor() (exp_polar(I*pi), 0) >>> exp_polar(2*I*pi).extract_branch_factor() (1, 1) >>> exp_polar(-pi*I).extract_branch_factor() (exp_polar(I*pi), -1) >>> exp_polar(3*pi*I + x).extract_branch_factor() (exp_polar(x + I*pi), 1) >>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor() (y*exp_polar(2*pi*x), -1) >>> exp_polar(-I*pi/2).extract_branch_factor() (exp_polar(-I*pi/2), 0) If allow_half is True, also extract exp_polar(I*pi): >>> exp_polar(I*pi).extract_branch_factor(allow_half=True) (1, 1/2) >>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True) (1, 1) >>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True) (1, 3/2) >>> exp_polar(-I*pi).extract_branch_factor(allow_half=True) (1, -1/2) """ from sympy import exp_polar, pi, I, ceiling, Add n = S.Zero res = S.One args = Mul.make_args(self) exps = [] for arg in args: if isinstance(arg, exp_polar): exps += [arg.exp] else: res *= arg piimult = S.Zero extras = [] while exps: exp = exps.pop() if exp.is_Add: exps += exp.args continue if exp.is_Mul: coeff = exp.as_coefficient(pi*I) if coeff is not None: piimult += coeff continue extras += [exp] if piimult.is_number: coeff = piimult tail = () else: coeff, tail = piimult.as_coeff_add(*piimult.free_symbols) # round down to nearest multiple of 2 branchfact = ceiling(coeff/2 - S.Half)*2 n += branchfact/2 c = coeff - branchfact if allow_half: nc = c.extract_additively(1) if nc is not None: n += S.Half c = nc newexp = pi*I*Add(*((c, ) + tail)) + Add(*extras) if newexp != 0: res *= exp_polar(newexp) return res, n def _eval_is_polynomial(self, syms): if self.free_symbols.intersection(syms) == set(): return True return False def is_polynomial(self, *syms): r""" Return True if self is a polynomial in syms and False otherwise. This checks if self is an exact polynomial in syms. This function returns False for expressions that are "polynomials" with symbolic exponents. Thus, you should be able to apply polynomial algorithms to expressions for which this returns True, and Poly(expr, \*syms) should work if and only if expr.is_polynomial(\*syms) returns True. The polynomial does not have to be in expanded form. If no symbols are given, all free symbols in the expression will be used. This is not part of the assumptions system. You cannot do Symbol('z', polynomial=True). 
Examples ======== >>> from sympy import Symbol >>> x = Symbol('x') >>> ((x**2 + 1)**4).is_polynomial(x) True >>> ((x**2 + 1)**4).is_polynomial() True >>> (2**x + 1).is_polynomial(x) False >>> n = Symbol('n', nonnegative=True, integer=True) >>> (x**n + 1).is_polynomial(x) False This function does not attempt any nontrivial simplifications that may result in an expression that does not appear to be a polynomial to become one. >>> from sympy import sqrt, factor, cancel >>> y = Symbol('y', positive=True) >>> a = sqrt(y**2 + 2*y + 1) >>> a.is_polynomial(y) False >>> factor(a) y + 1 >>> factor(a).is_polynomial(y) True >>> b = (y**2 + 2*y + 1)/(y + 1) >>> b.is_polynomial(y) False >>> cancel(b) y + 1 >>> cancel(b).is_polynomial(y) True See also .is_rational_function() """ if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if syms.intersection(self.free_symbols) == set(): # constant polynomial return True else: return self._eval_is_polynomial(syms) def _eval_is_rational_function(self, syms): if self.free_symbols.intersection(syms) == set(): return True return False def is_rational_function(self, *syms): """ Test whether function is a ratio of two polynomials in the given symbols, syms. When syms is not given, all free symbols will be used. The rational function does not have to be in expanded or in any kind of canonical form. This function returns False for expressions that are "rational functions" with symbolic exponents. Thus, you should be able to call .as_numer_denom() and apply polynomial algorithms to the result for expressions for which this returns True. This is not part of the assumptions system. You cannot do Symbol('z', rational_function=True). Examples ======== >>> from sympy import Symbol, sin >>> from sympy.abc import x, y >>> (x/y).is_rational_function() True >>> (x**2).is_rational_function() True >>> (x/sin(y)).is_rational_function(y) False >>> n = Symbol('n', integer=True) >>> (x**n + 1).is_rational_function(x) False This function does not attempt any nontrivial simplifications that may result in an expression that does not appear to be a rational function to become one. >>> from sympy import sqrt, factor >>> y = Symbol('y', positive=True) >>> a = sqrt(y**2 + 2*y + 1)/y >>> a.is_rational_function(y) False >>> factor(a) (y + 1)/y >>> factor(a).is_rational_function(y) True See also is_algebraic_expr(). """ if self in [S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity]: return False if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if syms.intersection(self.free_symbols) == set(): # constant rational function return True else: return self._eval_is_rational_function(syms) def _eval_is_meromorphic(self, x, a): # Default implementation, return True for constants. return None if self.has(x) else True def is_meromorphic(self, x, a): """ This tests whether an expression is meromorphic as a function of the given symbol ``x`` at the point ``a``. This method is intended as a quick test that will return None if no decision can be made without simplification or more detailed analysis. 
Examples ======== >>> from sympy import zoo, log, sin, sqrt >>> from sympy.abc import x >>> f = 1/x**2 + 1 - 2*x**3 >>> f.is_meromorphic(x, 0) True >>> f.is_meromorphic(x, 1) True >>> f.is_meromorphic(x, zoo) True >>> g = x**log(3) >>> g.is_meromorphic(x, 0) False >>> g.is_meromorphic(x, 1) True >>> g.is_meromorphic(x, zoo) False >>> h = sin(1/x)*x**2 >>> h.is_meromorphic(x, 0) False >>> h.is_meromorphic(x, 1) True >>> h.is_meromorphic(x, zoo) True Multivalued functions are considered meromorphic when their branches are meromorphic. Thus most functions are meromorphic everywhere except at essential singularities and branch points. In particular, they will be meromorphic also on branch cuts except at their endpoints. >>> log(x).is_meromorphic(x, -1) True >>> log(x).is_meromorphic(x, 0) False >>> sqrt(x).is_meromorphic(x, -1) True >>> sqrt(x).is_meromorphic(x, 0) False """ if not x.is_symbol: raise TypeError("{} should be of symbol type".format(x)) a = sympify(a) return self._eval_is_meromorphic(x, a) def _eval_is_algebraic_expr(self, syms): if self.free_symbols.intersection(syms) == set(): return True return False def is_algebraic_expr(self, *syms): """ This tests whether a given expression is algebraic or not, in the given symbols, syms. When syms is not given, all free symbols will be used. The rational function does not have to be in expanded or in any kind of canonical form. This function returns False for expressions that are "algebraic expressions" with symbolic exponents. This is a simple extension to the is_rational_function, including rational exponentiation. Examples ======== >>> from sympy import Symbol, sqrt >>> x = Symbol('x', real=True) >>> sqrt(1 + x).is_rational_function() False >>> sqrt(1 + x).is_algebraic_expr() True This function does not attempt any nontrivial simplifications that may result in an expression that does not appear to be an algebraic expression to become one. >>> from sympy import exp, factor >>> a = sqrt(exp(x)**2 + 2*exp(x) + 1)/(exp(x) + 1) >>> a.is_algebraic_expr(x) False >>> factor(a).is_algebraic_expr() True See Also ======== is_rational_function() References ========== - https://en.wikipedia.org/wiki/Algebraic_expression """ if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if syms.intersection(self.free_symbols) == set(): # constant algebraic expression return True else: return self._eval_is_algebraic_expr(syms) ################################################################################### ##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ################## ################################################################################### def series(self, x=None, x0=0, n=6, dir="+", logx=None, cdir=0): """ Series expansion of "self" around ``x = x0`` yielding either terms of the series one by one (the lazy series given when n=None), else all the terms at once when n != None. Returns the series expansion of "self" around the point ``x = x0`` with respect to ``x`` up to ``O((x - x0)**n, x, x0)`` (default n is 6). If ``x=None`` and ``self`` is univariate, the univariate symbol will be supplied, otherwise an error will be raised. Parameters ========== expr : Expression The expression whose series is to be expanded. x : Symbol It is the variable of the expression to be calculated. x0 : Value The value around which ``x`` is calculated. Can be any value from ``-oo`` to ``oo``. n : Value The number of terms upto which the series is to be expanded. dir : String, optional The series-expansion can be bi-directional. 
If ``dir="+"``, then (x->x0+). If ``dir="-", then (x->x0-). For infinite ``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined from the direction of the infinity (i.e., ``dir="-"`` for ``oo``). logx : optional It is used to replace any log(x) in the returned series with a symbolic value rather than evaluating the actual value. cdir : optional It stands for complex direction, and indicates the direction from which the expansion needs to be evaluated. Examples ======== >>> from sympy import cos, exp, tan >>> from sympy.abc import x, y >>> cos(x).series() 1 - x**2/2 + x**4/24 + O(x**6) >>> cos(x).series(n=4) 1 - x**2/2 + O(x**4) >>> cos(x).series(x, x0=1, n=2) cos(1) - (x - 1)*sin(1) + O((x - 1)**2, (x, 1)) >>> e = cos(x + exp(y)) >>> e.series(y, n=2) cos(x + 1) - y*sin(x + 1) + O(y**2) >>> e.series(x, n=2) cos(exp(y)) - x*sin(exp(y)) + O(x**2) If ``n=None`` then a generator of the series terms will be returned. >>> term=cos(x).series(n=None) >>> [next(term) for i in range(2)] [1, -x**2/2] For ``dir=+`` (default) the series is calculated from the right and for ``dir=-`` the series from the left. For smooth functions this flag will not alter the results. >>> abs(x).series(dir="+") x >>> abs(x).series(dir="-") -x >>> f = tan(x) >>> f.series(x, 2, 6, "+") tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) + (x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 + 5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 + 2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2)) >>> f.series(x, 2, 3, "-") tan(2) + (2 - x)*(-tan(2)**2 - 1) + (2 - x)**2*(tan(2)**3 + tan(2)) + O((x - 2)**3, (x, 2)) Returns ======= Expr : Expression Series expansion of the expression about x0 Raises ====== TypeError If "n" and "x0" are infinity objects PoleError If "x0" is an infinity object """ from sympy import collect, Dummy, Order, Rational, Symbol, ceiling if x is None: syms = self.free_symbols if not syms: return self elif len(syms) > 1: raise ValueError('x must be given for multivariate functions.') x = syms.pop() if isinstance(x, Symbol): dep = x in self.free_symbols else: d = Dummy() dep = d in self.xreplace({x: d}).free_symbols if not dep: if n is None: return (s for s in [self]) else: return self if len(dir) != 1 or dir not in '+-': raise ValueError("Dir must be '+' or '-'") if x0 in [S.Infinity, S.NegativeInfinity]: sgn = 1 if x0 is S.Infinity else -1 s = self.subs(x, sgn/x).series(x, n=n, dir='+', cdir=cdir) if n is None: return (si.subs(x, sgn/x) for si in s) return s.subs(x, sgn/x) # use rep to shift origin to x0 and change sign (if dir is negative) # and undo the process with rep2 if x0 or dir == '-': if dir == '-': rep = -x + x0 rep2 = -x rep2b = x0 else: rep = x + x0 rep2 = x rep2b = -x0 s = self.subs(x, rep).series(x, x0=0, n=n, dir='+', logx=logx, cdir=cdir) if n is None: # lseries... return (si.subs(x, rep2 + rep2b) for si in s) return s.subs(x, rep2 + rep2b) # from here on it's x0=0 and dir='+' handling if x.is_positive is x.is_negative is None or x.is_Symbol is not True: # replace x with an x that has a positive assumption xpos = Dummy('x', positive=True, finite=True) rv = self.subs(x, xpos).series(xpos, x0, n, dir, logx=logx, cdir=cdir) if n is None: return (s.subs(xpos, x) for s in rv) else: return rv.subs(xpos, x) if n is not None: # nseries handling s1 = self._eval_nseries(x, n=n, logx=logx, cdir=cdir) o = s1.getO() or S.Zero if o: # make sure the requested order is returned ngot = o.getn() if ngot > n: # leave o in its current form (e.g. 
with x*log(x)) so # it eats terms properly, then replace it below if n != 0: s1 += o.subs(x, x**Rational(n, ngot)) else: s1 += Order(1, x) elif ngot < n: # increase the requested number of terms to get the desired # number keep increasing (up to 9) until the received order # is different than the original order and then predict how # many additional terms are needed for more in range(1, 9): s1 = self._eval_nseries(x, n=n + more, logx=logx, cdir=cdir) newn = s1.getn() if newn != ngot: ndo = n + ceiling((n - ngot)*more/(newn - ngot)) s1 = self._eval_nseries(x, n=ndo, logx=logx, cdir=cdir) while s1.getn() < n: s1 = self._eval_nseries(x, n=ndo, logx=logx, cdir=cdir) ndo += 1 break else: raise ValueError('Could not calculate %s terms for %s' % (str(n), self)) s1 += Order(x**n, x) o = s1.getO() s1 = s1.removeO() else: o = Order(x**n, x) s1done = s1.doit() if (s1done + o).removeO() == s1done: o = S.Zero try: return collect(s1, x) + o except NotImplementedError: return s1 + o else: # lseries handling def yield_lseries(s): """Return terms of lseries one at a time.""" for si in s: if not si.is_Add: yield si continue # yield terms 1 at a time if possible # by increasing order until all the # terms have been returned yielded = 0 o = Order(si, x)*x ndid = 0 ndo = len(si.args) while 1: do = (si - yielded + o).removeO() o *= x if not do or do.is_Order: continue if do.is_Add: ndid += len(do.args) else: ndid += 1 yield do if ndid == ndo: break yielded += do return yield_lseries(self.removeO()._eval_lseries(x, logx=logx, cdir=cdir)) def aseries(self, x=None, n=6, bound=0, hir=False): """Asymptotic Series expansion of self. This is equivalent to ``self.series(x, oo, n)``. Parameters ========== self : Expression The expression whose series is to be expanded. x : Symbol It is the variable of the expression to be calculated. n : Value The number of terms upto which the series is to be expanded. hir : Boolean Set this parameter to be True to produce hierarchical series. It stops the recursion at an early level and may provide nicer and more useful results. bound : Value, Integer Use the ``bound`` parameter to give limit on rewriting coefficients in its normalised form. Examples ======== >>> from sympy import sin, exp >>> from sympy.abc import x >>> e = sin(1/x + exp(-x)) - sin(1/x) >>> e.aseries(x) (1/(24*x**4) - 1/(2*x**2) + 1 + O(x**(-6), (x, oo)))*exp(-x) >>> e.aseries(x, n=3, hir=True) -exp(-2*x)*sin(1/x)/2 + exp(-x)*cos(1/x) + O(exp(-3*x), (x, oo)) >>> e = exp(exp(x)/(1 - 1/x)) >>> e.aseries(x) exp(exp(x)/(1 - 1/x)) >>> e.aseries(x, bound=3) exp(exp(x)/x**2)*exp(exp(x)/x)*exp(-exp(x) + exp(x)/(1 - 1/x) - exp(x)/x - exp(x)/x**2)*exp(exp(x)) Returns ======= Expr Asymptotic series expansion of the expression. Notes ===== This algorithm is directly induced from the limit computational algorithm provided by Gruntz. It majorly uses the mrv and rewrite sub-routines. The overall idea of this algorithm is first to look for the most rapidly varying subexpression w of a given expression f and then expands f in a series in w. Then same thing is recursively done on the leading coefficient till we get constant coefficients. If the most rapidly varying subexpression of a given expression f is f itself, the algorithm tries to find a normalised representation of the mrv set and rewrites f using this normalised representation. If the expansion contains an order term, it will be either ``O(x ** (-n))`` or ``O(w ** (-n))`` where ``w`` belongs to the most rapidly varying expression of ``self``. References ========== .. 
[1] A New Algorithm for Computing Asymptotic Series - Dominik Gruntz .. [2] Gruntz thesis - p90 .. [3] http://en.wikipedia.org/wiki/Asymptotic_expansion See Also ======== Expr.aseries: See the docstring of this function for complete details of this wrapper. """ from sympy import Order, Dummy from sympy.functions import exp, log from sympy.series.gruntz import mrv, rewrite if x.is_positive is x.is_negative is None: xpos = Dummy('x', positive=True) return self.subs(x, xpos).aseries(xpos, n, bound, hir).subs(xpos, x) om, exps = mrv(self, x) # We move one level up by replacing `x` by `exp(x)`, and then # computing the asymptotic series for f(exp(x)). Then asymptotic series # can be obtained by moving one-step back, by replacing x by ln(x). if x in om: s = self.subs(x, exp(x)).aseries(x, n, bound, hir).subs(x, log(x)) if s.getO(): return s + Order(1/x**n, (x, S.Infinity)) return s k = Dummy('k', positive=True) # f is rewritten in terms of omega func, logw = rewrite(exps, om, x, k) if self in om: if bound <= 0: return self s = (self.exp).aseries(x, n, bound=bound) s = s.func(*[t.removeO() for t in s.args]) res = exp(s.subs(x, 1/x).as_leading_term(x).subs(x, 1/x)) func = exp(self.args[0] - res.args[0]) / k logw = log(1/res) s = func.series(k, 0, n) # Hierarchical series if hir: return s.subs(k, exp(logw)) o = s.getO() terms = sorted(Add.make_args(s.removeO()), key=lambda i: int(i.as_coeff_exponent(k)[1])) s = S.Zero has_ord = False # Then we recursively expand these coefficients one by one into # their asymptotic series in terms of their most rapidly varying subexpressions. for t in terms: coeff, expo = t.as_coeff_exponent(k) if coeff.has(x): # Recursive step snew = coeff.aseries(x, n, bound=bound-1) if has_ord and snew.getO(): break elif snew.getO(): has_ord = True s += (snew * k**expo) else: s += t if not o or has_ord: return s.subs(k, exp(logw)) return (s + o).subs(k, exp(logw)) def taylor_term(self, n, x, *previous_terms): """General method for the taylor term. This method is slow, because it differentiates n-times. Subclasses can redefine it to make it faster by using the "previous_terms". """ from sympy import Dummy, factorial x = sympify(x) _x = Dummy('x') return self.subs(x, _x).diff(_x, n).subs(_x, x).subs(x, 0) * x**n / factorial(n) def lseries(self, x=None, x0=0, dir='+', logx=None, cdir=0): """ Wrapper for series yielding an iterator of the terms of the series. Note: an infinite series will yield an infinite iterator. The following, for exaxmple, will never terminate. It will just keep printing terms of the sin(x) series:: for term in sin(x).lseries(x): print term The advantage of lseries() over nseries() is that many times you are just interested in the next term in the series (i.e. the first term for example), but you don't know how many you should ask for in nseries() using the "n" parameter. See also nseries(). """ return self.series(x, x0, n=None, dir=dir, logx=logx, cdir=cdir) def _eval_lseries(self, x, logx=None, cdir=0): # default implementation of lseries is using nseries(), and adaptively # increasing the "n". As you can see, it is not very efficient, because # we are calculating the series over and over again. Subclasses should # override this method and implement much more efficient yielding of # terms. 
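# Illustrative outline of the adaptive strategy coded below (comment only,
# added for clarity): grow ``n`` until the nseries result contains more than
# a bare Order term, yield that finite part, then keep growing ``n`` and
# yield each newly appearing piece, stopping once the accumulated series
# reproduces ``self`` (the ``(series - self).cancel()`` check).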
n = 0 series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir) while series.is_Order: n += 1 series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir) e = series.removeO() yield e if e is S.Zero: return while 1: while 1: n += 1 series = self._eval_nseries(x, n=n, logx=logx, cdir=cdir).removeO() if e != series: break if (series - self).cancel() is S.Zero: return yield series - e e = series def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): """ Wrapper to _eval_nseries if assumptions allow, else to series. If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is called. This calculates "n" terms in the innermost expressions and then builds up the final series just by "cross-multiplying" everything out. The optional ``logx`` parameter can be used to replace any log(x) in the returned series with a symbolic value to avoid evaluating log(x) at 0. A symbol to use in place of log(x) should be provided. Advantage -- it's fast, because we don't have to determine how many terms we need to calculate in advance. Disadvantage -- you may end up with less terms than you may have expected, but the O(x**n) term appended will always be correct and so the result, though perhaps shorter, will also be correct. If any of those assumptions is not met, this is treated like a wrapper to series which will try harder to return the correct number of terms. See also lseries(). Examples ======== >>> from sympy import sin, log, Symbol >>> from sympy.abc import x, y >>> sin(x).nseries(x, 0, 6) x - x**3/6 + x**5/120 + O(x**6) >>> log(x+1).nseries(x, 0, 5) x - x**2/2 + x**3/3 - x**4/4 + O(x**5) Handling of the ``logx`` parameter --- in the following example the expansion fails since ``sin`` does not have an asymptotic expansion at -oo (the limit of log(x) as x approaches 0): >>> e = sin(log(x)) >>> e.nseries(x, 0, 6) Traceback (most recent call last): ... PoleError: ... ... >>> logx = Symbol('logx') >>> e.nseries(x, 0, 6, logx=logx) sin(logx) In the following example, the expansion works but gives only an Order term unless the ``logx`` parameter is used: >>> e = x**y >>> e.nseries(x, 0, 2) O(log(x)**2) >>> e.nseries(x, 0, 2, logx=logx) exp(logx*y) """ if x and not x in self.free_symbols: return self if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): return self.series(x, x0, n, dir, cdir=cdir) else: return self._eval_nseries(x, n=n, logx=logx, cdir=cdir) def _eval_nseries(self, x, n, logx, cdir): """ Return terms of series for self up to O(x**n) at x=0 from the positive direction. This is a method that should be overridden in subclasses. Users should never call this method directly (use .nseries() instead), so you don't have to write docstrings for _eval_nseries(). """ from sympy.utilities.misc import filldedent raise NotImplementedError(filldedent(""" The _eval_nseries method should be added to %s to give terms up to O(x**n) at x=0 from the positive direction so it is available when nseries calls it.""" % self.func) ) def limit(self, x, xlim, dir='+'): """ Compute limit x->xlim. """ from sympy.series.limits import limit return limit(self, x, xlim, dir) def compute_leading_term(self, x, logx=None): """ as_leading_term is only allowed for results of .series() This is a wrapper to compute a series first. 
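A small illustrative example (added for clarity; it simply exercises the
wrapper on a polynomial, for which the leading term at x=0 is the lowest
power):

>>> from sympy.abc import x
>>> (x + x**2).compute_leading_term(x)
x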
""" from sympy import Dummy, log, Piecewise, piecewise_fold from sympy.series.gruntz import calculate_series if self.has(Piecewise): expr = piecewise_fold(self) else: expr = self if self.removeO() == 0: return self if logx is None: d = Dummy('logx') s = calculate_series(expr, x, d).subs(d, log(x)) else: s = calculate_series(expr, x, logx) return s.as_leading_term(x) @cacheit def as_leading_term(self, *symbols, cdir=0): """ Returns the leading (nonzero) term of the series expansion of self. The _eval_as_leading_term routines are used to do this, and they must always return a non-zero value. Examples ======== >>> from sympy.abc import x >>> (1 + x + x**2).as_leading_term(x) 1 >>> (1/x**2 + x + x**2).as_leading_term(x) x**(-2) """ from sympy import powsimp if len(symbols) > 1: c = self for x in symbols: c = c.as_leading_term(x, cdir=cdir) return c elif not symbols: return self x = sympify(symbols[0]) if not x.is_symbol: raise ValueError('expecting a Symbol but got %s' % x) if x not in self.free_symbols: return self obj = self._eval_as_leading_term(x, cdir=cdir) if obj is not None: return powsimp(obj, deep=True, combine='exp') raise NotImplementedError('as_leading_term(%s, %s)' % (self, x)) def _eval_as_leading_term(self, x, cdir=0): return self def as_coeff_exponent(self, x): """ ``c*x**e -> c,e`` where x can be any symbolic expression. """ from sympy import collect s = collect(self, x) c, p = s.as_coeff_mul(x) if len(p) == 1: b, e = p[0].as_base_exp() if b == x: return c, e return s, S.Zero def leadterm(self, x, cdir=0): """ Returns the leading term a*x**b as a tuple (a, b). Examples ======== >>> from sympy.abc import x >>> (1+x+x**2).leadterm(x) (1, 0) >>> (1/x**2+x+x**2).leadterm(x) (1, -2) """ from sympy import Dummy, log l = self.as_leading_term(x, cdir=cdir) d = Dummy('logx') if l.has(log(x)): l = l.subs(log(x), d) c, e = l.as_coeff_exponent(x) if x in c.free_symbols: from sympy.utilities.misc import filldedent raise ValueError(filldedent(""" cannot compute leadterm(%s, %s). The coefficient should have been free of %s but got %s""" % (self, x, x, c))) c = c.subs(d, log(x)) return c, e def as_coeff_Mul(self, rational=False): """Efficiently extract the coefficient of a product. """ return S.One, self def as_coeff_Add(self, rational=False): """Efficiently extract the coefficient of a summation. """ return S.Zero, self def fps(self, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False): """ Compute formal power power series of self. See the docstring of the :func:`fps` function in sympy.series.formal for more information. """ from sympy.series.formal import fps return fps(self, x, x0, dir, hyper, order, rational, full) def fourier_series(self, limits=None): """Compute fourier sine/cosine series of self. See the docstring of the :func:`fourier_series` in sympy.series.fourier for more information. 
""" from sympy.series.fourier import fourier_series return fourier_series(self, limits) ################################################################################### ##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS #################### ################################################################################### def diff(self, *symbols, **assumptions): assumptions.setdefault("evaluate", True) return _derivative_dispatch(self, *symbols, **assumptions) ########################################################################### ###################### EXPRESSION EXPANSION METHODS ####################### ########################################################################### # Relevant subclasses should override _eval_expand_hint() methods. See # the docstring of expand() for more info. def _eval_expand_complex(self, **hints): real, imag = self.as_real_imag(**hints) return real + S.ImaginaryUnit*imag @staticmethod def _expand_hint(expr, hint, deep=True, **hints): """ Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``. Returns ``(expr, hit)``, where expr is the (possibly) expanded ``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and ``False`` otherwise. """ hit = False # XXX: Hack to support non-Basic args # | # V if deep and getattr(expr, 'args', ()) and not expr.is_Atom: sargs = [] for arg in expr.args: arg, arghit = Expr._expand_hint(arg, hint, **hints) hit |= arghit sargs.append(arg) if hit: expr = expr.func(*sargs) if hasattr(expr, hint): newexpr = getattr(expr, hint)(**hints) if newexpr != expr: return (newexpr, True) return (expr, hit) @cacheit def expand(self, deep=True, modulus=None, power_base=True, power_exp=True, mul=True, log=True, multinomial=True, basic=True, **hints): """ Expand an expression using hints. See the docstring of the expand() function in sympy.core.function for more information. """ from sympy.simplify.radsimp import fraction hints.update(power_base=power_base, power_exp=power_exp, mul=mul, log=log, multinomial=multinomial, basic=basic) expr = self if hints.pop('frac', False): n, d = [a.expand(deep=deep, modulus=modulus, **hints) for a in fraction(self)] return n/d elif hints.pop('denom', False): n, d = fraction(self) return n/d.expand(deep=deep, modulus=modulus, **hints) elif hints.pop('numer', False): n, d = fraction(self) return n.expand(deep=deep, modulus=modulus, **hints)/d # Although the hints are sorted here, an earlier hint may get applied # at a given node in the expression tree before another because of how # the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y + # x*z) because while applying log at the top level, log and mul are # applied at the deeper level in the tree so that when the log at the # upper level gets applied, the mul has already been applied at the # lower level. # Additionally, because hints are only applied once, the expression # may not be expanded all the way. For example, if mul is applied # before multinomial, x*(x + 1)**2 won't be expanded all the way. For # now, we just use a special case to make multinomial run before mul, # so that at least polynomials will be expanded all the way. In the # future, smarter heuristics should be applied. 
# TODO: Smarter heuristics def _expand_hint_key(hint): """Make multinomial come before mul""" if hint == 'mul': return 'mulz' return hint for hint in sorted(hints.keys(), key=_expand_hint_key): use_hint = hints[hint] if use_hint: hint = '_eval_expand_' + hint expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints) while True: was = expr if hints.get('multinomial', False): expr, _ = Expr._expand_hint( expr, '_eval_expand_multinomial', deep=deep, **hints) if hints.get('mul', False): expr, _ = Expr._expand_hint( expr, '_eval_expand_mul', deep=deep, **hints) if hints.get('log', False): expr, _ = Expr._expand_hint( expr, '_eval_expand_log', deep=deep, **hints) if expr == was: break if modulus is not None: modulus = sympify(modulus) if not modulus.is_Integer or modulus <= 0: raise ValueError( "modulus must be a positive integer, got %s" % modulus) terms = [] for term in Add.make_args(expr): coeff, tail = term.as_coeff_Mul(rational=True) coeff %= modulus if coeff: terms.append(coeff*tail) expr = Add(*terms) return expr ########################################################################### ################### GLOBAL ACTION VERB WRAPPER METHODS #################### ########################################################################### def integrate(self, *args, **kwargs): """See the integrate function in sympy.integrals""" from sympy.integrals import integrate return integrate(self, *args, **kwargs) def nsimplify(self, constants=[], tolerance=None, full=False): """See the nsimplify function in sympy.simplify""" from sympy.simplify import nsimplify return nsimplify(self, constants, tolerance, full) def separate(self, deep=False, force=False): """See the separate function in sympy.simplify""" from sympy.core.function import expand_power_base return expand_power_base(self, deep=deep, force=force) def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True): """See the collect function in sympy.simplify""" from sympy.simplify import collect return collect(self, syms, func, evaluate, exact, distribute_order_term) def together(self, *args, **kwargs): """See the together function in sympy.polys""" from sympy.polys import together return together(self, *args, **kwargs) def apart(self, x=None, **args): """See the apart function in sympy.polys""" from sympy.polys import apart return apart(self, x, **args) def ratsimp(self): """See the ratsimp function in sympy.simplify""" from sympy.simplify import ratsimp return ratsimp(self) def trigsimp(self, **args): """See the trigsimp function in sympy.simplify""" from sympy.simplify import trigsimp return trigsimp(self, **args) def radsimp(self, **kwargs): """See the radsimp function in sympy.simplify""" from sympy.simplify import radsimp return radsimp(self, **kwargs) def powsimp(self, *args, **kwargs): """See the powsimp function in sympy.simplify""" from sympy.simplify import powsimp return powsimp(self, *args, **kwargs) def combsimp(self): """See the combsimp function in sympy.simplify""" from sympy.simplify import combsimp return combsimp(self) def gammasimp(self): """See the gammasimp function in sympy.simplify""" from sympy.simplify import gammasimp return gammasimp(self) def factor(self, *gens, **args): """See the factor() function in sympy.polys.polytools""" from sympy.polys import factor return factor(self, *gens, **args) def refine(self, assumption=True): """See the refine function in sympy.assumptions""" from sympy.assumptions import refine return refine(self, assumption) def cancel(self, *gens, **args): """See 
the cancel function in sympy.polys""" from sympy.polys import cancel return cancel(self, *gens, **args) def invert(self, g, *gens, **args): """Return the multiplicative inverse of ``self`` mod ``g`` where ``self`` (and ``g``) may be symbolic expressions). See Also ======== sympy.core.numbers.mod_inverse, sympy.polys.polytools.invert """ from sympy.polys.polytools import invert from sympy.core.numbers import mod_inverse if self.is_number and getattr(g, 'is_number', True): return mod_inverse(self, g) return invert(self, g, *gens, **args) def round(self, n=None): """Return x rounded to the given decimal place. If a complex number would results, apply round to the real and imaginary components of the number. Examples ======== >>> from sympy import pi, E, I, S, Number >>> pi.round() 3 >>> pi.round(2) 3.14 >>> (2*pi + E*I).round() 6 + 3*I The round method has a chopping effect: >>> (2*pi + I/10).round() 6 >>> (pi/10 + 2*I).round() 2*I >>> (pi/10 + E*I).round(2) 0.31 + 2.72*I Notes ===== The Python ``round`` function uses the SymPy ``round`` method so it will always return a SymPy number (not a Python float or int): >>> isinstance(round(S(123), -2), Number) True """ from sympy.core.numbers import Float x = self if not x.is_number: raise TypeError("can't round symbolic expression") if not x.is_Atom: if not pure_complex(x.n(2), or_real=True): raise TypeError( 'Expected a number but got %s:' % func_name(x)) elif x in (S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity): return x if not x.is_extended_real: r, i = x.as_real_imag() return r.round(n) + S.ImaginaryUnit*i.round(n) if not x: return S.Zero if n is None else x p = as_int(n or 0) if x.is_Integer: return Integer(round(int(x), p)) digits_to_decimal = _mag(x) # _mag(12) = 2, _mag(.012) = -1 allow = digits_to_decimal + p precs = [f._prec for f in x.atoms(Float)] dps = prec_to_dps(max(precs)) if precs else None if dps is None: # assume everything is exact so use the Python # float default or whatever was requested dps = max(15, allow) else: allow = min(allow, dps) # this will shift all digits to right of decimal # and give us dps to work with as an int shift = -digits_to_decimal + dps extra = 1 # how far we look past known digits # NOTE # mpmath will calculate the binary representation to # an arbitrary number of digits but we must base our # answer on a finite number of those digits, e.g. # .575 2589569785738035/2**52 in binary. # mpmath shows us that the first 18 digits are # >>> Float(.575).n(18) # 0.574999999999999956 # The default precision is 15 digits and if we ask # for 15 we get # >>> Float(.575).n(15) # 0.575000000000000 # mpmath handles rounding at the 15th digit. But we # need to be careful since the user might be asking # for rounding at the last digit and our semantics # are to round toward the even final digit when there # is a tie. So the extra digit will be used to make # that decision. In this case, the value is the same # to 15 digits: # >>> Float(.575).n(16) # 0.5750000000000000 # Now converting this to the 15 known digits gives # 575000000000000.0 # which rounds to integer # 5750000000000000 # And now we can round to the desired digt, e.g. at # the second from the left and we get # 5800000000000000 # and rescaling that gives # 0.58 # as the final result. # If the value is made slightly less than 0.575 we might # still obtain the same value: # >>> Float(.575-1e-16).n(16)*10**15 # 574999999999999.8 # What 15 digits best represents the known digits (which are # to the left of the decimal? 
5750000000000000, the same as # before. The only way we will round down (in this case) is # if we declared that we had more than 15 digits of precision. # For example, if we use 16 digits of precision, the integer # we deal with is # >>> Float(.575-1e-16).n(17)*10**16 # 5749999999999998.4 # and this now rounds to 5749999999999998 and (if we round to # the 2nd digit from the left) we get 5700000000000000. # xf = x.n(dps + extra)*Pow(10, shift) xi = Integer(xf) # use the last digit to select the value of xi # nearest to x before rounding at the desired digit sign = 1 if x > 0 else -1 dif2 = sign*(xf - xi).n(extra) if dif2 < 0: raise NotImplementedError( 'not expecting int(x) to round away from 0') if dif2 > .5: xi += sign # round away from 0 elif dif2 == .5: xi += sign if xi%2 else -sign # round toward even # shift p to the new position ip = p - shift # let Python handle the int rounding then rescale xr = round(xi.p, ip) # restore scale rv = Rational(xr, Pow(10, shift)) # return Float or Integer if rv.is_Integer: if n is None: # the single-arg case return rv # use str or else it won't be a float return Float(str(rv), dps) # keep same precision else: if not allow and rv > self: allow += 1 return Float(rv, allow) __round__ = round def _eval_derivative_matrix_lines(self, x): from sympy.matrices.expressions.matexpr import _LeftRightArgs return [_LeftRightArgs([S.One, S.One], higher=self._eval_derivative(x))] class AtomicExpr(Atom, Expr): """ A parent class for object which are both atoms and Exprs. For example: Symbol, Number, Rational, Integer, ... But not: Add, Mul, Pow, ... """ is_number = False is_Atom = True __slots__ = () def _eval_derivative(self, s): if self == s: return S.One return S.Zero def _eval_derivative_n_times(self, s, n): from sympy import Piecewise, Eq from sympy import Tuple, MatrixExpr from sympy.matrices.common import MatrixCommon if isinstance(s, (MatrixCommon, Tuple, Iterable, MatrixExpr)): return super()._eval_derivative_n_times(s, n) if self == s: return Piecewise((self, Eq(n, 0)), (1, Eq(n, 1)), (0, True)) else: return Piecewise((self, Eq(n, 0)), (0, True)) def _eval_is_polynomial(self, syms): return True def _eval_is_rational_function(self, syms): return True def _eval_is_meromorphic(self, x, a): from sympy.calculus.util import AccumBounds return (not self.is_Number or self.is_finite) and not isinstance(self, AccumBounds) def _eval_is_algebraic_expr(self, syms): return True def _eval_nseries(self, x, n, logx, cdir=0): return self @property def expr_free_symbols(self): return {self} def _mag(x): """Return integer ``i`` such that .1 <= x/10**i < 1 Examples ======== >>> from sympy.core.expr import _mag >>> from sympy import Float >>> _mag(Float(.1)) 0 >>> _mag(Float(.01)) -1 >>> _mag(Float(1234)) 4 """ from math import log10, ceil, log from sympy import Float xpos = abs(x.n()) if not xpos: return S.Zero try: mag_first_dig = int(ceil(log10(xpos))) except (ValueError, OverflowError): mag_first_dig = int(ceil(Float(mpf_log(xpos._mpf_, 53))/log(10))) # check that we aren't off by 1 if (xpos/10**mag_first_dig) >= 1: assert 1 <= (xpos/10**mag_first_dig) < 10 mag_first_dig += 1 return mag_first_dig class UnevaluatedExpr(Expr): """ Expression that is not evaluated unless released. 
Examples ======== >>> from sympy import UnevaluatedExpr >>> from sympy.abc import x >>> x*(1/x) 1 >>> x*UnevaluatedExpr(1/x) x*1/x """ def __new__(cls, arg, **kwargs): arg = _sympify(arg) obj = Expr.__new__(cls, arg, **kwargs) return obj def doit(self, **kwargs): if kwargs.get("deep", True): return self.args[0].doit(**kwargs) else: return self.args[0] def unchanged(func, *args): """Return True if `func` applied to the `args` is unchanged. Can be used instead of `assert foo == foo`. Examples ======== >>> from sympy import Piecewise, cos, pi >>> from sympy.core.expr import unchanged >>> from sympy.abc import x >>> unchanged(cos, 1) # instead of assert cos(1) == cos(1) True >>> unchanged(cos, pi) False Comparison of args uses the builtin capabilities of the object's arguments to test for equality so args can be defined loosely. Here, the ExprCondPair arguments of Piecewise compare as equal to the tuples that can be used to create the Piecewise: >>> unchanged(Piecewise, (x, x > 1), (0, True)) True """ f = func(*args) return f.func == func and f.args == args class ExprBuilder: def __init__(self, op, args=[], validator=None, check=True): if not hasattr(op, "__call__"): raise TypeError("op {} needs to be callable".format(op)) self.op = op self.args = args self.validator = validator if (validator is not None) and check: self.validate() @staticmethod def _build_args(args): return [i.build() if isinstance(i, ExprBuilder) else i for i in args] def validate(self): if self.validator is None: return args = self._build_args(self.args) self.validator(*args) def build(self, check=True): args = self._build_args(self.args) if self.validator and check: self.validator(*args) return self.op(*args) def append_argument(self, arg, check=True): self.args.append(arg) if self.validator and check: self.validate(*self.args) def __getitem__(self, item): if item == 0: return self.op else: return self.args[item-1] def __repr__(self): return str(self.build()) def search_element(self, elem): for i, arg in enumerate(self.args): if isinstance(arg, ExprBuilder): ret = arg.search_index(elem) if ret is not None: return (i,) + ret elif id(arg) == id(elem): return (i,) return None from .mul import Mul from .add import Add from .power import Pow from .function import Function, _derivative_dispatch from .mod import Mod from .exprtools import factor_terms from .numbers import Integer, Rational
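# Example: ExprBuilder stores an operation and a list of (possibly nested,
# unevaluated) arguments; calling ``build()`` assembles the actual SymPy
# object, e.g.
#
#     b = ExprBuilder(Add, [1, ExprBuilder(Mul, [2, 3])])
#     b.build()   # -> 7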
45f93df161cc965b636d3698533e8879c76e159d1c4bf90cdbda6ac300449298
import numbers import decimal import fractions import math import re as regex import sys from .containers import Tuple from .sympify import (SympifyError, converter, sympify, _convert_numpy_types, _sympify, _is_numpy_instance) from .singleton import S, Singleton from .expr import Expr, AtomicExpr from .evalf import pure_complex from .decorators import _sympifyit from .cache import cacheit, clear_cache from .logic import fuzzy_not from sympy.core.compatibility import (as_int, HAS_GMPY, SYMPY_INTS, gmpy) from sympy.core.cache import lru_cache from sympy.multipledispatch import dispatch import mpmath import mpmath.libmp as mlib from mpmath.libmp import bitcount from mpmath.libmp.backend import MPZ from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed from mpmath.ctx_mp import mpnumeric from mpmath.libmp.libmpf import ( finf as _mpf_inf, fninf as _mpf_ninf, fnan as _mpf_nan, fzero, _normalize as mpf_normalize, prec_to_dps) from sympy.utilities.misc import debug, filldedent from .parameters import global_parameters from sympy.utilities.exceptions import SymPyDeprecationWarning rnd = mlib.round_nearest _LOG2 = math.log(2) def comp(z1, z2, tol=None): """Return a bool indicating whether the error between z1 and z2 is <= tol. Examples ======== If ``tol`` is None then True will be returned if ``abs(z1 - z2)*10**p <= 5`` where ``p`` is minimum value of the decimal precision of each value. >>> from sympy.core.numbers import comp, pi >>> pi4 = pi.n(4); pi4 3.142 >>> comp(_, 3.142) True >>> comp(pi4, 3.141) False >>> comp(pi4, 3.143) False A comparison of strings will be made if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''. >>> comp(pi4, 3.1415) True >>> comp(pi4, 3.1415, '') False When ``tol`` is provided and ``z2`` is non-zero and ``|z1| > 1`` the error is normalized by ``|z1|``: >>> abs(pi4 - 3.14)/pi4 0.000509791731426756 >>> comp(pi4, 3.14, .001) # difference less than 0.1% True >>> comp(pi4, 3.14, .0005) # difference less than 0.1% False When ``|z1| <= 1`` the absolute error is used: >>> 1/pi4 0.3183 >>> abs(1/pi4 - 0.3183)/(1/pi4) 3.07371499106316e-5 >>> abs(1/pi4 - 0.3183) 9.78393554684764e-6 >>> comp(1/pi4, 0.3183, 1e-5) True To see if the absolute error between ``z1`` and ``z2`` is less than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)`` or ``comp(z1 - z2, tol=tol)``: >>> abs(pi4 - 3.14) 0.00160156249999988 >>> comp(pi4 - 3.14, 0, .002) True >>> comp(pi4 - 3.14, 0, .001) False """ if type(z2) is str: if not pure_complex(z1, or_real=True): raise ValueError('when z2 is a str z1 must be a Number') return str(z1) == z2 if not z1: z1, z2 = z2, z1 if not z1: return True if not tol: a, b = z1, z2 if tol == '': return str(a) == str(b) if tol is None: a, b = sympify(a), sympify(b) if not all(i.is_number for i in (a, b)): raise ValueError('expecting 2 numbers') fa = a.atoms(Float) fb = b.atoms(Float) if not fa and not fb: # no floats -- compare exactly return a == b # get a to be pure_complex for do in range(2): ca = pure_complex(a, or_real=True) if not ca: if fa: a = a.n(prec_to_dps(min([i._prec for i in fa]))) ca = pure_complex(a, or_real=True) break else: fa, fb = fb, fa a, b = b, a cb = pure_complex(b) if not cb and fb: b = b.n(prec_to_dps(min([i._prec for i in fb]))) cb = pure_complex(b, or_real=True) if ca and cb and (ca[1] or cb[1]): return all(comp(i, j) for i, j in zip(ca, cb)) tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec))) return int(abs(a - b)*tol) <= 5 diff = abs(z1 - z2) az1 = abs(z1) if z2 and az1 > 1: return diff/az1 <= tol else: return 
diff <= tol def mpf_norm(mpf, prec): """Return the mpf tuple normalized appropriately for the indicated precision after doing a check to see if zero should be returned or not when the mantissa is 0. ``mpf_normlize`` always assumes that this is zero, but it may not be since the mantissa for mpf's values "+inf", "-inf" and "nan" have a mantissa of zero, too. Note: this is not intended to validate a given mpf tuple, so sending mpf tuples that were not created by mpmath may produce bad results. This is only a wrapper to ``mpf_normalize`` which provides the check for non- zero mpfs that have a 0 for the mantissa. """ sign, man, expt, bc = mpf if not man: # hack for mpf_normalize which does not do this; # it assumes that if man is zero the result is 0 # (see issue 6639) if not bc: return fzero else: # don't change anything; this should already # be a well formed mpf tuple return mpf # Necessary if mpmath is using the gmpy backend from mpmath.libmp.backend import MPZ rv = mpf_normalize(sign, MPZ(man), expt, bc, prec, rnd) return rv # TODO: we should use the warnings module _errdict = {"divide": False} def seterr(divide=False): """ Should sympy raise an exception on 0/0 or return a nan? divide == True .... raise an exception divide == False ... return nan """ if _errdict["divide"] != divide: clear_cache() _errdict["divide"] = divide def _as_integer_ratio(p): neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_) p = [1, -1][neg_pow % 2]*man if expt < 0: q = 2**-expt else: q = 1 p *= 2**expt return int(p), int(q) def _decimal_to_Rational_prec(dec): """Convert an ordinary decimal instance to a Rational.""" if not dec.is_finite(): raise TypeError("dec must be finite, got %s." % dec) s, d, e = dec.as_tuple() prec = len(d) if e >= 0: # it's an integer rv = Integer(int(dec)) else: s = (-1)**s d = sum([di*10**i for i, di in enumerate(reversed(d))]) rv = Rational(s*d, 10**-e) return rv, prec _floatpat = regex.compile(r"[-+]?((\d*\.\d+)|(\d+\.?))") def _literal_float(f): """Return True if n starts like a floating point number.""" return bool(_floatpat.match(f)) # (a,b) -> gcd(a,b) # TODO caching with decorator, but not to degrade performance @lru_cache(1024) def igcd(*args): """Computes nonnegative integer greatest common divisor. Explanation =========== The algorithm is based on the well known Euclid's algorithm. To improve speed, igcd() has its own caching mechanism implemented. Examples ======== >>> from sympy.core.numbers import igcd >>> igcd(2, 4) 2 >>> igcd(5, 10, 15) 5 """ if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() if HAS_GMPY: # Using gmpy if present to speed up. for b in args_temp: a = gmpy.gcd(a, b) if b else a return as_int(a) for b in args_temp: a = math.gcd(a, b) return a igcd2 = math.gcd def igcd_lehmer(a, b): """Computes greatest common divisor of two integers. Explanation =========== Euclid's algorithm for the computation of the greatest common divisor gcd(a, b) of two (positive) integers a and b is based on the division identity a = q*b + r, where the quotient q and the remainder r are integers and 0 <= r < b. Then each common divisor of a and b divides r, and it follows that gcd(a, b) == gcd(b, r). The algorithm works by constructing the sequence r0, r1, r2, ..., where r0 = a, r1 = b, and each rn is the remainder from the division of the two preceding elements. 
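For example, for a = 24 and b = 9 the sequence is 24, 9, 6, 3, 0, and the greatest common divisor is 3, the last nonzero remainder.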
In Python, q = a // b and r = a % b are obtained by the floor division and the remainder operations, respectively. These are the most expensive arithmetic operations, especially for large a and b. Lehmer's algorithm is based on the observation that the quotients qn = r(n-1) // rn are in general small integers even when a and b are very large. Hence the quotients can be usually determined from a relatively small number of most significant bits. The efficiency of the algorithm is further enhanced by not computing each long remainder in Euclid's sequence. The remainders are linear combinations of a and b with integer coefficients derived from the quotients. The coefficients can be computed as far as the quotients can be determined from the chosen most significant parts of a and b. Only then a new pair of consecutive remainders is computed and the algorithm starts anew with this pair. References ========== .. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm """ a, b = abs(as_int(a)), abs(as_int(b)) if a < b: a, b = b, a # The algorithm works by using one or two digit division # whenever possible. The outer loop will replace the # pair (a, b) with a pair of shorter consecutive elements # of the Euclidean gcd sequence until a and b # fit into two Python (long) int digits. nbits = 2*sys.int_info.bits_per_digit while a.bit_length() > nbits and b != 0: # Quotients are mostly small integers that can # be determined from most significant bits. n = a.bit_length() - nbits x, y = int(a >> n), int(b >> n) # most significant bits # Elements of the Euclidean gcd sequence are linear # combinations of a and b with integer coefficients. # Compute the coefficients of consecutive pairs # a' = A*a + B*b, b' = C*a + D*b # using small integer arithmetic as far as possible. A, B, C, D = 1, 0, 0, 1 # initial values while True: # The coefficients alternate in sign while looping. # The inner loop combines two steps to keep track # of the signs. # At this point we have # A > 0, B <= 0, C <= 0, D > 0, # x' = x + B <= x < x" = x + A, # y' = y + C <= y < y" = y + D, # and # x'*N <= a' < x"*N, y'*N <= b' < y"*N, # where N = 2**n. # Now, if y' > 0, and x"//y' and x'//y" agree, # then their common value is equal to q = a'//b'. # In addition, # x'%y" = x' - q*y" < x" - q*y' = x"%y', # and # (x'%y")*N < a'%b' < (x"%y')*N. # On the other hand, we also have x//y == q, # and therefore # x'%y" = x + B - q*(y + D) = x%y + B', # x"%y' = x + A - q*(y + C) = x%y + A', # where # B' = B - q*D < 0, A' = A - q*C > 0. if y + C <= 0: break q = (x + A) // (y + C) # Now x'//y" <= q, and equality holds if # x' - q*y" = (x - q*y) + (B - q*D) >= 0. # This is a minor optimization to avoid division. x_qy, B_qD = x - q*y, B - q*D if x_qy + B_qD < 0: break # Next step in the Euclidean sequence. x, y = y, x_qy A, B, C, D = C, D, A - q*C, B_qD # At this point the signs of the coefficients # change and their roles are interchanged. # A <= 0, B > 0, C > 0, D < 0, # x' = x + A <= x < x" = x + B, # y' = y + D < y < y" = y + C. if y + D <= 0: break q = (x + B) // (y + D) x_qy, A_qC = x - q*y, A - q*C if x_qy + A_qC < 0: break x, y = y, x_qy A, B, C, D = C, D, A_qC, B - q*D # Now the conditions on top of the loop # are again satisfied. # A > 0, B < 0, C < 0, D > 0. if B == 0: # This can only happen when y == 0 in the beginning # and the inner loop does nothing. # Long division is forced. a, b = b, a % b continue # Compute new long arguments using the coefficients. a, b = A*a + B*b, C*a + D*b # Small divisors. Finish with the standard algorithm. 
while b: a, b = b, a % b return a def ilcm(*args): """Computes integer least common multiple. Examples ======== >>> from sympy.core.numbers import ilcm >>> ilcm(5, 10) 10 >>> ilcm(7, 3) 21 >>> ilcm(5, 10, 15) 30 """ if len(args) < 2: raise TypeError( 'ilcm() takes at least 2 arguments (%s given)' % len(args)) if 0 in args: return 0 a = args[0] for b in args[1:]: a = a // igcd(a, b) * b # since gcd(a,b) | a return a def igcdex(a, b): """Returns x, y, g such that g = x*a + y*b = gcd(a, b). Examples ======== >>> from sympy.core.numbers import igcdex >>> igcdex(2, 3) (-1, 1, 1) >>> igcdex(10, 12) (-1, 1, 2) >>> x, y, g = igcdex(100, 2004) >>> x, y, g (-20, 1, 4) >>> x*100 + y*2004 4 """ if (not a) and (not b): return (0, 1, 0) if not a: return (0, b//abs(b), abs(b)) if not b: return (a//abs(a), 0, abs(a)) if a < 0: a, x_sign = -a, -1 else: x_sign = 1 if b < 0: b, y_sign = -b, -1 else: y_sign = 1 x, y, r, s = 1, 0, 0, 1 while b: (c, q) = (a % b, a // b) (a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s) return (x*x_sign, y*y_sign, a) def mod_inverse(a, m): """ Return the number c such that, (a * c) = 1 (mod m) where c has the same sign as m. If no such value exists, a ValueError is raised. Examples ======== >>> from sympy import S >>> from sympy.core.numbers import mod_inverse Suppose we wish to find multiplicative inverse x of 3 modulo 11. This is the same as finding x such that 3 * x = 1 (mod 11). One value of x that satisfies this congruence is 4. Because 3 * 4 = 12 and 12 = 1 (mod 11). This is the value returned by mod_inverse: >>> mod_inverse(3, 11) 4 >>> mod_inverse(-3, 11) 7 When there is a common factor between the numerators of ``a`` and ``m`` the inverse does not exist: >>> mod_inverse(2, 4) Traceback (most recent call last): ... ValueError: inverse of 2 mod 4 does not exist >>> mod_inverse(S(2)/7, S(5)/2) 7/2 References ========== .. [1] https://en.wikipedia.org/wiki/Modular_multiplicative_inverse .. [2] https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm """ c = None try: a, m = as_int(a), as_int(m) if m != 1 and m != -1: x, y, g = igcdex(a, m) if g == 1: c = x % m except ValueError: a, m = sympify(a), sympify(m) if not (a.is_number and m.is_number): raise TypeError(filldedent(''' Expected numbers for arguments; symbolic `mod_inverse` is not implemented but symbolic expressions can be handled with the similar function, sympy.polys.polytools.invert''')) big = (m > 1) if not (big is S.true or big is S.false): raise ValueError('m > 1 did not evaluate; try to simplify %s' % m) elif big: c = 1/a if c is None: raise ValueError('inverse of %s (mod %s) does not exist' % (a, m)) return c class Number(AtomicExpr): """Represents atomic numbers in SymPy. Explanation =========== Floating point numbers are represented by the Float class. Rational numbers (of any size) are represented by the Rational class. Integer numbers (of any size) are represented by the Integer class. Float and Rational are subclasses of Number; Integer is a subclass of Rational. For example, ``2/3`` is represented as ``Rational(2, 3)`` which is a different object from the floating point number obtained with Python division ``2/3``. Even for numbers that are exactly represented in binary, there is a difference between how two forms, such as ``Rational(1, 2)`` and ``Float(0.5)``, are used in SymPy. The rational form is to be preferred in symbolic computations. Other kinds of numbers, such as algebraic numbers ``sqrt(2)`` or complex numbers ``3 + 4*I``, are not instances of Number class as they are not atomic. 
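``Number`` itself acts as a factory, dispatching to the appropriate subclass:

>>> from sympy import Number
>>> Number(19)
19
>>> Number(19, 4)
19/4
>>> Number(19.0)
19.0000000000000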
See Also ======== Float, Integer, Rational """ is_commutative = True is_number = True is_Number = True __slots__ = () # Used to make max(x._prec, y._prec) return x._prec when only x is a float _prec = -1 def __new__(cls, *obj): if len(obj) == 1: obj = obj[0] if isinstance(obj, Number): return obj if isinstance(obj, SYMPY_INTS): return Integer(obj) if isinstance(obj, tuple) and len(obj) == 2: return Rational(*obj) if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)): return Float(obj) if isinstance(obj, str): _obj = obj.lower() # float('INF') == float('inf') if _obj == 'nan': return S.NaN elif _obj == 'inf': return S.Infinity elif _obj == '+inf': return S.Infinity elif _obj == '-inf': return S.NegativeInfinity val = sympify(obj) if isinstance(val, Number): return val else: raise ValueError('String "%s" does not denote a Number' % obj) msg = "expected str|int|long|float|Decimal|Number object but got %r" raise TypeError(msg % type(obj).__name__) def invert(self, other, *gens, **args): from sympy.polys.polytools import invert if getattr(other, 'is_number', True): return mod_inverse(self, other) return invert(self, other, *gens, **args) def __divmod__(self, other): from .containers import Tuple from sympy.functions.elementary.complexes import sign try: other = Number(other) if self.is_infinite or S.NaN in (self, other): return (S.NaN, S.NaN) except TypeError: return NotImplemented if not other: raise ZeroDivisionError('modulo by zero') if self.is_Integer and other.is_Integer: return Tuple(*divmod(self.p, other.p)) elif isinstance(other, Float): rat = self/Rational(other) else: rat = self/other if other.is_finite: w = int(rat) if rat >= 0 else int(rat) - 1 r = self - other*w else: w = 0 if not self or (sign(self) == sign(other)) else -1 r = other if w else self return Tuple(w, r) def __rdivmod__(self, other): try: other = Number(other) except TypeError: return NotImplemented return divmod(other, self) def _as_mpf_val(self, prec): """Evaluation of mpf tuple accurate to at least prec bits.""" raise NotImplementedError('%s needs ._as_mpf_val() method' % (self.__class__.__name__)) def _eval_evalf(self, prec): return Float._new(self._as_mpf_val(prec), prec) def _as_mpf_op(self, prec): prec = max(prec, self._prec) return self._as_mpf_val(prec), prec def __float__(self): return mlib.to_float(self._as_mpf_val(53)) def floor(self): raise NotImplementedError('%s needs .floor() method' % (self.__class__.__name__)) def ceiling(self): raise NotImplementedError('%s needs .ceiling() method' % (self.__class__.__name__)) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() def _eval_conjugate(self): return self def _eval_order(self, *symbols): from sympy import Order # Order(5, x, y) -> Order(1,x,y) return Order(S.One, *symbols) def _eval_subs(self, old, new): if old == -self: return -new return self # there is no other possibility def _eval_is_finite(self): return True @classmethod def class_key(cls): return 1, 0, 'Number' @cacheit def sort_key(self, order=None): return self.class_key(), (0, ()), (), self @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity: return S.Infinity elif other is S.NegativeInfinity: return S.NegativeInfinity return AtomicExpr.__add__(self, other) @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity: 
return S.NegativeInfinity elif other is S.NegativeInfinity: return S.Infinity return AtomicExpr.__sub__(self, other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity: if self.is_zero: return S.NaN elif self.is_positive: return S.Infinity else: return S.NegativeInfinity elif other is S.NegativeInfinity: if self.is_zero: return S.NaN elif self.is_positive: return S.NegativeInfinity else: return S.Infinity elif isinstance(other, Tuple): return NotImplemented return AtomicExpr.__mul__(self, other) @_sympifyit('other', NotImplemented) def __truediv__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NaN: return S.NaN elif other is S.Infinity or other is S.NegativeInfinity: return S.Zero return AtomicExpr.__truediv__(self, other) def __eq__(self, other): raise NotImplementedError('%s needs .__eq__() method' % (self.__class__.__name__)) def __ne__(self, other): raise NotImplementedError('%s needs .__ne__() method' % (self.__class__.__name__)) def __lt__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s < %s" % (self, other)) raise NotImplementedError('%s needs .__lt__() method' % (self.__class__.__name__)) def __le__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s <= %s" % (self, other)) raise NotImplementedError('%s needs .__le__() method' % (self.__class__.__name__)) def __gt__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s > %s" % (self, other)) return _sympify(other).__lt__(self) def __ge__(self, other): try: other = _sympify(other) except SympifyError: raise TypeError("Invalid comparison %s >= %s" % (self, other)) return _sympify(other).__le__(self) def __hash__(self): return super().__hash__() def is_constant(self, *wrt, **flags): return True def as_coeff_mul(self, *deps, rational=True, **kwargs): # a -> c*t if self.is_Rational or not rational: return self, tuple() elif self.is_negative: return S.NegativeOne, (-self,) return S.One, (self,) def as_coeff_add(self, *deps): # a -> c + t if self.is_Rational: return self, tuple() return S.Zero, (self,) def as_coeff_Mul(self, rational=False): """Efficiently extract the coefficient of a product. """ if rational and not self.is_Rational: return S.One, self return (self, S.One) if self else (S.One, self) def as_coeff_Add(self, rational=False): """Efficiently extract the coefficient of a summation. """ if not rational: return self, S.Zero return S.Zero, self def gcd(self, other): """Compute GCD of `self` and `other`. """ from sympy.polys import gcd return gcd(self, other) def lcm(self, other): """Compute LCM of `self` and `other`. """ from sympy.polys import lcm return lcm(self, other) def cofactors(self, other): """Compute GCD and cofactors of `self` and `other`. """ from sympy.polys import cofactors return cofactors(self, other) class Float(Number): """Represent a floating-point number of arbitrary precision. Examples ======== >>> from sympy import Float >>> Float(3.5) 3.50000000000000 >>> Float(3) 3.00000000000000 Creating Floats from strings (and Python ``int`` and ``long`` types) will give a minimum precision of 15 digits, but the precision will automatically increase to capture all digits entered. >>> Float(1) 1.00000000000000 >>> Float(10**20) 100000000000000000000. >>> Float('1e20') 100000000000000000000. 
However, *floating-point* numbers (Python ``float`` types) retain only 15 digits of precision: >>> Float(1e20) 1.00000000000000e+20 >>> Float(1.23456789123456789) 1.23456789123457 It may be preferable to enter high-precision decimal numbers as strings: >>> Float('1.23456789123456789') 1.23456789123456789 The desired number of digits can also be specified: >>> Float('1e-3', 3) 0.00100 >>> Float(100, 4) 100.0 Float can automatically count significant figures if a null string is sent for the precision; spaces or underscores are also allowed. (Auto- counting is only allowed for strings, ints and longs). >>> Float('123 456 789.123_456', '') 123456789.123456 >>> Float('12e-3', '') 0.012 >>> Float(3, '') 3. If a number is written in scientific notation, only the digits before the exponent are considered significant if a decimal appears, otherwise the "e" signifies only how to move the decimal: >>> Float('60.e2', '') # 2 digits significant 6.0e+3 >>> Float('60e2', '') # 4 digits significant 6000. >>> Float('600e-2', '') # 3 digits significant 6.00 Notes ===== Floats are inexact by their nature unless their value is a binary-exact value. >>> approx, exact = Float(.1, 1), Float(.125, 1) For calculation purposes, evalf needs to be able to change the precision but this will not increase the accuracy of the inexact value. The following is the most accurate 5-digit approximation of a value of 0.1 that had only 1 digit of precision: >>> approx.evalf(5) 0.099609 By contrast, 0.125 is exact in binary (as it is in base 10) and so it can be passed to Float or evalf to obtain an arbitrary precision with matching accuracy: >>> Float(exact, 5) 0.12500 >>> exact.evalf(20) 0.12500000000000000000 Trying to make a high-precision Float from a float is not disallowed, but one must keep in mind that the *underlying float* (not the apparent decimal value) is being obtained with high precision. For example, 0.3 does not have a finite binary representation. The closest rational is the fraction 5404319552844595/2**54. So if you try to obtain a Float of 0.3 to 20 digits of precision you will not see the same thing as 0.3 followed by 19 zeros: >>> Float(0.3, 20) 0.29999999999999998890 If you want a 20-digit value of the decimal 0.3 (not the floating point approximation of 0.3) you should send the 0.3 as a string. The underlying representation is still binary but a higher precision than Python's float is used: >>> Float('0.3', 20) 0.30000000000000000000 Although you can increase the precision of an existing Float using Float it will not increase the accuracy -- the underlying value is not changed: >>> def show(f): # binary rep of Float ... from sympy import Mul, Pow ... s, m, e, b = f._mpf_ ... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False) ... print('%s at prec=%s' % (v, f._prec)) ... >>> t = Float('0.3', 3) >>> show(t) 4915/2**14 at prec=13 >>> show(Float(t, 20)) # higher prec, not higher accuracy 4915/2**14 at prec=70 >>> show(Float(t, 2)) # lower prec 307/2**10 at prec=10 The same thing happens when evalf is used on a Float: >>> show(t.evalf(20)) 4915/2**14 at prec=70 >>> show(t.evalf(2)) 307/2**10 at prec=10 Finally, Floats can be instantiated with an mpf tuple (n, c, p) to produce the number (-1)**n*c*2**p: >>> n, c, p = 1, 5, 0 >>> (-1)**n*c*2**p -5 >>> Float((1, 5, 0)) -5.00000000000000 An actual mpf tuple also contains the number of bits in c as the last element of the tuple: >>> _._mpf_ (1, 5, 0, 3) This is not needed for instantiation and is not the same thing as the precision. 
The mpf tuple and the precision are two separate quantities that Float tracks. In SymPy, a Float is a number that can be computed with arbitrary precision. Although floating point 'inf' and 'nan' are not such numbers, Float can create these numbers: >>> Float('-inf') -oo >>> _.is_Float False """ __slots__ = ('_mpf_', '_prec') # A Float represents many real numbers, # both rational and irrational. is_rational = None is_irrational = None is_number = True is_real = True is_extended_real = True is_Float = True def __new__(cls, num, dps=None, prec=None, precision=None): if prec is not None: SymPyDeprecationWarning( feature="Using 'prec=XX' to denote decimal precision", useinstead="'dps=XX' for decimal precision and 'precision=XX' "\ "for binary precision", issue=12820, deprecated_since_version="1.1").warn() dps = prec del prec # avoid using this deprecated kwarg if dps is not None and precision is not None: raise ValueError('Both decimal and binary precision supplied. ' 'Supply only one. ') if isinstance(num, str): # Float accepts spaces as digit separators num = num.replace(' ', '').lower() # in Py 3.6 # underscores are allowed. In anticipation of that, we ignore # legally placed underscores if '_' in num: parts = num.split('_') if not (all(parts) and all(parts[i][-1].isdigit() for i in range(0, len(parts), 2)) and all(parts[i][0].isdigit() for i in range(1, len(parts), 2))): # copy Py 3.6 error raise ValueError("could not convert string to float: '%s'" % num) num = ''.join(parts) if num.startswith('.') and len(num) > 1: num = '0' + num elif num.startswith('-.') and len(num) > 2: num = '-0.' + num[2:] elif num in ('inf', '+inf'): return S.Infinity elif num == '-inf': return S.NegativeInfinity elif isinstance(num, float) and num == 0: num = '0' elif isinstance(num, float) and num == float('inf'): return S.Infinity elif isinstance(num, float) and num == float('-inf'): return S.NegativeInfinity elif isinstance(num, float) and num == float('nan'): return S.NaN elif isinstance(num, (SYMPY_INTS, Integer)): num = str(num) elif num is S.Infinity: return num elif num is S.NegativeInfinity: return num elif num is S.NaN: return num elif _is_numpy_instance(num): # support for numpy datatypes num = _convert_numpy_types(num) elif isinstance(num, mpmath.mpf): if precision is None: if dps is None: precision = num.context.prec num = num._mpf_ if dps is None and precision is None: dps = 15 if isinstance(num, Float): return num if isinstance(num, str) and _literal_float(num): try: Num = decimal.Decimal(num) except decimal.InvalidOperation: pass else: isint = '.' not in num num, dps = _decimal_to_Rational_prec(Num) if num.is_Integer and isint: dps = max(dps, len(str(num).lstrip('-'))) dps = max(15, dps) precision = mlib.libmpf.dps_to_prec(dps) elif precision == '' and dps is None or precision is None and dps == '': if not isinstance(num, str): raise ValueError('The null string can only be used when ' 'the number to Float is passed as a string or an integer.') ok = None if _literal_float(num): try: Num = decimal.Decimal(num) except decimal.InvalidOperation: pass else: isint = '.' not in num num, dps = _decimal_to_Rational_prec(Num) if num.is_Integer and isint: dps = max(dps, len(str(num).lstrip('-'))) precision = mlib.libmpf.dps_to_prec(dps) ok = True if ok is None: raise ValueError('string-float not recognized: %s' % num) # decimal precision(dps) is set and maybe binary precision(precision) # as well.From here on binary precision is used to compute the Float. 
# Hence, if supplied use binary precision else translate from decimal # precision. if precision is None or precision == '': precision = mlib.libmpf.dps_to_prec(dps) precision = int(precision) if isinstance(num, float): _mpf_ = mlib.from_float(num, precision, rnd) elif isinstance(num, str): _mpf_ = mlib.from_str(num, precision, rnd) elif isinstance(num, decimal.Decimal): if num.is_finite(): _mpf_ = mlib.from_str(str(num), precision, rnd) elif num.is_nan(): return S.NaN elif num.is_infinite(): if num > 0: return S.Infinity return S.NegativeInfinity else: raise ValueError("unexpected decimal value %s" % str(num)) elif isinstance(num, tuple) and len(num) in (3, 4): if type(num[1]) is str: # it's a hexadecimal (coming from a pickled object) # assume that it is in standard form num = list(num) # If we're loading an object pickled in Python 2 into # Python 3, we may need to strip a tailing 'L' because # of a shim for int on Python 3, see issue #13470. if num[1].endswith('L'): num[1] = num[1][:-1] num[1] = MPZ(num[1], 16) _mpf_ = tuple(num) else: if len(num) == 4: # handle normalization hack return Float._new(num, precision) else: if not all(( num[0] in (0, 1), num[1] >= 0, all(type(i) in (int, int) for i in num) )): raise ValueError('malformed mpf: %s' % (num,)) # don't compute number or else it may # over/underflow return Float._new( (num[0], num[1], num[2], bitcount(num[1])), precision) else: try: _mpf_ = num._as_mpf_val(precision) except (NotImplementedError, AttributeError): _mpf_ = mpmath.mpf(num, prec=precision)._mpf_ return cls._new(_mpf_, precision, zero=False) @classmethod def _new(cls, _mpf_, _prec, zero=True): # special cases if zero and _mpf_ == fzero: return S.Zero # Float(0) -> 0.0; Float._new((0,0,0,0)) -> 0 elif _mpf_ == _mpf_nan: return S.NaN elif _mpf_ == _mpf_inf: return S.Infinity elif _mpf_ == _mpf_ninf: return S.NegativeInfinity obj = Expr.__new__(cls) obj._mpf_ = mpf_norm(_mpf_, _prec) obj._prec = _prec return obj # mpz can't be pickled def __getnewargs__(self): return (mlib.to_pickable(self._mpf_),) def __getstate__(self): return {'_prec': self._prec} def _hashable_content(self): return (self._mpf_, self._prec) def floor(self): return Integer(int(mlib.to_int( mlib.mpf_floor(self._mpf_, self._prec)))) def ceiling(self): return Integer(int(mlib.to_int( mlib.mpf_ceil(self._mpf_, self._prec)))) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() @property def num(self): return mpmath.mpf(self._mpf_) def _as_mpf_val(self, prec): rv = mpf_norm(self._mpf_, prec) if rv != self._mpf_ and self._prec == prec: debug(self._mpf_, rv) return rv def _as_mpf_op(self, prec): return self._mpf_, max(prec, self._prec) def _eval_is_finite(self): if self._mpf_ in (_mpf_inf, _mpf_ninf): return False return True def _eval_is_infinite(self): if self._mpf_ in (_mpf_inf, _mpf_ninf): return True return False def _eval_is_integer(self): return self._mpf_ == fzero def _eval_is_negative(self): if self._mpf_ == _mpf_ninf or self._mpf_ == _mpf_inf: return False return self.num < 0 def _eval_is_positive(self): if self._mpf_ == _mpf_ninf or self._mpf_ == _mpf_inf: return False return self.num > 0 def _eval_is_extended_negative(self): if self._mpf_ == _mpf_ninf: return True if self._mpf_ == _mpf_inf: return False return self.num < 0 def _eval_is_extended_positive(self): if self._mpf_ == _mpf_inf: return True if self._mpf_ == _mpf_ninf: return False return self.num > 0 def _eval_is_zero(self): return self._mpf_ == fzero def __bool__(self): return self._mpf_ != fzero def 
__neg__(self): return Float._new(mlib.mpf_neg(self._mpf_), self._prec) @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec) return Number.__add__(self, other) @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec) return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec) return Number.__mul__(self, other) @_sympifyit('other', NotImplemented) def __truediv__(self, other): if isinstance(other, Number) and other != 0 and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec) return Number.__truediv__(self, other) @_sympifyit('other', NotImplemented) def __mod__(self, other): if isinstance(other, Rational) and other.q != 1 and global_parameters.evaluate: # calculate mod with Rationals, *then* round the result return Float(Rational.__mod__(Rational(self), other), precision=self._prec) if isinstance(other, Float) and global_parameters.evaluate: r = self/other if r == int(r): return Float(0, precision=max(self._prec, other._prec)) if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec) return Number.__mod__(self, other) @_sympifyit('other', NotImplemented) def __rmod__(self, other): if isinstance(other, Float) and global_parameters.evaluate: return other.__mod__(self) if isinstance(other, Number) and global_parameters.evaluate: rhs, prec = other._as_mpf_op(self._prec) return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec) return Number.__rmod__(self, other) def _eval_power(self, expt): """ expt is symbolic object but not equal to 0, 1 (-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) -> -> p**r*(sin(Pi*r) + cos(Pi*r)*I) """ if self == 0: if expt.is_positive: return S.Zero if expt.is_negative: return S.Infinity if isinstance(expt, Number): if isinstance(expt, Integer): prec = self._prec return Float._new( mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec) elif isinstance(expt, Rational) and \ expt.p == 1 and expt.q % 2 and self.is_negative: return Pow(S.NegativeOne, expt, evaluate=False)*( -self)._eval_power(expt) expt, prec = expt._as_mpf_op(self._prec) mpfself = self._mpf_ try: y = mpf_pow(mpfself, expt, prec, rnd) return Float._new(y, prec) except mlib.ComplexResult: re, im = mlib.mpc_pow( (mpfself, fzero), (expt, fzero), prec, rnd) return Float._new(re, prec) + \ Float._new(im, prec)*S.ImaginaryUnit def __abs__(self): return Float._new(mlib.mpf_abs(self._mpf_), self._prec) def __int__(self): if self._mpf_ == fzero: return 0 return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down def __eq__(self, other): from sympy.logic.boolalg import Boolean try: other = _sympify(other) except SympifyError: return NotImplemented if not self: return not other if isinstance(other, Boolean): return False if other.is_NumberSymbol: if other.is_irrational: return False return other.__eq__(self) if other.is_Float: # comparison is exact # so 
Float(.1, 3) != Float(.1, 33) return self._mpf_ == other._mpf_ if other.is_Rational: return other.__eq__(self) if other.is_Number: # numbers should compare at the same precision; # all _as_mpf_val routines should be sure to abide # by the request to change the prec if necessary; if # they don't, the equality test will fail since it compares # the mpf tuples ompf = other._as_mpf_val(self._prec) return bool(mlib.mpf_eq(self._mpf_, ompf)) return False # Float != non-Number def __ne__(self, other): return not self == other def _Frel(self, other, op): from sympy.core.numbers import prec_to_dps try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Rational: # test self*other.q <?> other.p without losing precision ''' >>> f = Float(.1,2) >>> i = 1234567890 >>> (f*i)._mpf_ (0, 471, 18, 9) >>> mlib.mpf_mul(f._mpf_, mlib.from_int(i)) (0, 505555550955, -12, 39) ''' smpf = mlib.mpf_mul(self._mpf_, mlib.from_int(other.q)) ompf = mlib.from_int(other.p) return _sympify(bool(op(smpf, ompf))) elif other.is_Float: return _sympify(bool( op(self._mpf_, other._mpf_))) elif other.is_comparable and other not in ( S.Infinity, S.NegativeInfinity): other = other.evalf(prec_to_dps(self._prec)) if other._prec > 1: if other.is_Number: return _sympify(bool( op(self._mpf_, other._as_mpf_val(self._prec)))) def __gt__(self, other): if isinstance(other, NumberSymbol): return other.__lt__(self) rv = self._Frel(other, mlib.mpf_gt) if rv is None: return Expr.__gt__(self, other) return rv def __ge__(self, other): if isinstance(other, NumberSymbol): return other.__le__(self) rv = self._Frel(other, mlib.mpf_ge) if rv is None: return Expr.__ge__(self, other) return rv def __lt__(self, other): if isinstance(other, NumberSymbol): return other.__gt__(self) rv = self._Frel(other, mlib.mpf_lt) if rv is None: return Expr.__lt__(self, other) return rv def __le__(self, other): if isinstance(other, NumberSymbol): return other.__ge__(self) rv = self._Frel(other, mlib.mpf_le) if rv is None: return Expr.__le__(self, other) return rv def __hash__(self): return super().__hash__() def epsilon_eq(self, other, epsilon="1e-15"): return abs(self - other) < Float(epsilon) def _sage_(self): import sage.all as sage return sage.RealNumber(str(self)) def __format__(self, format_spec): return format(decimal.Decimal(str(self)), format_spec) # Add sympify converters converter[float] = converter[decimal.Decimal] = Float # this is here to work nicely in Sage RealNumber = Float class Rational(Number): """Represents rational numbers (p/q) of any size. Examples ======== >>> from sympy import Rational, nsimplify, S, pi >>> Rational(1, 2) 1/2 Rational is unprejudiced in accepting input. 
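For example, the numerator and denominator may themselves be Rational:

>>> Rational(Rational(1, 2), 3)
1/6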
If a float is passed, the underlying value of the binary representation will be returned: >>> Rational(.5) 1/2 >>> Rational(.2) 3602879701896397/18014398509481984 If the simpler representation of the float is desired then consider limiting the denominator to the desired value or convert the float to a string (which is roughly equivalent to limiting the denominator to 10**12): >>> Rational(str(.2)) 1/5 >>> Rational(.2).limit_denominator(10**12) 1/5 An arbitrarily precise Rational is obtained when a string literal is passed: >>> Rational("1.23") 123/100 >>> Rational('1e-2') 1/100 >>> Rational(".1") 1/10 >>> Rational('1e-2/3.2') 1/320 The conversion of other types of strings can be handled by the sympify() function, and conversion of floats to expressions or simple fractions can be handled with nsimplify: >>> S('.[3]') # repeating digits in brackets 1/3 >>> S('3**2/10') # general expressions 9/10 >>> nsimplify(.3) # numbers that have a simple form 3/10 But if the input does not reduce to a literal Rational, an error will be raised: >>> Rational(pi) Traceback (most recent call last): ... TypeError: invalid input: pi Low-level --------- Access numerator and denominator as .p and .q: >>> r = Rational(3, 4) >>> r 3/4 >>> r.p 3 >>> r.q 4 Note that p and q return integers (not SymPy Integers) so some care is needed when using them in expressions: >>> r.p/r.q 0.75 See Also ======== sympy.core.sympify.sympify, sympy.simplify.simplify.nsimplify """ is_real = True is_integer = False is_rational = True is_number = True __slots__ = ('p', 'q') is_Rational = True @cacheit def __new__(cls, p, q=None, gcd=None): if q is None: if isinstance(p, Rational): return p if isinstance(p, SYMPY_INTS): pass else: if isinstance(p, (float, Float)): return Rational(*_as_integer_ratio(p)) if not isinstance(p, str): try: p = sympify(p) except (SympifyError, SyntaxError): pass # error will raise below else: if p.count('/') > 1: raise TypeError('invalid input: %s' % p) p = p.replace(' ', '') pq = p.rsplit('/', 1) if len(pq) == 2: p, q = pq fp = fractions.Fraction(p) fq = fractions.Fraction(q) p = fp/fq try: p = fractions.Fraction(p) except ValueError: pass # error will raise below else: return Rational(p.numerator, p.denominator, 1) if not isinstance(p, Rational): raise TypeError('invalid input: %s' % p) q = 1 gcd = 1 else: p = Rational(p) q = Rational(q) if isinstance(q, Rational): p *= q.q q = q.p if isinstance(p, Rational): q *= p.q p = p.p # p and q are now integers if q == 0: if p == 0: if _errdict["divide"]: raise ValueError("Indeterminate 0/0") else: return S.NaN return S.ComplexInfinity if q < 0: q = -q p = -p if not gcd: gcd = igcd(abs(p), q) if gcd > 1: p //= gcd q //= gcd if q == 1: return Integer(p) if p == 1 and q == 2: return S.Half obj = Expr.__new__(cls) obj.p = p obj.q = q return obj def limit_denominator(self, max_denominator=1000000): """Closest Rational to self with denominator at most max_denominator. 
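The search is delegated to ``fractions.Fraction.limit_denominator``.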
Examples ======== >>> from sympy import Rational >>> Rational('3.141592653589793').limit_denominator(10) 22/7 >>> Rational('3.141592653589793').limit_denominator(100) 311/99 """ f = fractions.Fraction(self.p, self.q) return Rational(f.limit_denominator(fractions.Fraction(int(max_denominator)))) def __getnewargs__(self): return (self.p, self.q) def _hashable_content(self): return (self.p, self.q) def _eval_is_positive(self): return self.p > 0 def _eval_is_zero(self): return self.p == 0 def __neg__(self): return Rational(-self.p, self.q) @_sympifyit('other', NotImplemented) def __add__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.p + self.q*other.p, self.q, 1) elif isinstance(other, Rational): #TODO: this can probably be optimized more return Rational(self.p*other.q + self.q*other.p, self.q*other.q) elif isinstance(other, Float): return other + self else: return Number.__add__(self, other) return Number.__add__(self, other) __radd__ = __add__ @_sympifyit('other', NotImplemented) def __sub__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.p - self.q*other.p, self.q, 1) elif isinstance(other, Rational): return Rational(self.p*other.q - self.q*other.p, self.q*other.q) elif isinstance(other, Float): return -other + self else: return Number.__sub__(self, other) return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __rsub__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.q*other.p - self.p, self.q, 1) elif isinstance(other, Rational): return Rational(self.q*other.p - self.p*other.q, self.q*other.q) elif isinstance(other, Float): return -self + other else: return Number.__rsub__(self, other) return Number.__rsub__(self, other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(self.p*other.p, self.q, igcd(other.p, self.q)) elif isinstance(other, Rational): return Rational(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p)) elif isinstance(other, Float): return other*self else: return Number.__mul__(self, other) return Number.__mul__(self, other) __rmul__ = __mul__ @_sympifyit('other', NotImplemented) def __truediv__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): if self.p and other.p == S.Zero: return S.ComplexInfinity else: return Rational(self.p, self.q*other.p, igcd(self.p, other.p)) elif isinstance(other, Rational): return Rational(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q)) elif isinstance(other, Float): return self*(1/other) else: return Number.__truediv__(self, other) return Number.__truediv__(self, other) @_sympifyit('other', NotImplemented) def __rtruediv__(self, other): if global_parameters.evaluate: if isinstance(other, Integer): return Rational(other.p*self.q, self.p, igcd(self.p, other.p)) elif isinstance(other, Rational): return Rational(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q)) elif isinstance(other, Float): return other*(1/self) else: return Number.__rtruediv__(self, other) return Number.__rtruediv__(self, other) @_sympifyit('other', NotImplemented) def __mod__(self, other): if global_parameters.evaluate: if isinstance(other, Rational): n = (self.p*other.q) // (other.p*self.q) return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q) if isinstance(other, Float): # calculate mod with Rationals, 
*then* round the answer return Float(self.__mod__(Rational(other)), precision=other._prec) return Number.__mod__(self, other) return Number.__mod__(self, other) @_sympifyit('other', NotImplemented) def __rmod__(self, other): if isinstance(other, Rational): return Rational.__mod__(other, self) return Number.__rmod__(self, other) def _eval_power(self, expt): if isinstance(expt, Number): if isinstance(expt, Float): return self._eval_evalf(expt._prec)**expt if expt.is_extended_negative: # (3/4)**-2 -> (4/3)**2 ne = -expt if (ne is S.One): return Rational(self.q, self.p) if self.is_negative: return S.NegativeOne**expt*Rational(self.q, -self.p)**ne else: return Rational(self.q, self.p)**ne if expt is S.Infinity: # -oo already caught by test for negative if self.p > self.q: # (3/2)**oo -> oo return S.Infinity if self.p < -self.q: # (-3/2)**oo -> oo + I*oo return S.Infinity + S.Infinity*S.ImaginaryUnit return S.Zero if isinstance(expt, Integer): # (4/3)**2 -> 4**2 / 3**2 return Rational(self.p**expt.p, self.q**expt.p, 1) if isinstance(expt, Rational): if self.p != 1: # (4/3)**(5/6) -> 4**(5/6)*3**(-5/6) return Integer(self.p)**expt*Integer(self.q)**(-expt) # as the above caught negative self.p, now self is positive return Integer(self.q)**Rational( expt.p*(expt.q - 1), expt.q) / \ Integer(self.q)**Integer(expt.p) if self.is_extended_negative and expt.is_even: return (-self)**expt return def _as_mpf_val(self, prec): return mlib.from_rational(self.p, self.q, prec, rnd) def _mpmath_(self, prec, rnd): return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd)) def __abs__(self): return Rational(abs(self.p), self.q) def __int__(self): p, q = self.p, self.q if p < 0: return -int(-p//q) return int(p//q) def floor(self): return Integer(self.p // self.q) def ceiling(self): return -Integer(-self.p // self.q) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() def __eq__(self, other): from sympy.core.power import integer_log try: other = _sympify(other) except SympifyError: return NotImplemented if not isinstance(other, Number): # S(0) == S.false is False # S(0) == False is True return False if not self: return not other if other.is_NumberSymbol: if other.is_irrational: return False return other.__eq__(self) if other.is_Rational: # a Rational is always in reduced form so will never be 2/4 # so we can just check equivalence of args return self.p == other.p and self.q == other.q if other.is_Float: # all Floats have a denominator that is a power of 2 # so if self doesn't, it can't be equal to other if self.q & (self.q - 1): return False s, m, t = other._mpf_[:3] if s: m = -m if not t: # other is an odd integer if not self.is_Integer or self.is_even: return False return m == self.p if t > 0: # other is an even integer if not self.is_Integer: return False # does m*2**t == self.p return self.p and not self.p % m and \ integer_log(self.p//m, 2) == (t, True) # does non-integer s*m/2**-t = p/q? 
if self.is_Integer: return False return m == self.p and integer_log(self.q, 2) == (-t, True) return False def __ne__(self, other): return not self == other def _Rrel(self, other, attr): # if you want self < other, pass self, other, __gt__ try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Number: op = None s, o = self, other if other.is_NumberSymbol: op = getattr(o, attr) elif other.is_Float: op = getattr(o, attr) elif other.is_Rational: s, o = Integer(s.p*o.q), Integer(s.q*o.p) op = getattr(o, attr) if op: return op(s) if o.is_number and o.is_extended_real: return Integer(s.p), s.q*o def __gt__(self, other): rv = self._Rrel(other, '__lt__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__gt__(*rv) def __ge__(self, other): rv = self._Rrel(other, '__le__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__ge__(*rv) def __lt__(self, other): rv = self._Rrel(other, '__gt__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__lt__(*rv) def __le__(self, other): rv = self._Rrel(other, '__ge__') if rv is None: rv = self, other elif not type(rv) is tuple: return rv return Expr.__le__(*rv) def __hash__(self): return super().__hash__() def factors(self, limit=None, use_trial=True, use_rho=False, use_pm1=False, verbose=False, visual=False): """A wrapper to factorint which return factors of self that are smaller than limit (or cheap to compute). Special methods of factoring are disabled by default so that only trial division is used. """ from sympy.ntheory import factorrat return factorrat(self, limit=limit, use_trial=use_trial, use_rho=use_rho, use_pm1=use_pm1, verbose=verbose).copy() def numerator(self): return self.p def denominator(self): return self.q @_sympifyit('other', NotImplemented) def gcd(self, other): if isinstance(other, Rational): if other == S.Zero: return other return Rational( Integer(igcd(self.p, other.p)), Integer(ilcm(self.q, other.q))) return Number.gcd(self, other) @_sympifyit('other', NotImplemented) def lcm(self, other): if isinstance(other, Rational): return Rational( self.p // igcd(self.p, other.p) * other.p, igcd(self.q, other.q)) return Number.lcm(self, other) def as_numer_denom(self): return Integer(self.p), Integer(self.q) def _sage_(self): import sage.all as sage return sage.Integer(self.p)/sage.Integer(self.q) def as_content_primitive(self, radical=False, clear=True): """Return the tuple (R, self/R) where R is the positive Rational extracted from self. Examples ======== >>> from sympy import S >>> (S(-3)/2).as_content_primitive() (3/2, -1) See docstring of Expr.as_content_primitive for more examples. """ if self: if self.is_positive: return self, S.One return -self, S.NegativeOne return S.One, self def as_coeff_Mul(self, rational=False): """Efficiently extract the coefficient of a product. """ return self, S.One def as_coeff_Add(self, rational=False): """Efficiently extract the coefficient of a summation. """ return self, S.Zero class Integer(Rational): """Represents integer numbers of any size. Examples ======== >>> from sympy import Integer >>> Integer(3) 3 If a float or a rational is passed to Integer, the fractional part will be discarded; the effect is of rounding toward zero. 
>>> Integer(3.8) 3 >>> Integer(-3.8) -3 A string is acceptable input if it can be parsed as an integer: >>> Integer("9" * 20) 99999999999999999999 It is rarely needed to explicitly instantiate an Integer, because Python integers are automatically converted to Integer when they are used in SymPy expressions. """ q = 1 is_integer = True is_number = True is_Integer = True __slots__ = ('p',) def _as_mpf_val(self, prec): return mlib.from_int(self.p, prec, rnd) def _mpmath_(self, prec, rnd): return mpmath.make_mpf(self._as_mpf_val(prec)) @cacheit def __new__(cls, i): if isinstance(i, str): i = i.replace(' ', '') # whereas we cannot, in general, make a Rational from an # arbitrary expression, we can make an Integer unambiguously # (except when a non-integer expression happens to round to # an integer). So we proceed by taking int() of the input and # let the int routines determine whether the expression can # be made into an int or whether an error should be raised. try: ival = int(i) except TypeError: raise TypeError( "Argument of Integer should be of numeric type, got %s." % i) # We only work with well-behaved integer types. This converts, for # example, numpy.int32 instances. if ival == 1: return S.One if ival == -1: return S.NegativeOne if ival == 0: return S.Zero obj = Expr.__new__(cls) obj.p = ival return obj def __getnewargs__(self): return (self.p,) # Arithmetic operations are here for efficiency def __int__(self): return self.p def floor(self): return Integer(self.p) def ceiling(self): return Integer(self.p) def __floor__(self): return self.floor() def __ceil__(self): return self.ceiling() def __neg__(self): return Integer(-self.p) def __abs__(self): if self.p >= 0: return self else: return Integer(-self.p) def __divmod__(self, other): from .containers import Tuple if isinstance(other, Integer) and global_parameters.evaluate: return Tuple(*(divmod(self.p, other.p))) else: return Number.__divmod__(self, other) def __rdivmod__(self, other): from .containers import Tuple if isinstance(other, int) and global_parameters.evaluate: return Tuple(*(divmod(other, self.p))) else: try: other = Number(other) except TypeError: msg = "unsupported operand type(s) for divmod(): '%s' and '%s'" oname = type(other).__name__ sname = type(self).__name__ raise TypeError(msg % (oname, sname)) return Number.__divmod__(other, self) # TODO make it decorator + bytecodehacks? 
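    # The arithmetic dunders below share one fast-path pattern: when
    # global_parameters.evaluate is true, plain Python ints and Integer
    # operands are combined directly via the .p attribute and Rational
    # operands use an explicit p/q formula; any other operand (or evaluation
    # being disabled) is delegated to the corresponding Rational method,
    # except __add__, which builds an unevaluated Add in that case.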
def __add__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(self.p + other) elif isinstance(other, Integer): return Integer(self.p + other.p) elif isinstance(other, Rational): return Rational(self.p*other.q + other.p, other.q, 1) return Rational.__add__(self, other) else: return Add(self, other) def __radd__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(other + self.p) elif isinstance(other, Rational): return Rational(other.p + self.p*other.q, other.q, 1) return Rational.__radd__(self, other) return Rational.__radd__(self, other) def __sub__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(self.p - other) elif isinstance(other, Integer): return Integer(self.p - other.p) elif isinstance(other, Rational): return Rational(self.p*other.q - other.p, other.q, 1) return Rational.__sub__(self, other) return Rational.__sub__(self, other) def __rsub__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(other - self.p) elif isinstance(other, Rational): return Rational(other.p - self.p*other.q, other.q, 1) return Rational.__rsub__(self, other) return Rational.__rsub__(self, other) def __mul__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(self.p*other) elif isinstance(other, Integer): return Integer(self.p*other.p) elif isinstance(other, Rational): return Rational(self.p*other.p, other.q, igcd(self.p, other.q)) return Rational.__mul__(self, other) return Rational.__mul__(self, other) def __rmul__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(other*self.p) elif isinstance(other, Rational): return Rational(other.p*self.p, other.q, igcd(self.p, other.q)) return Rational.__rmul__(self, other) return Rational.__rmul__(self, other) def __mod__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(self.p % other) elif isinstance(other, Integer): return Integer(self.p % other.p) return Rational.__mod__(self, other) return Rational.__mod__(self, other) def __rmod__(self, other): if global_parameters.evaluate: if isinstance(other, int): return Integer(other % self.p) elif isinstance(other, Integer): return Integer(other.p % self.p) return Rational.__rmod__(self, other) return Rational.__rmod__(self, other) def __eq__(self, other): if isinstance(other, int): return (self.p == other) elif isinstance(other, Integer): return (self.p == other.p) return Rational.__eq__(self, other) def __ne__(self, other): return not self == other def __gt__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p > other.p) return Rational.__gt__(self, other) def __lt__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p < other.p) return Rational.__lt__(self, other) def __ge__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p >= other.p) return Rational.__ge__(self, other) def __le__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if other.is_Integer: return _sympify(self.p <= other.p) return Rational.__le__(self, other) def __hash__(self): return hash(self.p) def __index__(self): return self.p ######################################## def _eval_is_odd(self): return bool(self.p % 2) def _eval_power(self, 
expt): """ Tries to do some simplifications on self**expt Returns None if no further simplifications can be done. Explanation =========== When exponent is a fraction (so we have for example a square root), we try to find a simpler representation by factoring the argument up to factors of 2**15, e.g. - sqrt(4) becomes 2 - sqrt(-4) becomes 2*I - (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7) Further simplification would require a special call to factorint on the argument which is not done here for sake of speed. """ from sympy.ntheory.factor_ import perfect_power if expt is S.Infinity: if self.p > S.One: return S.Infinity # cases -1, 0, 1 are done in their respective classes return S.Infinity + S.ImaginaryUnit*S.Infinity if expt is S.NegativeInfinity: return Rational(1, self)**S.Infinity if not isinstance(expt, Number): # simplify when expt is even # (-2)**k --> 2**k if self.is_negative and expt.is_even: return (-self)**expt if isinstance(expt, Float): # Rational knows how to exponentiate by a Float return super()._eval_power(expt) if not isinstance(expt, Rational): return if expt is S.Half and self.is_negative: # we extract I for this special case since everyone is doing so return S.ImaginaryUnit*Pow(-self, expt) if expt.is_negative: # invert base and change sign on exponent ne = -expt if self.is_negative: return S.NegativeOne**expt*Rational(1, -self)**ne else: return Rational(1, self.p)**ne # see if base is a perfect root, sqrt(4) --> 2 x, xexact = integer_nthroot(abs(self.p), expt.q) if xexact: # if it's a perfect root we've finished result = Integer(x**abs(expt.p)) if self.is_negative: result *= S.NegativeOne**expt return result # The following is an algorithm where we collect perfect roots # from the factors of base. # if it's not an nth root, it still might be a perfect power b_pos = int(abs(self.p)) p = perfect_power(b_pos) if p is not False: dict = {p[0]: p[1]} else: dict = Integer(b_pos).factors(limit=2**15) # now process the dict of factors out_int = 1 # integer part out_rad = 1 # extracted radicals sqr_int = 1 sqr_gcd = 0 sqr_dict = {} for prime, exponent in dict.items(): exponent *= expt.p # remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10) div_e, div_m = divmod(exponent, expt.q) if div_e > 0: out_int *= prime**div_e if div_m > 0: # see if the reduced exponent shares a gcd with e.q # (2**2)**(1/10) -> 2**(1/5) g = igcd(div_m, expt.q) if g != 1: out_rad *= Pow(prime, Rational(div_m//g, expt.q//g)) else: sqr_dict[prime] = div_m # identify gcd of remaining powers for p, ex in sqr_dict.items(): if sqr_gcd == 0: sqr_gcd = ex else: sqr_gcd = igcd(sqr_gcd, ex) if sqr_gcd == 1: break for k, v in sqr_dict.items(): sqr_int *= k**(v//sqr_gcd) if sqr_int == b_pos and out_int == 1 and out_rad == 1: result = None else: result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q)) if self.is_negative: result *= Pow(S.NegativeOne, expt) return result def _eval_is_prime(self): from sympy.ntheory import isprime return isprime(self) def _eval_is_composite(self): if self > 1: return fuzzy_not(self.is_prime) else: return False def as_numer_denom(self): return self, S.One @_sympifyit('other', NotImplemented) def __floordiv__(self, other): if not isinstance(other, Expr): return NotImplemented if isinstance(other, Integer): return Integer(self.p // other) return Integer(divmod(self, other)[0]) def __rfloordiv__(self, other): return Integer(Integer(other).p // self.p) # Add sympify converters converter[int] = Integer class AlgebraicNumber(Expr): """Class for representing 
algebraic numbers in SymPy. """ __slots__ = ('rep', 'root', 'alias', 'minpoly') is_AlgebraicNumber = True is_algebraic = True is_number = True # Optional alias symbol is not free. # Actually, alias should be a Str, but some methods # expect that it be an instance of Expr. free_symbols = set() def __new__(cls, expr, coeffs=None, alias=None, **args): """Construct a new algebraic number. """ from sympy import Poly from sympy.polys.polyclasses import ANP, DMP from sympy.polys.numberfields import minimal_polynomial from sympy.core.symbol import Symbol expr = sympify(expr) if isinstance(expr, (tuple, Tuple)): minpoly, root = expr if not minpoly.is_Poly: minpoly = Poly(minpoly) elif expr.is_AlgebraicNumber: minpoly, root = expr.minpoly, expr.root else: minpoly, root = minimal_polynomial( expr, args.get('gen'), polys=True), expr dom = minpoly.get_domain() if coeffs is not None: if not isinstance(coeffs, ANP): rep = DMP.from_sympy_list(sympify(coeffs), 0, dom) scoeffs = Tuple(*coeffs) else: rep = DMP.from_list(coeffs.to_list(), 0, dom) scoeffs = Tuple(*coeffs.to_list()) if rep.degree() >= minpoly.degree(): rep = rep.rem(minpoly.rep) else: rep = DMP.from_list([1, 0], 0, dom) scoeffs = Tuple(1, 0) sargs = (root, scoeffs) if alias is not None: if not isinstance(alias, Symbol): alias = Symbol(alias) sargs = sargs + (alias,) obj = Expr.__new__(cls, *sargs) obj.rep = rep obj.root = root obj.alias = alias obj.minpoly = minpoly return obj def __hash__(self): return super().__hash__() def _eval_evalf(self, prec): return self.as_expr()._evalf(prec) @property def is_aliased(self): """Returns ``True`` if ``alias`` was set. """ return self.alias is not None def as_poly(self, x=None): """Create a Poly instance from ``self``. """ from sympy import Dummy, Poly, PurePoly if x is not None: return Poly.new(self.rep, x) else: if self.alias is not None: return Poly.new(self.rep, self.alias) else: return PurePoly.new(self.rep, Dummy('x')) def as_expr(self, x=None): """Create a Basic expression from ``self``. """ return self.as_poly(x or self.root).as_expr().expand() def coeffs(self): """Returns all SymPy coefficients of an algebraic number. """ return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ] def native_coeffs(self): """Returns all native coefficients of an algebraic number. """ return self.rep.all_coeffs() def to_algebraic_integer(self): """Convert ``self`` to an algebraic integer. """ from sympy import Poly f = self.minpoly if f.LC() == 1: return self coeff = f.LC()**(f.degree() - 1) poly = f.compose(Poly(f.gen/f.LC())) minpoly = poly*coeff root = f.LC()*self.root return AlgebraicNumber((minpoly, root), self.coeffs()) def _eval_simplify(self, **kwargs): from sympy.polys import CRootOf, minpoly measure, ratio = kwargs['measure'], kwargs['ratio'] for r in [r for r in self.minpoly.all_roots() if r.func != CRootOf]: if minpoly(self.root - r).is_Symbol: # use the matching root if it's simpler if measure(r) < ratio*measure(self.root): return AlgebraicNumber(r) return self class RationalConstant(Rational): """ Abstract base class for rationals with specific behaviors Derived classes must define class attributes p and q and should probably all be singletons. """ __slots__ = () def __new__(cls): return AtomicExpr.__new__(cls) class IntegerConstant(Integer): __slots__ = () def __new__(cls): return AtomicExpr.__new__(cls) class Zero(IntegerConstant, metaclass=Singleton): """The number zero. 
Zero is a singleton, and can be accessed by ``S.Zero`` Examples ======== >>> from sympy import S, Integer >>> Integer(0) is S.Zero True >>> 1/S.Zero zoo References ========== .. [1] https://en.wikipedia.org/wiki/Zero """ p = 0 q = 1 is_positive = False is_negative = False is_zero = True is_number = True is_comparable = True __slots__ = () def __getnewargs__(self): return () @staticmethod def __abs__(): return S.Zero @staticmethod def __neg__(): return S.Zero def _eval_power(self, expt): if expt.is_positive: return self if expt.is_negative: return S.ComplexInfinity if expt.is_extended_real is False: return S.NaN # infinities are already handled with pos and neg # tests above; now throw away leading numbers on Mul # exponent coeff, terms = expt.as_coeff_Mul() if coeff.is_negative: return S.ComplexInfinity**terms if coeff is not S.One: # there is a Number to discard return self**terms def _eval_order(self, *symbols): # Order(0,x) -> 0 return self def __bool__(self): return False def as_coeff_Mul(self, rational=False): # XXX this routine should be deleted """Efficiently extract the coefficient of a summation. """ return S.One, self class One(IntegerConstant, metaclass=Singleton): """The number one. One is a singleton, and can be accessed by ``S.One``. Examples ======== >>> from sympy import S, Integer >>> Integer(1) is S.One True References ========== .. [1] https://en.wikipedia.org/wiki/1_%28number%29 """ is_number = True p = 1 q = 1 __slots__ = () def __getnewargs__(self): return () @staticmethod def __abs__(): return S.One @staticmethod def __neg__(): return S.NegativeOne def _eval_power(self, expt): return self def _eval_order(self, *symbols): return @staticmethod def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False, verbose=False, visual=False): if visual: return S.One else: return {} class NegativeOne(IntegerConstant, metaclass=Singleton): """The number negative one. NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``. Examples ======== >>> from sympy import S, Integer >>> Integer(-1) is S.NegativeOne True See Also ======== One References ========== .. [1] https://en.wikipedia.org/wiki/%E2%88%921_%28number%29 """ is_number = True p = -1 q = 1 __slots__ = () def __getnewargs__(self): return () @staticmethod def __abs__(): return S.One @staticmethod def __neg__(): return S.One def _eval_power(self, expt): if expt.is_odd: return S.NegativeOne if expt.is_even: return S.One if isinstance(expt, Number): if isinstance(expt, Float): return Float(-1.0)**expt if expt is S.NaN: return S.NaN if expt is S.Infinity or expt is S.NegativeInfinity: return S.NaN if expt is S.Half: return S.ImaginaryUnit if isinstance(expt, Rational): if expt.q == 2: return S.ImaginaryUnit**Integer(expt.p) i, r = divmod(expt.p, expt.q) if i: return self**i*self**Rational(r, expt.q) return class Half(RationalConstant, metaclass=Singleton): """The rational number 1/2. Half is a singleton, and can be accessed by ``S.Half``. Examples ======== >>> from sympy import S, Rational >>> Rational(1, 2) is S.Half True References ========== .. [1] https://en.wikipedia.org/wiki/One_half """ is_number = True p = 1 q = 2 __slots__ = () def __getnewargs__(self): return () @staticmethod def __abs__(): return S.Half class Infinity(Number, metaclass=Singleton): r"""Positive infinite quantity. Explanation =========== In real analysis the symbol `\infty` denotes an unbounded limit: `x\to\infty` means that `x` grows without bound. 
Infinity is often used not only to define a limit but as a value in the affinely extended real number system. Points labeled `+\infty` and `-\infty` can be added to the topological space of the real numbers, producing the two-point compactification of the real numbers. Adding algebraic properties to this gives us the extended real numbers. Infinity is a singleton, and can be accessed by ``S.Infinity``, or can be imported as ``oo``. Examples ======== >>> from sympy import oo, exp, limit, Symbol >>> 1 + oo oo >>> 42/oo 0 >>> x = Symbol('x') >>> limit(exp(x), x, oo) oo See Also ======== NegativeInfinity, NaN References ========== .. [1] https://en.wikipedia.org/wiki/Infinity """ is_commutative = True is_number = True is_complex = False is_extended_real = True is_infinite = True is_comparable = True is_extended_positive = True is_prime = False __slots__ = () def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"\infty" def _eval_subs(self, old, new): if self == old: return new def _eval_evalf(self, prec=None): return Float('inf') def evalf(self, prec=None, **options): return self._eval_evalf(prec) @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NegativeInfinity or other is S.NaN: return S.NaN return self return Number.__add__(self, other) __radd__ = __add__ @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or other is S.NaN: return S.NaN return self return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __rsub__(self, other): return (-self).__add__(other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other.is_zero or other is S.NaN: return S.NaN if other.is_extended_positive: return self return S.NegativeInfinity return Number.__mul__(self, other) __rmul__ = __mul__ @_sympifyit('other', NotImplemented) def __truediv__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or \ other is S.NegativeInfinity or \ other is S.NaN: return S.NaN if other.is_extended_nonnegative: return self return S.NegativeInfinity return Number.__truediv__(self, other) def __abs__(self): return S.Infinity def __neg__(self): return S.NegativeInfinity def _eval_power(self, expt): """ ``expt`` is symbolic object but not equal to 0 or 1. 
================ ======= ============================== Expression Result Notes ================ ======= ============================== ``oo ** nan`` ``nan`` ``oo ** -p`` ``0`` ``p`` is number, ``oo`` ================ ======= ============================== See Also ======== Pow NaN NegativeInfinity """ from sympy.functions import re if expt.is_extended_positive: return S.Infinity if expt.is_extended_negative: return S.Zero if expt is S.NaN: return S.NaN if expt is S.ComplexInfinity: return S.NaN if expt.is_extended_real is False and expt.is_number: expt_real = re(expt) if expt_real.is_positive: return S.ComplexInfinity if expt_real.is_negative: return S.Zero if expt_real.is_zero: return S.NaN return self**expt.evalf() def _as_mpf_val(self, prec): return mlib.finf def _sage_(self): import sage.all as sage return sage.oo def __hash__(self): return super().__hash__() def __eq__(self, other): return other is S.Infinity or other == float('inf') def __ne__(self, other): return other is not S.Infinity and other != float('inf') __gt__ = Expr.__gt__ __ge__ = Expr.__ge__ __lt__ = Expr.__lt__ __le__ = Expr.__le__ @_sympifyit('other', NotImplemented) def __mod__(self, other): if not isinstance(other, Expr): return NotImplemented return S.NaN __rmod__ = __mod__ def floor(self): return self def ceiling(self): return self oo = S.Infinity class NegativeInfinity(Number, metaclass=Singleton): """Negative infinite quantity. NegativeInfinity is a singleton, and can be accessed by ``S.NegativeInfinity``. See Also ======== Infinity """ is_extended_real = True is_complex = False is_commutative = True is_infinite = True is_comparable = True is_extended_negative = True is_number = True is_prime = False __slots__ = () def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"-\infty" def _eval_subs(self, old, new): if self == old: return new def _eval_evalf(self, prec=None): return Float('-inf') def evalf(self, prec=None, **options): return self._eval_evalf(prec) @_sympifyit('other', NotImplemented) def __add__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or other is S.NaN: return S.NaN return self return Number.__add__(self, other) __radd__ = __add__ @_sympifyit('other', NotImplemented) def __sub__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.NegativeInfinity or other is S.NaN: return S.NaN return self return Number.__sub__(self, other) @_sympifyit('other', NotImplemented) def __rsub__(self, other): return (-self).__add__(other) @_sympifyit('other', NotImplemented) def __mul__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other.is_zero or other is S.NaN: return S.NaN if other.is_extended_positive: return self return S.Infinity return Number.__mul__(self, other) __rmul__ = __mul__ @_sympifyit('other', NotImplemented) def __truediv__(self, other): if isinstance(other, Number) and global_parameters.evaluate: if other is S.Infinity or \ other is S.NegativeInfinity or \ other is S.NaN: return S.NaN if other.is_extended_nonnegative: return self return S.Infinity return Number.__truediv__(self, other) def __abs__(self): return S.Infinity def __neg__(self): return S.Infinity def _eval_power(self, expt): """ ``expt`` is symbolic object but not equal to 0 or 1. 
================ ======= ============================== Expression Result Notes ================ ======= ============================== ``(-oo) ** nan`` ``nan`` ``(-oo) ** oo`` ``nan`` ``(-oo) ** -oo`` ``nan`` ``(-oo) ** e`` ``oo`` ``e`` is positive even integer ``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer ================ ======= ============================== See Also ======== Infinity Pow NaN """ if expt.is_number: if expt is S.NaN or \ expt is S.Infinity or \ expt is S.NegativeInfinity: return S.NaN if isinstance(expt, Integer) and expt.is_extended_positive: if expt.is_odd: return S.NegativeInfinity else: return S.Infinity return S.NegativeOne**expt*S.Infinity**expt def _as_mpf_val(self, prec): return mlib.fninf def _sage_(self): import sage.all as sage return -(sage.oo) def __hash__(self): return super().__hash__() def __eq__(self, other): return other is S.NegativeInfinity or other == float('-inf') def __ne__(self, other): return other is not S.NegativeInfinity and other != float('-inf') __gt__ = Expr.__gt__ __ge__ = Expr.__ge__ __lt__ = Expr.__lt__ __le__ = Expr.__le__ @_sympifyit('other', NotImplemented) def __mod__(self, other): if not isinstance(other, Expr): return NotImplemented return S.NaN __rmod__ = __mod__ def floor(self): return self def ceiling(self): return self def as_powers_dict(self): return {S.NegativeOne: 1, S.Infinity: 1} class NaN(Number, metaclass=Singleton): """ Not a Number. Explanation =========== This serves as a place holder for numeric values that are indeterminate. Most operations on NaN, produce another NaN. Most indeterminate forms, such as ``0/0`` or ``oo - oo` produce NaN. Two exceptions are ``0**0`` and ``oo**0``, which all produce ``1`` (this is consistent with Python's float). NaN is loosely related to floating point nan, which is defined in the IEEE 754 floating point standard, and corresponds to the Python ``float('nan')``. Differences are noted below. NaN is mathematically not equal to anything else, even NaN itself. This explains the initially counter-intuitive results with ``Eq`` and ``==`` in the examples below. NaN is not comparable so inequalities raise a TypeError. This is in contrast with floating point nan where all inequalities are false. NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported as ``nan``. Examples ======== >>> from sympy import nan, S, oo, Eq >>> nan is S.NaN True >>> oo - oo nan >>> nan + 1 nan >>> Eq(nan, nan) # mathematical equality False >>> nan == nan # structural equality True References ========== .. 
[1] https://en.wikipedia.org/wiki/NaN """ is_commutative = True is_extended_real = None is_real = None is_rational = None is_algebraic = None is_transcendental = None is_integer = None is_comparable = False is_finite = None is_zero = None is_prime = None is_positive = None is_negative = None is_number = True __slots__ = () def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"\text{NaN}" def __neg__(self): return self @_sympifyit('other', NotImplemented) def __add__(self, other): return self @_sympifyit('other', NotImplemented) def __sub__(self, other): return self @_sympifyit('other', NotImplemented) def __mul__(self, other): return self @_sympifyit('other', NotImplemented) def __truediv__(self, other): return self def floor(self): return self def ceiling(self): return self def _as_mpf_val(self, prec): return _mpf_nan def _sage_(self): import sage.all as sage return sage.NaN def __hash__(self): return super().__hash__() def __eq__(self, other): # NaN is structurally equal to another NaN return other is S.NaN def __ne__(self, other): return other is not S.NaN # Expr will _sympify and raise TypeError __gt__ = Expr.__gt__ __ge__ = Expr.__ge__ __lt__ = Expr.__lt__ __le__ = Expr.__le__ nan = S.NaN @dispatch(NaN, Expr) # type:ignore def _eval_is_eq(a, b): # noqa:F811 return False class ComplexInfinity(AtomicExpr, metaclass=Singleton): r"""Complex infinity. Explanation =========== In complex analysis the symbol `\tilde\infty`, called "complex infinity", represents a quantity with infinite magnitude, but undetermined complex phase. ComplexInfinity is a singleton, and can be accessed by ``S.ComplexInfinity``, or can be imported as ``zoo``. Examples ======== >>> from sympy import zoo >>> zoo + 42 zoo >>> 42/zoo 0 >>> zoo + zoo nan >>> zoo*zoo zoo See Also ======== Infinity """ is_commutative = True is_infinite = True is_number = True is_prime = False is_complex = False is_extended_real = False __slots__ = () def __new__(cls): return AtomicExpr.__new__(cls) def _latex(self, printer): return r"\tilde{\infty}" @staticmethod def __abs__(): return S.Infinity def floor(self): return self def ceiling(self): return self @staticmethod def __neg__(): return S.ComplexInfinity def _eval_power(self, expt): if expt is S.ComplexInfinity: return S.NaN if isinstance(expt, Number): if expt.is_zero: return S.NaN else: if expt.is_positive: return S.ComplexInfinity else: return S.Zero def _sage_(self): import sage.all as sage return sage.UnsignedInfinityRing.gen() zoo = S.ComplexInfinity class NumberSymbol(AtomicExpr): is_commutative = True is_finite = True is_number = True __slots__ = () is_NumberSymbol = True def __new__(cls): return AtomicExpr.__new__(cls) def approximation(self, number_cls): """ Return an interval with number_cls endpoints that contains the value of NumberSymbol. If not implemented, then return None. 
""" def _eval_evalf(self, prec): return Float._new(self._as_mpf_val(prec), prec) def __eq__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented if self is other: return True if other.is_Number and self.is_irrational: return False return False # NumberSymbol != non-(Number|self) def __ne__(self, other): return not self == other def __le__(self, other): if self is other: return S.true return Expr.__le__(self, other) def __ge__(self, other): if self is other: return S.true return Expr.__ge__(self, other) def __int__(self): # subclass with appropriate return value raise NotImplementedError def __hash__(self): return super().__hash__() class Exp1(NumberSymbol, metaclass=Singleton): r"""The `e` constant. Explanation =========== The transcendental number `e = 2.718281828\ldots` is the base of the natural logarithm and of the exponential function, `e = \exp(1)`. Sometimes called Euler's number or Napier's constant. Exp1 is a singleton, and can be accessed by ``S.Exp1``, or can be imported as ``E``. Examples ======== >>> from sympy import exp, log, E >>> E is exp(1) True >>> log(E) 1 References ========== .. [1] https://en.wikipedia.org/wiki/E_%28mathematical_constant%29 """ is_real = True is_positive = True is_negative = False # XXX Forces is_negative/is_nonnegative is_irrational = True is_number = True is_algebraic = False is_transcendental = True __slots__ = () def _latex(self, printer): return r"e" @staticmethod def __abs__(): return S.Exp1 def __int__(self): return 2 def _as_mpf_val(self, prec): return mpf_e(prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (Integer(2), Integer(3)) elif issubclass(number_cls, Rational): pass def _eval_power(self, expt): from sympy import exp return exp(expt) def _eval_rewrite_as_sin(self, **kwargs): from sympy import sin I = S.ImaginaryUnit return sin(I + S.Pi/2) - I*sin(I) def _eval_rewrite_as_cos(self, **kwargs): from sympy import cos I = S.ImaginaryUnit return cos(I) + I*cos(I + S.Pi/2) def _sage_(self): import sage.all as sage return sage.e E = S.Exp1 class Pi(NumberSymbol, metaclass=Singleton): r"""The `\pi` constant. Explanation =========== The transcendental number `\pi = 3.141592654\ldots` represents the ratio of a circle's circumference to its diameter, the area of the unit circle, the half-period of trigonometric functions, and many other things in mathematics. Pi is a singleton, and can be accessed by ``S.Pi``, or can be imported as ``pi``. Examples ======== >>> from sympy import S, pi, oo, sin, exp, integrate, Symbol >>> S.Pi pi >>> pi > 3 True >>> pi.is_irrational True >>> x = Symbol('x') >>> sin(x + 2*pi) sin(x) >>> integrate(exp(-x**2), (x, -oo, oo)) sqrt(pi) References ========== .. [1] https://en.wikipedia.org/wiki/Pi """ is_real = True is_positive = True is_negative = False is_irrational = True is_number = True is_algebraic = False is_transcendental = True __slots__ = () def _latex(self, printer): return r"\pi" @staticmethod def __abs__(): return S.Pi def __int__(self): return 3 def _as_mpf_val(self, prec): return mpf_pi(prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (Integer(3), Integer(4)) elif issubclass(number_cls, Rational): return (Rational(223, 71), Rational(22, 7)) def _sage_(self): import sage.all as sage return sage.pi pi = S.Pi class GoldenRatio(NumberSymbol, metaclass=Singleton): r"""The golden ratio, `\phi`. Explanation =========== `\phi = \frac{1 + \sqrt{5}}{2}` is algebraic number. 
Two quantities are in the golden ratio if their ratio is the same as the ratio of their sum to the larger of the two quantities, i.e. their maximum. GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``. Examples ======== >>> from sympy import S >>> S.GoldenRatio > 1 True >>> S.GoldenRatio.expand(func=True) 1/2 + sqrt(5)/2 >>> S.GoldenRatio.is_irrational True References ========== .. [1] https://en.wikipedia.org/wiki/Golden_ratio """ is_real = True is_positive = True is_negative = False is_irrational = True is_number = True is_algebraic = True is_transcendental = False __slots__ = () def _latex(self, printer): return r"\phi" def __int__(self): return 1 def _as_mpf_val(self, prec): # XXX track down why this has to be increased rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10) return mpf_norm(rv, prec) def _eval_expand_func(self, **hints): from sympy import sqrt return S.Half + S.Half*sqrt(5) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.One, Rational(2)) elif issubclass(number_cls, Rational): pass def _sage_(self): import sage.all as sage return sage.golden_ratio _eval_rewrite_as_sqrt = _eval_expand_func class TribonacciConstant(NumberSymbol, metaclass=Singleton): r"""The tribonacci constant. Explanation =========== The tribonacci numbers are like the Fibonacci numbers, but instead of starting with two predetermined terms, the sequence starts with three predetermined terms and each term afterwards is the sum of the preceding three terms. The tribonacci constant is the ratio toward which adjacent tribonacci numbers tend. It is a root of the polynomial `x^3 - x^2 - x - 1 = 0`, and also satisfies the equation `x + x^{-3} = 2`. TribonacciConstant is a singleton, and can be accessed by ``S.TribonacciConstant``. Examples ======== >>> from sympy import S >>> S.TribonacciConstant > 1 True >>> S.TribonacciConstant.expand(func=True) 1/3 + (19 - 3*sqrt(33))**(1/3)/3 + (3*sqrt(33) + 19)**(1/3)/3 >>> S.TribonacciConstant.is_irrational True >>> S.TribonacciConstant.n(20) 1.8392867552141611326 References ========== .. [1] https://en.wikipedia.org/wiki/Generalizations_of_Fibonacci_numbers#Tribonacci_numbers """ is_real = True is_positive = True is_negative = False is_irrational = True is_number = True is_algebraic = True is_transcendental = False __slots__ = () def _latex(self, printer): return r"\text{TribonacciConstant}" def __int__(self): return 2 def _eval_evalf(self, prec): rv = self._eval_expand_func(function=True)._eval_evalf(prec + 4) return Float(rv, precision=prec) def _eval_expand_func(self, **hints): from sympy import sqrt, cbrt return (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3 def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.One, Rational(2)) elif issubclass(number_cls, Rational): pass _eval_rewrite_as_sqrt = _eval_expand_func class EulerGamma(NumberSymbol, metaclass=Singleton): r"""The Euler-Mascheroni constant. Explanation =========== `\gamma = 0.5772157\ldots` (also called Euler's constant) is a mathematical constant recurring in analysis and number theory. It is defined as the limiting difference between the harmonic series and the natural logarithm: .. math:: \gamma = \lim\limits_{n\to\infty} \left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right) EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``. Examples ======== >>> from sympy import S >>> S.EulerGamma.is_irrational >>> S.EulerGamma > 0 True >>> S.EulerGamma > 1 False References ========== .. 
[1] https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant """ is_real = True is_positive = True is_negative = False is_irrational = None is_number = True __slots__ = () def _latex(self, printer): return r"\gamma" def __int__(self): return 0 def _as_mpf_val(self, prec): # XXX track down why this has to be increased v = mlib.libhyper.euler_fixed(prec + 10) rv = mlib.from_man_exp(v, -prec - 10) return mpf_norm(rv, prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.Zero, S.One) elif issubclass(number_cls, Rational): return (S.Half, Rational(3, 5)) def _sage_(self): import sage.all as sage return sage.euler_gamma class Catalan(NumberSymbol, metaclass=Singleton): r"""Catalan's constant. Explanation =========== `K = 0.91596559\ldots` is given by the infinite series .. math:: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2} Catalan is a singleton, and can be accessed by ``S.Catalan``. Examples ======== >>> from sympy import S >>> S.Catalan.is_irrational >>> S.Catalan > 0 True >>> S.Catalan > 1 False References ========== .. [1] https://en.wikipedia.org/wiki/Catalan%27s_constant """ is_real = True is_positive = True is_negative = False is_irrational = None is_number = True __slots__ = () def __int__(self): return 0 def _as_mpf_val(self, prec): # XXX track down why this has to be increased v = mlib.catalan_fixed(prec + 10) rv = mlib.from_man_exp(v, -prec - 10) return mpf_norm(rv, prec) def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): return (S.Zero, S.One) elif issubclass(number_cls, Rational): return (Rational(9, 10), S.One) def _eval_rewrite_as_Sum(self, k_sym=None, symbols=None): from sympy import Sum, Dummy if (k_sym is not None) or (symbols is not None): return self k = Dummy('k', integer=True, nonnegative=True) return Sum((-1)**k / (2*k+1)**2, (k, 0, S.Infinity)) def _sage_(self): import sage.all as sage return sage.catalan class ImaginaryUnit(AtomicExpr, metaclass=Singleton): r"""The imaginary unit, `i = \sqrt{-1}`. I is a singleton, and can be accessed by ``S.I``, or can be imported as ``I``. Examples ======== >>> from sympy import I, sqrt >>> sqrt(-1) I >>> I*I -1 >>> 1/I -I References ========== .. 
[1] https://en.wikipedia.org/wiki/Imaginary_unit """ is_commutative = True is_imaginary = True is_finite = True is_number = True is_algebraic = True is_transcendental = False __slots__ = () def _latex(self, printer): return printer._settings['imaginary_unit_latex'] @staticmethod def __abs__(): return S.One def _eval_evalf(self, prec): return self def _eval_conjugate(self): return -S.ImaginaryUnit def _eval_power(self, expt): """ b is I = sqrt(-1) e is symbolic object but not equal to 0, 1 I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> sin(Pi*r/2) + cos(Pi*r/2)*I, r is decimal I**0 mod 4 -> 1 I**1 mod 4 -> I I**2 mod 4 -> -1 I**3 mod 4 -> -I """ if isinstance(expt, Number): if isinstance(expt, Integer): expt = expt.p % 4 if expt == 0: return S.One if expt == 1: return S.ImaginaryUnit if expt == 2: return -S.One return -S.ImaginaryUnit return def as_base_exp(self): return S.NegativeOne, S.Half def _sage_(self): import sage.all as sage return sage.I @property def _mpc_(self): return (Float(0)._mpf_, Float(1)._mpf_) I = S.ImaginaryUnit @dispatch(Tuple, Number) # type:ignore def _eval_is_eq(self, other): # noqa: F811 return False def sympify_fractions(f): return Rational(f.numerator, f.denominator, 1) converter[fractions.Fraction] = sympify_fractions if HAS_GMPY: def sympify_mpz(x): return Integer(int(x)) # XXX: The sympify_mpq function here was never used because it is # overridden by the other sympify_mpq function below. Maybe it should just # be removed or maybe it should be used for something... def sympify_mpq(x): return Rational(int(x.numerator), int(x.denominator)) converter[type(gmpy.mpz(1))] = sympify_mpz converter[type(gmpy.mpq(1, 2))] = sympify_mpq def sympify_mpmath_mpq(x): p, q = x._mpq_ return Rational(p, q, 1) converter[type(mpmath.rational.mpq(1, 2))] = sympify_mpmath_mpq def sympify_mpmath(x): return Expr._from_mpmath(x, x.context.prec) converter[mpnumeric] = sympify_mpmath def sympify_complex(a): real, imag = list(map(sympify, (a.real, a.imag))) return real + S.ImaginaryUnit*imag converter[complex] = sympify_complex from .power import Pow, integer_nthroot from .mul import Mul Mul.identity = One() from .add import Add Add.identity = Zero() def _register_classes(): numbers.Number.register(Number) numbers.Real.register(Float) numbers.Rational.register(Rational) numbers.Rational.register(Integer) _register_classes()
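# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): a minimal, hedged
# demonstration of the exponentiation simplifications implemented by
# Rational._eval_power and Integer._eval_power, assuming a standard SymPy
# installation is importable.
if __name__ == "__main__":
    from sympy import Integer, Rational, sqrt, oo, I

    assert sqrt(Integer(4)) == 2                   # perfect root is extracted
    assert sqrt(Integer(-4)) == 2*I                # I is factored out of negative bases
    assert Rational(3, 4)**-2 == Rational(16, 9)   # negative exponent inverts the base
    assert Rational(3, 2)**oo == oo                # |base| > 1 grows without bound
    assert Rational(1, 2)**oo == 0                 # |base| < 1 decays to zero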
""" Reimplementations of constructs introduced in later versions of Python than we support. Also some functions that are needed SymPy-wide and are located here for easy import. """ from typing import Tuple, Type import operator from collections import defaultdict from sympy.external import import_module """ Python 2 and Python 3 compatible imports String and Unicode compatible changes: * `unicode()` removed in Python 3, import `unicode` for Python 2/3 compatible function * Use `u()` for escaped unicode sequences (e.g. u'\u2020' -> u('\u2020')) * Use `u_decode()` to decode utf-8 formatted unicode strings Renamed function attributes: * Python 2 `.func_code`, Python 3 `.__func__`, access with `get_function_code()` * Python 2 `.func_globals`, Python 3 `.__globals__`, access with `get_function_globals()` * Python 2 `.func_name`, Python 3 `.__name__`, access with `get_function_name()` Moved modules: * `reduce()` * `StringIO()` * `cStringIO()` (same as `StingIO()` in Python 3) * Python 2 `__builtin__`, access with Python 3 name, `builtins` exec: * Use `exec_()`, with parameters `exec_(code, globs=None, locs=None)` Metaclasses: * Use `with_metaclass()`, examples below * Define class `Foo` with metaclass `Meta`, and no parent: class Foo(with_metaclass(Meta)): pass * Define class `Foo` with metaclass `Meta` and parent class `Bar`: class Foo(with_metaclass(Meta, Bar)): pass """ __all__ = [ 'PY3', 'int_info', 'SYMPY_INTS', 'clock', 'unicode', 'u_decode', 'get_function_code', 'gmpy', 'get_function_globals', 'get_function_name', 'builtins', 'reduce', 'StringIO', 'cStringIO', 'exec_', 'Mapping', 'Callable', 'MutableMapping', 'MutableSet', 'Iterable', 'Hashable', 'unwrap', 'accumulate', 'with_metaclass', 'NotIterable', 'iterable', 'is_sequence', 'as_int', 'default_sort_key', 'ordered', 'GROUND_TYPES', 'HAS_GMPY', ] import sys PY3 = True int_info = sys.int_info # String / unicode compatibility unicode = str def u_decode(x): return x # Moved definitions get_function_code = operator.attrgetter("__code__") get_function_globals = operator.attrgetter("__globals__") get_function_name = operator.attrgetter("__name__") import builtins from functools import reduce from io import StringIO cStringIO = StringIO exec_ = getattr(builtins, "exec") from collections.abc import (Mapping, Callable, MutableMapping, MutableSet, Iterable, Hashable) from inspect import unwrap from itertools import accumulate def with_metaclass(meta, *bases): """ Create a base class with a metaclass. For example, if you have the metaclass >>> class Meta(type): ... pass Use this as the metaclass by doing >>> from sympy.core.compatibility import with_metaclass >>> class MyClass(with_metaclass(Meta, object)): ... pass This is equivalent to the Python 2:: class MyClass(object): __metaclass__ = Meta or Python 3:: class MyClass(object, metaclass=Meta): pass That is, the first argument is the metaclass, and the remaining arguments are the base classes. Note that if the base class is just ``object``, you may omit it. >>> MyClass.__mro__ (<class '...MyClass'>, <... 'object'>) >>> type(MyClass) <class '...Meta'> """ # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. # Code copied from the 'six' library. 
class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, "NewBase", (), {}) # These are in here because telling if something is an iterable just by calling # hasattr(obj, "__iter__") behaves differently in Python 2 and Python 3. In # particular, hasattr(str, "__iter__") is False in Python 2 and True in Python 3. # I think putting them here also makes it easier to use them in the core. class NotIterable: """ Use this as mixin when creating a class which is not supposed to return true when iterable() is called on its instances because calling list() on the instance, for example, would result in an infinite loop. """ pass def iterable(i, exclude=(str, dict, NotIterable)): """ Return a boolean indicating whether ``i`` is SymPy iterable. True also indicates that the iterator is finite, e.g. you can call list(...) on the instance. When SymPy is working with iterables, it is almost always assuming that the iterable is not a string or a mapping, so those are excluded by default. If you want a pure Python definition, make exclude=None. To exclude multiple items, pass them as a tuple. You can also set the _iterable attribute to True or False on your class, which will override the checks here, including the exclude test. As a rule of thumb, some SymPy functions use this to check if they should recursively map over an object. If an object is technically iterable in the Python sense but does not desire this behavior (e.g., because its iteration is not finite, or because iteration might induce an unwanted computation), it should disable it by setting the _iterable attribute to False. See also: is_sequence Examples ======== >>> from sympy.utilities.iterables import iterable >>> from sympy import Tuple >>> things = [[1], (1,), set([1]), Tuple(1), (j for j in [1, 2]), {1:2}, '1', 1] >>> for i in things: ... print('%s %s' % (iterable(i), type(i))) True <... 'list'> True <... 'tuple'> True <... 'set'> True <class 'sympy.core.containers.Tuple'> True <... 'generator'> False <... 'dict'> False <... 'str'> False <... 'int'> >>> iterable({}, exclude=None) True >>> iterable({}, exclude=str) True >>> iterable("no", exclude=str) False """ if hasattr(i, '_iterable'): return i._iterable try: iter(i) except TypeError: return False if exclude: return not isinstance(i, exclude) return True def is_sequence(i, include=None): """ Return a boolean indicating whether ``i`` is a sequence in the SymPy sense. If anything that fails the test below should be included as being a sequence for your application, set 'include' to that object's type; multiple types should be passed as a tuple of types. Note: although generators can generate a sequence, they often need special handling to make sure their elements are captured before the generator is exhausted, so these are not included by default in the definition of a sequence. See also: iterable Examples ======== >>> from sympy.utilities.iterables import is_sequence >>> from types import GeneratorType >>> is_sequence([]) True >>> is_sequence(set()) False >>> is_sequence('abc') False >>> is_sequence('abc', include=str) True >>> generator = (c for c in 'abc') >>> is_sequence(generator) False >>> is_sequence(generator, include=(str, GeneratorType)) True """ return (hasattr(i, '__getitem__') and iterable(i) or bool(include) and isinstance(i, include)) def as_int(n, strict=True): """ Convert the argument to a builtin integer. The return value is guaranteed to be equal to the input. 
ValueError is raised if the input has a non-integral value. When ``strict`` is True, this uses `__index__ <https://docs.python.org/3/reference/datamodel.html#object.__index__>`_ and when it is False it uses ``int``. Examples ======== >>> from sympy.core.compatibility import as_int >>> from sympy import sqrt, S The function is primarily concerned with sanitizing input for functions that need to work with builtin integers, so anything that is unambiguously an integer should be returned as an int: >>> as_int(S(3)) 3 Floats, being of limited precision, are not assumed to be exact and will raise an error unless the ``strict`` flag is False. This precision issue becomes apparent for large floating point numbers: >>> big = 1e23 >>> type(big) is float True >>> big == int(big) True >>> as_int(big) Traceback (most recent call last): ... ValueError: ... is not an integer >>> as_int(big, strict=False) 99999999999999991611392 Input that might be a complex representation of an integer value is also rejected by default: >>> one = sqrt(3 + 2*sqrt(2)) - sqrt(2) >>> int(one) == 1 True >>> as_int(one) Traceback (most recent call last): ... ValueError: ... is not an integer """ if strict: try: if type(n) is bool: raise TypeError return operator.index(n) except TypeError: raise ValueError('%s is not an integer' % (n,)) else: try: result = int(n) except TypeError: raise ValueError('%s is not an integer' % (n,)) if n != result: raise ValueError('%s is not an integer' % (n,)) return result def default_sort_key(item, order=None): """Return a key that can be used for sorting. The key has the structure: (class_key, (len(args), args), exponent.sort_key(), coefficient) This key is supplied by the sort_key routine of Basic objects when ``item`` is a Basic object or an object (other than a string) that sympifies to a Basic object. Otherwise, this function produces the key. The ``order`` argument is passed along to the sort_key routine and is used to determine how the terms *within* an expression are ordered. (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex', and reversed values of the same (e.g. 'rev-lex'). The default order value is None (which translates to 'lex'). Examples ======== >>> from sympy import S, I, default_sort_key, sin, cos, sqrt >>> from sympy.core.function import UndefinedFunction >>> from sympy.abc import x The following are equivalent ways of getting the key for an object: >>> x.sort_key() == default_sort_key(x) True Here are some examples of the key that is produced: >>> default_sort_key(UndefinedFunction('f')) ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key('1') ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key(S.One) ((1, 0, 'Number'), (0, ()), (), 1) >>> default_sort_key(2) ((1, 0, 'Number'), (0, ()), (), 2) While sort_key is a method only defined for SymPy objects, default_sort_key will accept anything as an argument so it is more robust as a sorting key. For the following, using key= lambda i: i.sort_key() would fail because 2 doesn't have a sort_key method; that's why default_sort_key is used. Note, that it also handles sympification of non-string items likes ints: >>> a = [2, I, -I] >>> sorted(a, key=default_sort_key) [2, -I, I] The returned key can be used anywhere that a key can be specified for a function, e.g. 
sort, min, max, etc...: >>> a.sort(key=default_sort_key); a[0] 2 >>> min(a, key=default_sort_key) 2 Note ---- The key returned is useful for getting items into a canonical order that will be the same across platforms. It is not directly useful for sorting lists of expressions: >>> a, b = x, 1/x Since ``a`` has only 1 term, its value of sort_key is unaffected by ``order``: >>> a.sort_key() == a.sort_key('rev-lex') True If ``a`` and ``b`` are combined then the key will differ because there are terms that can be ordered: >>> eq = a + b >>> eq.sort_key() == eq.sort_key('rev-lex') False >>> eq.as_ordered_terms() [x, 1/x] >>> eq.as_ordered_terms('rev-lex') [1/x, x] But since the keys for each of these terms are independent of ``order``'s value, they don't sort differently when they appear separately in a list: >>> sorted(eq.args, key=default_sort_key) [1/x, x] >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex')) [1/x, x] The order of terms obtained when using these keys is the order that would be obtained if those terms were *factors* in a product. Although it is useful for quickly putting expressions in canonical order, it does not sort expressions based on their complexity defined by the number of operations, power of variables and others: >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key) [sin(x)*cos(x), sin(x)] >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key) [sqrt(x), x, x**2, x**3] See Also ======== ordered, sympy.core.expr.as_ordered_factors, sympy.core.expr.as_ordered_terms """ from .singleton import S from .basic import Basic from .sympify import sympify, SympifyError from .compatibility import iterable if isinstance(item, Basic): return item.sort_key(order=order) if iterable(item, exclude=str): if isinstance(item, dict): args = item.items() unordered = True elif isinstance(item, set): args = item unordered = True else: # e.g. tuple, list args = list(item) unordered = False args = [default_sort_key(arg, order=order) for arg in args] if unordered: # e.g. dict, set args = sorted(args) cls_index, args = 10, (len(args), tuple(args)) else: if not isinstance(item, str): try: item = sympify(item, strict=True) except SympifyError: # e.g. lambda x: x pass else: if isinstance(item, Basic): # e.g int -> Integer return default_sort_key(item) # e.g. UndefinedFunction # e.g. str cls_index, args = 0, (1, (str(item),)) return (cls_index, 0, item.__class__.__name__ ), args, S.One.sort_key(), S.One def _nodes(e): """ A helper for ordered() which returns the node count of ``e`` which for Basic objects is the number of Basic nodes in the expression tree but for other objects is 1 (unless the object is an iterable or dict for which the sum of nodes is returned). """ from .basic import Basic from .function import Derivative if isinstance(e, Basic): if isinstance(e, Derivative): return _nodes(e.expr) + len(e.variables) return e.count(Basic) elif iterable(e): return 1 + sum(_nodes(ei) for ei in e) elif isinstance(e, dict): return 1 + sum(_nodes(k) + _nodes(v) for k, v in e.items()) else: return 1 def ordered(seq, keys=None, default=True, warn=False): """Return an iterator of the seq where keys are used to break ties in a conservative fashion: if, after applying a key, there are no ties then no other keys will be computed. Two default keys will be applied if 1) keys are not provided or 2) the given keys don't resolve all ties (but only if ``default`` is True). 
The two keys are ``_nodes`` (which places smaller expressions before large) and ``default_sort_key`` which (if the ``sort_key`` for an object is defined properly) should resolve any ties. If ``warn`` is True then an error will be raised if there were no keys remaining to break ties. This can be used if it was expected that there should be no ties between items that are not identical. Examples ======== >>> from sympy.utilities.iterables import ordered >>> from sympy import count_ops >>> from sympy.abc import x, y The count_ops is not sufficient to break ties in this list and the first two items appear in their original order (i.e. the sorting is stable): >>> list(ordered([y + 2, x + 2, x**2 + y + 3], ... count_ops, default=False, warn=False)) ... [y + 2, x + 2, x**2 + y + 3] The default_sort_key allows the tie to be broken: >>> list(ordered([y + 2, x + 2, x**2 + y + 3])) ... [x + 2, y + 2, x**2 + y + 3] Here, sequences are sorted by length, then sum: >>> seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]], [ ... lambda x: len(x), ... lambda x: sum(x)]] ... >>> list(ordered(seq, keys, default=False, warn=False)) [[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]] If ``warn`` is True, an error will be raised if there were not enough keys to break ties: >>> list(ordered(seq, keys, default=False, warn=True)) Traceback (most recent call last): ... ValueError: not enough keys to break ties Notes ===== The decorated sort is one of the fastest ways to sort a sequence for which special item comparison is desired: the sequence is decorated, sorted on the basis of the decoration (e.g. making all letters lower case) and then undecorated. If one wants to break ties for items that have the same decorated value, a second key can be used. But if the second key is expensive to compute then it is inefficient to decorate all items with both keys: only those items having identical first key values need to be decorated. This function applies keys successively only when needed to break ties. By yielding an iterator, use of the tie-breaker is delayed as long as possible. This function is best used in cases when use of the first key is expected to be a good hashing function; if there are no unique hashes from application of a key, then that key should not have been used. The exception, however, is that even if there are many collisions, if the first group is small and one does not need to process all items in the list then time will not be wasted sorting what one was not interested in. For example, if one were looking for the minimum in a list and there were several criteria used to define the sort order, then this function would be good at returning that quickly if the first group of candidates is small relative to the number of items being processed. """ d = defaultdict(list) if keys: if not isinstance(keys, (list, tuple)): keys = [keys] keys = list(keys) f = keys.pop(0) for a in seq: d[f(a)].append(a) else: if not default: raise ValueError('if default=False then keys must be provided') d[None].extend(seq) for k in sorted(d.keys()): if len(d[k]) > 1: if keys: d[k] = ordered(d[k], keys, default, warn) elif default: d[k] = ordered(d[k], (_nodes, default_sort_key,), default=False, warn=warn) elif warn: from sympy.utilities.iterables import uniq u = list(uniq(d[k])) if len(u) > 1: raise ValueError( 'not enough keys to break ties: %s' % u) yield from d[k] d.pop(k) # If HAS_GMPY is 0, no supported version of gmpy is available. Otherwise, # HAS_GMPY contains the major version number of gmpy; i.e. 
1 for gmpy, and # 2 for gmpy2. # Versions of gmpy prior to 1.03 do not work correctly with int(largempz) # For example, int(gmpy.mpz(2**256)) would raise OverflowError. # See issue 4980. # Minimum version of gmpy changed to 1.13 to allow a single code base to also # work with gmpy2. def _getenv(key, default=None): from os import getenv return getenv(key, default) GROUND_TYPES = _getenv('SYMPY_GROUND_TYPES', 'auto').lower() HAS_GMPY = 0 if GROUND_TYPES != 'python': # Don't try to import gmpy2 if ground types is set to gmpy1. This is # primarily intended for testing. if GROUND_TYPES != 'gmpy1': gmpy = import_module('gmpy2', min_module_version='2.0.0', module_version_attr='version', module_version_attr_call_args=()) if gmpy: HAS_GMPY = 2 else: GROUND_TYPES = 'gmpy' if not HAS_GMPY: gmpy = import_module('gmpy', min_module_version='1.13', module_version_attr='version', module_version_attr_call_args=()) if gmpy: HAS_GMPY = 1 else: gmpy = None if GROUND_TYPES == 'auto': if HAS_GMPY: GROUND_TYPES = 'gmpy' else: GROUND_TYPES = 'python' if GROUND_TYPES == 'gmpy' and not HAS_GMPY: from warnings import warn warn("gmpy library is not installed, switching to 'python' ground types") GROUND_TYPES = 'python' # SYMPY_INTS is a tuple containing the base types for valid integer types. SYMPY_INTS = (int, ) # type: Tuple[Type, ...] if GROUND_TYPES == 'gmpy': SYMPY_INTS += (type(gmpy.mpz(0)),) from time import perf_counter as clock
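# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): a minimal, hedged
# demonstration of a few helpers defined here, assuming SymPy itself is
# importable so that sympy.core.compatibility resolves.
if __name__ == "__main__":
    from sympy.core.compatibility import as_int, iterable, ordered

    assert as_int(3.0, strict=False) == 3            # exact floats pass the non-strict check
    assert iterable([1, 2]) and not iterable("ab")   # strings are excluded by default
    # keys are applied successively to break ties; a single length key suffices here
    assert list(ordered([[1, 2, 1], [2], [1]], keys=lambda s: len(s),
                        default=False, warn=False)) == [[2], [1], [1, 2, 1]]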
fab8245e18f919c63401981307821a1d078c88fc99f825f6b5a8f0cfddaff043
"""sympify -- convert objects SymPy internal format""" import typing if typing.TYPE_CHECKING: from typing import Any, Callable, Dict, Type from inspect import getmro from .compatibility import iterable from .parameters import global_parameters class SympifyError(ValueError): def __init__(self, expr, base_exc=None): self.expr = expr self.base_exc = base_exc def __str__(self): if self.base_exc is None: return "SympifyError: %r" % (self.expr,) return ("Sympify of expression '%s' failed, because of exception being " "raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc))) # See sympify docstring. converter = {} # type: Dict[Type[Any], Callable[[Any], Basic]] class CantSympify: """ Mix in this trait to a class to disallow sympification of its instances. Examples ======== >>> from sympy.core.sympify import sympify, CantSympify >>> class Something(dict): ... pass ... >>> sympify(Something()) {} >>> class Something(dict, CantSympify): ... pass ... >>> sympify(Something()) Traceback (most recent call last): ... SympifyError: SympifyError: {} """ pass def _is_numpy_instance(a): """ Checks if an object is an instance of a type from the numpy module. """ # This check avoids unnecessarily importing NumPy. We check the whole # __mro__ in case any base type is a numpy type. return any(type_.__module__ == 'numpy' for type_ in type(a).__mro__) def _convert_numpy_types(a, **sympify_args): """ Converts a numpy datatype input to an appropriate SymPy type. """ import numpy as np if not isinstance(a, np.floating): if np.iscomplex(a): return converter[complex](a.item()) else: return sympify(a.item(), **sympify_args) else: try: from sympy.core.numbers import Float prec = np.finfo(a).nmant + 1 # E.g. double precision means prec=53 but nmant=52 # Leading bit of mantissa is always 1, so is not stored a = str(list(np.reshape(np.asarray(a), (1, np.size(a)))[0]))[1:-1] return Float(a, precision=prec) except NotImplementedError: raise SympifyError('Translation for numpy float : %s ' 'is not implemented' % a) def sympify(a, locals=None, convert_xor=True, strict=False, rational=False, evaluate=None): """ Converts an arbitrary expression to a type that can be used inside SymPy. Explanation =========== It will convert Python ints into instances of sympy.Integer, floats into instances of sympy.Float, etc. It is also able to coerce symbolic expressions which inherit from Basic. This can be useful in cooperation with SAGE. .. warning:: Note that this function uses ``eval``, and thus shouldn't be used on unsanitized input. If the argument is already a type that SymPy understands, it will do nothing but return that value. This can be used at the beginning of a function to ensure you are working with the correct type. Examples ======== >>> from sympy import sympify >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True If the expression could not be converted, a SympifyError is raised. >>> sympify("x***2") Traceback (most recent call last): ... SympifyError: SympifyError: "could not parse 'x***2'" Locals ------ The sympification happens with access to everything that is loaded by ``from sympy import *``; anything used in a string that is not defined by that import will be converted to a symbol. 
In the following, the ``bitcount`` function is treated as a symbol and the ``O`` is interpreted as the Order object (used with series) and it raises an error when used improperly: >>> s = 'bitcount(42)' >>> sympify(s) bitcount(42) >>> sympify("O(x)") O(x) >>> sympify("O + 1") Traceback (most recent call last): ... TypeError: unbound method... In order to have ``bitcount`` be recognized it can be imported into a namespace dictionary and passed as locals: >>> ns = {} >>> exec('from sympy.core.evalf import bitcount', ns) >>> sympify(s, locals=ns) 6 In order to have the ``O`` interpreted as a Symbol, identify it as such in the namespace dictionary. This can be done in a variety of ways; all three of the following are possibilities: >>> from sympy import Symbol >>> ns["O"] = Symbol("O") # method 1 >>> exec('from sympy.abc import O', ns) # method 2 >>> ns.update(dict(O=Symbol("O"))) # method 3 >>> sympify("O + 1", locals=ns) O + 1 If you want *all* single-letter and Greek-letter variables to be symbols then you can use the clashing-symbols dictionaries that have been defined there as private variables: _clash1 (single-letter variables), _clash2 (the multi-letter Greek names) or _clash (both single and multi-letter names that are defined in abc). >>> from sympy.abc import _clash1 >>> _clash1 {'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S} >>> sympify('I & Q', _clash1) I & Q Strict ------ If the option ``strict`` is set to ``True``, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. >>> print(sympify(None)) None >>> sympify(None, strict=True) Traceback (most recent call last): ... SympifyError: SympifyError: None Evaluation ---------- If the option ``evaluate`` is set to ``False``, then arithmetic and operators will be converted into their SymPy equivalents and the ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will be denested first. This is done via an AST transformation that replaces operators with their SymPy equivalents, so if an operand redefines any of those operations, the redefined operators will not be used. If argument a is not a string, the mathematical expression is evaluated before being passed to sympify, so adding evaluate=False will still return the evaluated result of expression. >>> sympify('2**2 / 3 + 5') 19/3 >>> sympify('2**2 / 3 + 5', evaluate=False) 2**2/3 + 5 >>> sympify('4/2+7', evaluate=True) 9 >>> sympify('4/2+7', evaluate=False) 4/2 + 7 >>> sympify(4/2+7, evaluate=False) 9.00000000000000 Extending --------- To extend ``sympify`` to convert custom objects (not derived from ``Basic``), just define a ``_sympy_`` method to your class. You can do that even to classes that you do not own by subclassing or adding the method at runtime. >>> from sympy import Matrix >>> class MyList1(object): ... def __iter__(self): ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] ... def _sympy_(self): return Matrix(self) >>> sympify(MyList1()) Matrix([ [1], [2]]) If you do not have control over the class definition you could also use the ``converter`` global dictionary. The key is the class and the value is a function that takes a single argument and returns the desired SymPy object, e.g. ``converter[MyList] = lambda x: Matrix(x)``. >>> class MyList2(object): # XXX Do not do this if you control the class! ... def __iter__(self): # Use _sympy_! ... yield 1 ... yield 2 ... return ... 
def __getitem__(self, i): return list(self)[i] >>> from sympy.core.sympify import converter >>> converter[MyList2] = lambda x: Matrix(x) >>> sympify(MyList2()) Matrix([ [1], [2]]) Notes ===== The keywords ``rational`` and ``convert_xor`` are only used when the input is a string. convert_xor ----------- >>> sympify('x^y',convert_xor=True) x**y >>> sympify('x^y',convert_xor=False) x ^ y rational -------- >>> sympify('0.1',rational=False) 0.1 >>> sympify('0.1',rational=True) 1/10 Sometimes autosimplification during sympification results in expressions that are very different in structure than what was entered. Until such autosimplification is no longer done, the ``kernS`` function might be of some use. In the example below you can see how an expression reduces to -1 by autosimplification, but does not do so when ``kernS`` is used. >>> from sympy.core.sympify import kernS >>> from sympy.abc import x >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 -1 >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1' >>> sympify(s) -1 >>> kernS(s) -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 Parameters ========== a : - any object defined in SymPy - standard numeric python types: int, long, float, Decimal - strings (like "0.09", "2e-19" or 'sin(x)') - booleans, including ``None`` (will leave ``None`` unchanged) - dict, lists, sets or tuples containing any of the above convert_xor : boolean, optional If true, treats XOR as exponentiation. If False, treats XOR as XOR itself. Used only when input is a string. locals : any object defined in SymPy, optional In order to have strings be recognized it can be imported into a namespace dictionary and passed as locals. strict : boolean, optional If the option strict is set to True, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. rational : boolean, optional If true, converts floats into Rational. If false, it lets floats remain as it is. Used only when input is a string. evaluate : boolean, optional If False, then arithmetic and operators will be converted into their SymPy equivalents. If True the expression will be evaluated and the result will be returned. """ # XXX: If a is a Basic subclass rather than instance (e.g. sin rather than # sin(x)) then a.__sympy__ will be the property. Only on the instance will # a.__sympy__ give the *value* of the property (True). Since sympify(sin) # was used for a long time we allow it to pass. However if strict=True as # is the case in internal calls to _sympify then we only allow # is_sympy=True. 
# # https://github.com/sympy/sympy/issues/20124 is_sympy = getattr(a, '__sympy__', None) if is_sympy is True: return a elif is_sympy is not None: if not strict: return a else: raise SympifyError(a) if isinstance(a, CantSympify): raise SympifyError(a) cls = getattr(a, "__class__", None) if cls is None: cls = type(a) # Probably an old-style class conv = converter.get(cls, None) if conv is not None: return conv(a) for superclass in getmro(cls): try: return converter[superclass](a) except KeyError: continue if cls is type(None): if strict: raise SympifyError(a) else: return a if evaluate is None: evaluate = global_parameters.evaluate # Support for basic numpy datatypes if _is_numpy_instance(a): import numpy as np if np.isscalar(a): return _convert_numpy_types(a, locals=locals, convert_xor=convert_xor, strict=strict, rational=rational, evaluate=evaluate) _sympy_ = getattr(a, "_sympy_", None) if _sympy_ is not None: try: return a._sympy_() # XXX: Catches AttributeError: 'SympyConverter' object has no # attribute 'tuple' # This is probably a bug somewhere but for now we catch it here. except AttributeError: pass if not strict: # Put numpy array conversion _before_ float/int, see # <https://github.com/sympy/sympy/issues/13924>. flat = getattr(a, "flat", None) if flat is not None: shape = getattr(a, "shape", None) if shape is not None: from ..tensor.array import Array return Array(a.flat, a.shape) # works with e.g. NumPy arrays if not isinstance(a, str): if _is_numpy_instance(a): import numpy as np assert not isinstance(a, np.number) if isinstance(a, np.ndarray): # Scalar arrays (those with zero dimensions) have sympify # called on the scalar element. if a.ndim == 0: try: return sympify(a.item(), locals=locals, convert_xor=convert_xor, strict=strict, rational=rational, evaluate=evaluate) except SympifyError: pass else: # float and int can coerce size-one numpy arrays to their lone # element. See issue https://github.com/numpy/numpy/issues/10404. for coerce in (float, int): try: return sympify(coerce(a)) except (TypeError, ValueError, AttributeError, SympifyError): continue if strict: raise SympifyError(a) if iterable(a): try: return type(a)([sympify(x, locals=locals, convert_xor=convert_xor, rational=rational) for x in a]) except TypeError: # Not all iterables are rebuildable with their type. pass if isinstance(a, dict): try: return type(a)([sympify(x, locals=locals, convert_xor=convert_xor, rational=rational) for x in a.items()]) except TypeError: # Not all iterables are rebuildable with their type. 
pass if not isinstance(a, str): try: a = str(a) except Exception as exc: raise SympifyError(a, exc) from sympy.utilities.exceptions import SymPyDeprecationWarning SymPyDeprecationWarning( feature="String fallback in sympify", useinstead= \ 'sympify(str(obj)) or ' + \ 'sympy.core.sympify.converter or obj._sympy_', issue=18066, deprecated_since_version='1.6' ).warn() from sympy.parsing.sympy_parser import (parse_expr, TokenError, standard_transformations) from sympy.parsing.sympy_parser import convert_xor as t_convert_xor from sympy.parsing.sympy_parser import rationalize as t_rationalize transformations = standard_transformations if rational: transformations += (t_rationalize,) if convert_xor: transformations += (t_convert_xor,) try: a = a.replace('\n', '') expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate) except (TokenError, SyntaxError) as exc: raise SympifyError('could not parse %r' % a, exc) return expr def _sympify(a): """ Short version of sympify for internal usage for __add__ and __eq__ methods where it is ok to allow some things (like Python integers and floats) in the expression. This excludes things (like strings) that are unwise to allow into such an expression. >>> from sympy import Integer >>> Integer(1) == 1 True >>> Integer(1) == '1' False >>> from sympy.abc import x >>> x + 1 x + 1 >>> x + '1' Traceback (most recent call last): ... TypeError: unsupported operand type(s) for +: 'Symbol' and 'str' see: sympify """ return sympify(a, strict=True) def kernS(s): """Use a hack to try keep autosimplification from distributing a a number into an Add; this modification doesn't prevent the 2-arg Mul from becoming an Add, however. Examples ======== >>> from sympy.core.sympify import kernS >>> from sympy.abc import x, y The 2-arg Mul distributes a number (or minus sign) across the terms of an expression, but kernS will prevent that: >>> 2*(x + y), -(x + 1) (2*x + 2*y, -x - 1) >>> kernS('2*(x + y)') 2*(x + y) >>> kernS('-(x + 1)') -(x + 1) If use of the hack fails, the un-hacked string will be passed to sympify... and you get what you get. XXX This hack should not be necessary once issue 4596 has been resolved. """ import string from random import choice from sympy.core.symbol import Symbol hit = False quoted = '"' in s or "'" in s if '(' in s and not quoted: if s.count('(') != s.count(")"): raise SympifyError('unmatched left parenthesis') # strip all space from s s = ''.join(s.split()) olds = s # now use space to represent a symbol that # will # step 1. turn potential 2-arg Muls into 3-arg versions # 1a. *( -> * *( s = s.replace('*(', '* *(') # 1b. close up exponentials s = s.replace('** *', '**') # 2. handle the implied multiplication of a negated # parenthesized expression in two steps # 2a: -(...) --> -( *(...) target = '-( *(' s = s.replace('-(', target) # 2b: double the matching closing parenthesis # -( *(...) --> -( *(...)) i = nest = 0 assert target.endswith('(') # assumption below while True: j = s.find(target, i) if j == -1: break j += len(target) - 1 for j in range(j, len(s)): if s[j] == "(": nest += 1 elif s[j] == ")": nest -= 1 if nest == 0: break s = s[:j] + ")" + s[j:] i = j + 2 # the first char after 2nd ) if ' ' in s: # get a unique kern kern = '_' while kern in s: kern += choice(string.ascii_letters + string.digits) s = s.replace(' ', kern) hit = kern in s else: hit = False for i in range(2): try: expr = sympify(s) break except TypeError: # the kern might cause unknown errors... 
if hit: s = olds # maybe it didn't like the kern; use un-kerned s hit = False continue expr = sympify(s) # let original error raise if not hit: return expr rep = {Symbol(kern): 1} def _clear(expr): if isinstance(expr, (list, tuple, set)): return type(expr)([_clear(e) for e in expr]) if hasattr(expr, 'subs'): return expr.subs(rep, hack2=True) return expr expr = _clear(expr) # hope that kern is not there anymore return expr # Avoid circular import from .basic import Basic
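# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the "Extending" section of
# the ``sympify`` docstring above describes the ``converter`` registry, which is
# consulted before ``_sympy_`` or the string parser.  The class ``Point2`` is
# purely illustrative.

from sympy import Tuple
from sympy.core.sympify import converter, sympify

class Point2:
    def __init__(self, x, y):
        self.x, self.y = x, y

# Register a conversion for the third-party type.
converter[Point2] = lambda p: Tuple(p.x, p.y)

assert sympify(Point2(1, 2)) == Tuple(1, 2)
# ---------------------------------------------------------------------------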
fc6f62ba73371ce8fa74ce95ae0ed4c8f200cf1097999d76bc8e975cae4dc536
"""Module for SymPy containers (SymPy objects that store other SymPy objects) The containers implemented in this module are subclassed to Basic. They are supposed to work seamlessly within the SymPy framework. """ from collections import OrderedDict from collections.abc import MutableSet from sympy.core.basic import Basic from sympy.core.compatibility import as_int from sympy.core.sympify import _sympify, sympify, converter, SympifyError from sympy.utilities.iterables import iterable class Tuple(Basic): """ Wrapper around the builtin tuple object. Explanation =========== The Tuple is a subclass of Basic, so that it works well in the SymPy framework. The wrapped tuple is available as self.args, but you can also access elements or slices with [:] syntax. Parameters ========== sympify : bool If ``False``, ``sympify`` is not called on ``args``. This can be used for speedups for very large tuples where the elements are known to already be sympy objects. Examples ======== >>> from sympy import symbols >>> from sympy.core.containers import Tuple >>> a, b, c, d = symbols('a b c d') >>> Tuple(a, b, c)[1:] (b, c) >>> Tuple(a, b, c).subs(a, d) (d, b, c) """ def __new__(cls, *args, **kwargs): if kwargs.get('sympify', True): args = (sympify(arg) for arg in args) obj = Basic.__new__(cls, *args) return obj def __getitem__(self, i): if isinstance(i, slice): indices = i.indices(len(self)) return Tuple(*(self.args[j] for j in range(*indices))) return self.args[i] def __len__(self): return len(self.args) def __contains__(self, item): return item in self.args def __iter__(self): return iter(self.args) def __add__(self, other): if isinstance(other, Tuple): return Tuple(*(self.args + other.args)) elif isinstance(other, tuple): return Tuple(*(self.args + other)) else: return NotImplemented def __radd__(self, other): if isinstance(other, Tuple): return Tuple(*(other.args + self.args)) elif isinstance(other, tuple): return Tuple(*(other + self.args)) else: return NotImplemented def __mul__(self, other): try: n = as_int(other) except ValueError: raise TypeError("Can't multiply sequence by non-integer of type '%s'" % type(other)) return self.func(*(self.args*n)) __rmul__ = __mul__ def __eq__(self, other): if isinstance(other, Basic): return super().__eq__(other) return self.args == other def __ne__(self, other): if isinstance(other, Basic): return super().__ne__(other) return self.args != other def __hash__(self): return hash(self.args) def _to_mpmath(self, prec): return tuple(a._to_mpmath(prec) for a in self.args) def __lt__(self, other): return _sympify(self.args < other.args) def __le__(self, other): return _sympify(self.args <= other.args) # XXX: Basic defines count() as something different, so we can't # redefine it here. Originally this lead to cse() test failure. def tuple_count(self, value): """T.count(value) -> integer -- return number of occurrences of value""" return self.args.count(value) def index(self, value, start=None, stop=None): """Searches and returns the first index of the value.""" # XXX: One would expect: # # return self.args.index(value, start, stop) # # here. Any trouble with that? 
Yes: # # >>> (1,).index(1, None, None) # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # TypeError: slice indices must be integers or None or have an __index__ method # # See: http://bugs.python.org/issue13340 if start is None and stop is None: return self.args.index(value) elif stop is None: return self.args.index(value, start) else: return self.args.index(value, start, stop) converter[tuple] = lambda tup: Tuple(*tup) def tuple_wrapper(method): """ Decorator that converts any tuple in the function arguments into a Tuple. Explanation =========== The motivation for this is to provide simple user interfaces. The user can call a function with regular tuples in the argument, and the wrapper will convert them to Tuples before handing them to the function. Explanation =========== >>> from sympy.core.containers import tuple_wrapper >>> def f(*args): ... return args >>> g = tuple_wrapper(f) The decorated function g sees only the Tuple argument: >>> g(0, (1, 2), 3) (0, (1, 2), 3) """ def wrap_tuples(*args, **kw_args): newargs = [] for arg in args: if type(arg) is tuple: newargs.append(Tuple(*arg)) else: newargs.append(arg) return method(*newargs, **kw_args) return wrap_tuples class Dict(Basic): """ Wrapper around the builtin dict object Explanation =========== The Dict is a subclass of Basic, so that it works well in the SymPy framework. Because it is immutable, it may be included in sets, but its values must all be given at instantiation and cannot be changed afterwards. Otherwise it behaves identically to the Python dict. Examples ======== >>> from sympy import Symbol >>> from sympy.core.containers import Dict >>> D = Dict({1: 'one', 2: 'two'}) >>> for key in D: ... if key == 1: ... print('%s %s' % (key, D[key])) 1 one The args are sympified so the 1 and 2 are Integers and the values are Symbols. Queries automatically sympify args so the following work: >>> 1 in D True >>> D.has(Symbol('one')) # searches keys and values True >>> 'one' in D # not in the keys False >>> D[1] one """ def __new__(cls, *args): if len(args) == 1 and isinstance(args[0], (dict, Dict)): items = [Tuple(k, v) for k, v in args[0].items()] elif iterable(args) and all(len(arg) == 2 for arg in args): items = [Tuple(k, v) for k, v in args] else: raise TypeError('Pass Dict args as Dict((k1, v1), ...) or Dict({k1: v1, ...})') elements = frozenset(items) obj = Basic.__new__(cls, elements) obj.elements = elements obj._dict = dict(items) # In case Tuple decides it wants to sympify return obj def __getitem__(self, key): """x.__getitem__(y) <==> x[y]""" try: key = _sympify(key) except SympifyError: raise KeyError(key) return self._dict[key] def __setitem__(self, key, value): raise NotImplementedError("SymPy Dicts are Immutable") @property def args(self): """Returns a tuple of arguments of 'self'. See Also ======== sympy.core.basic.Basic.args """ return tuple(self.elements) def items(self): '''Returns a set-like object providing a view on dict's items. 
''' return self._dict.items() def keys(self): '''Returns the list of the dict's keys.''' return self._dict.keys() def values(self): '''Returns the list of the dict's values.''' return self._dict.values() def __iter__(self): '''x.__iter__() <==> iter(x)''' return iter(self._dict) def __len__(self): '''x.__len__() <==> len(x)''' return self._dict.__len__() def get(self, key, default=None): '''Returns the value for key if the key is in the dictionary.''' try: key = _sympify(key) except SympifyError: return default return self._dict.get(key, default) def __contains__(self, key): '''D.__contains__(k) -> True if D has a key k, else False''' try: key = _sympify(key) except SympifyError: return False return key in self._dict def __lt__(self, other): return _sympify(self.args < other.args) @property def _sorted_args(self): from sympy.utilities import default_sort_key return tuple(sorted(self.args, key=default_sort_key)) # this handles dict, defaultdict, OrderedDict converter[dict] = lambda d: Dict(*d.items()) class OrderedSet(MutableSet): def __init__(self, iterable=None): if iterable: self.map = OrderedDict((item, None) for item in iterable) else: self.map = OrderedDict() def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): self.map[key] = None def discard(self, key): self.map.pop(key) def pop(self, last=True): return self.map.popitem(last=last)[0] def __iter__(self): yield from self.map.keys() def __repr__(self): if not self.map: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.map.keys())) def intersection(self, other): result = [] for val in self: if val in other: result.append(val) return self.__class__(result) def difference(self, other): result = [] for val in self: if val not in other: result.append(val) return self.__class__(result) def update(self, iterable): for val in iterable: self.add(val)
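# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module) illustrating the containers
# defined above: Tuple and Dict sympify their contents and, being Basic, can be
# nested inside other SymPy expressions; Dict also sympifies lookup keys.

from sympy import Symbol
from sympy.core.containers import Dict, Tuple

x = Symbol('x')
t = Tuple(1, x, x + 1)
d = Dict({x: 1, x + 1: 2})

assert t[0].is_Integer          # the plain 1 was sympified to Integer(1)
assert d[x] == 1                # keys are sympified on lookup as well
assert (x + 1) in d             # __contains__ sympifies before checking
# Dict is immutable: d[x] = 5 would raise NotImplementedError.
# ---------------------------------------------------------------------------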
190a3d748fc1bc026d60754515f9aba0daf42fc77bd54788fad53b48626072d9
""" Base class to provide str and repr hooks that `init_printing` can overwrite. This is exposed publicly in the `printing.defaults` module, but cannot be defined there without causing circular imports. """ class Printable: """ The default implementation of printing for SymPy classes. This implements a hack that allows us to print elements of built-in Python containers in a readable way. Natively Python uses ``repr()`` even if ``str()`` was explicitly requested. Mix in this trait into a class to get proper default printing. This also adds support for LaTeX printing in jupyter notebooks. """ # Since this class is used as a mixin we set empty slots. That means that # instances of any subclasses that use slots will not need to have a # __dict__. __slots__ = () # Note, we always use the default ordering (lex) in __str__ and __repr__, # regardless of the global setting. See issue 5487. def __str__(self): from sympy.printing.str import sstr return sstr(self, order=None) __repr__ = __str__ def _repr_disabled(self): """ No-op repr function used to disable jupyter display hooks. When :func:`sympy.init_printing` is used to disable certain display formats, this function is copied into the appropriate ``_repr_*_`` attributes. While we could just set the attributes to `None``, doing it this way allows derived classes to call `super()`. """ return None # We don't implement _repr_png_ here because it would add a large amount of # data to any notebook containing SymPy expressions, without adding # anything useful to the notebook. It can still enabled manually, e.g., # for the qtconsole, with init_printing(). _repr_png_ = _repr_disabled _repr_svg_ = _repr_disabled def _repr_latex_(self): """ IPython/Jupyter LaTeX printing To change the behavior of this (e.g., pass in some settings to LaTeX), use init_printing(). init_printing() will also enable LaTeX printing for built in numeric types like ints and container types that contain SymPy objects, like lists and dictionaries of expressions. """ from sympy.printing.latex import latex s = latex(self, mode='plain') return "$\\displaystyle %s$" % s
825787984b0254baad77f56866689af391e6a094947502168c1f4bc81a3a7887
from collections import defaultdict from functools import cmp_to_key, reduce import operator from .sympify import sympify from .basic import Basic from .singleton import S from .operations import AssocOp, AssocOpDispatcher from .cache import cacheit from .logic import fuzzy_not, _fuzzy_group from .expr import Expr from .parameters import global_parameters # internal marker to indicate: # "there are still non-commutative objects -- don't forget to process them" class NC_Marker: is_Order = False is_Mul = False is_Number = False is_Poly = False is_commutative = False # Key for sorting commutative args in canonical order _args_sortkey = cmp_to_key(Basic.compare) def _mulsort(args): # in-place sorting of args args.sort(key=_args_sortkey) def _unevaluated_Mul(*args): """Return a well-formed unevaluated Mul: Numbers are collected and put in slot 0, any arguments that are Muls will be flattened, and args are sorted. Use this when args have changed but you still want to return an unevaluated Mul. Examples ======== >>> from sympy.core.mul import _unevaluated_Mul as uMul >>> from sympy import S, sqrt, Mul >>> from sympy.abc import x >>> a = uMul(*[S(3.0), x, S(2)]) >>> a.args[0] 6.00000000000000 >>> a.args[1] x Two unevaluated Muls with the same arguments will always compare as equal during testing: >>> m = uMul(sqrt(2), sqrt(3)) >>> m == uMul(sqrt(3), sqrt(2)) True >>> u = Mul(sqrt(3), sqrt(2), evaluate=False) >>> m == uMul(u) True >>> m == Mul(*m.args) False """ args = list(args) newargs = [] ncargs = [] co = S.One while args: a = args.pop() if a.is_Mul: c, nc = a.args_cnc() args.extend(c) if nc: ncargs.append(Mul._from_args(nc)) elif a.is_Number: co *= a else: newargs.append(a) _mulsort(newargs) if co is not S.One: newargs.insert(0, co) if ncargs: newargs.append(Mul._from_args(ncargs)) return Mul._from_args(newargs) class Mul(Expr, AssocOp): __slots__ = () is_Mul = True _args_type = Expr def __neg__(self): c, args = self.as_coeff_mul() c = -c if c is not S.One: if args[0].is_Number: args = list(args) if c is S.NegativeOne: args[0] = -args[0] else: args[0] *= c else: args = (c,) + args return self._from_args(args, self.is_commutative) @classmethod def flatten(cls, seq): """Return commutative, noncommutative and order arguments by combining related terms. Notes ===== * In an expression like ``a*b*c``, python process this through sympy as ``Mul(Mul(a, b), c)``. This can have undesirable consequences. - Sometimes terms are not combined as one would like: {c.f. https://github.com/sympy/sympy/issues/4596} >>> from sympy import Mul, sqrt >>> from sympy.abc import x, y, z >>> 2*(x + 1) # this is the 2-arg Mul behavior 2*x + 2 >>> y*(x + 1)*2 2*y*(x + 1) >>> 2*(x + 1)*y # 2-arg result will be obtained first y*(2*x + 2) >>> Mul(2, x + 1, y) # all 3 args simultaneously processed 2*y*(x + 1) >>> 2*((x + 1)*y) # parentheses can control this behavior 2*y*(x + 1) Powers with compound bases may not find a single base to combine with unless all arguments are processed at once. Post-processing may be necessary in such cases. {c.f. https://github.com/sympy/sympy/issues/5728} >>> a = sqrt(x*sqrt(y)) >>> a**3 (x*sqrt(y))**(3/2) >>> Mul(a,a,a) (x*sqrt(y))**(3/2) >>> a*a*a x*sqrt(y)*sqrt(x*sqrt(y)) >>> _.subs(a.base, z).subs(z, a.base) (x*sqrt(y))**(3/2) - If more than two terms are being multiplied then all the previous terms will be re-processed for each new argument. 
So if each of ``a``, ``b`` and ``c`` were :class:`Mul` expression, then ``a*b*c`` (or building up the product with ``*=``) will process all the arguments of ``a`` and ``b`` twice: once when ``a*b`` is computed and again when ``c`` is multiplied. Using ``Mul(a, b, c)`` will process all arguments once. * The results of Mul are cached according to arguments, so flatten will only be called once for ``Mul(a, b, c)``. If you can structure a calculation so the arguments are most likely to be repeats then this can save time in computing the answer. For example, say you had a Mul, M, that you wished to divide by ``d[i]`` and multiply by ``n[i]`` and you suspect there are many repeats in ``n``. It would be better to compute ``M*n[i]/d[i]`` rather than ``M/d[i]*n[i]`` since every time n[i] is a repeat, the product, ``M*n[i]`` will be returned without flattening -- the cached value will be returned. If you divide by the ``d[i]`` first (and those are more unique than the ``n[i]``) then that will create a new Mul, ``M/d[i]`` the args of which will be traversed again when it is multiplied by ``n[i]``. {c.f. https://github.com/sympy/sympy/issues/5706} This consideration is moot if the cache is turned off. NB -- The validity of the above notes depends on the implementation details of Mul and flatten which may change at any time. Therefore, you should only consider them when your code is highly performance sensitive. Removal of 1 from the sequence is already handled by AssocOp.__new__. """ from sympy.calculus.util import AccumBounds from sympy.matrices.expressions import MatrixExpr rv = None if len(seq) == 2: a, b = seq if b.is_Rational: a, b = b, a seq = [a, b] assert not a is S.One if not a.is_zero and a.is_Rational: r, b = b.as_coeff_Mul() if b.is_Add: if r is not S.One: # 2-arg hack # leave the Mul as a Mul? ar = a*r if ar is S.One: arb = b else: arb = cls(a*r, b, evaluate=False) rv = [arb], [], None elif global_parameters.distribute and b.is_commutative: r, b = b.as_coeff_Add() bargs = [_keep_coeff(a, bi) for bi in Add.make_args(b)] _addsort(bargs) ar = a*r if ar: bargs.insert(0, ar) bargs = [Add._from_args(bargs)] rv = bargs, [], None if rv: return rv # apply associativity, separate commutative part of seq c_part = [] # out: commutative factors nc_part = [] # out: non-commutative factors nc_seq = [] coeff = S.One # standalone term # e.g. 3 * ... c_powers = [] # (base,exp) n # e.g. (x,n) for x num_exp = [] # (num-base, exp) y # e.g. (3, y) for ... * 3 * ... neg1e = S.Zero # exponent on -1 extracted from Number-based Pow and I pnum_rat = {} # (num-base, Rat-exp) 1/2 # e.g. (3, 1/2) for ... * 3 * ... order_symbols = None # --- PART 1 --- # # "collect powers and coeff": # # o coeff # o c_powers # o num_exp # o neg1e # o pnum_rat # # NOTE: this is optimized for all-objects-are-commutative case for o in seq: # O(x) if o.is_Order: o, order_symbols = o.as_expr_variables(order_symbols) # Mul([...]) if o.is_Mul: if o.is_commutative: seq.extend(o.args) # XXX zerocopy? 
else: # NCMul can have commutative parts as well for q in o.args: if q.is_commutative: seq.append(q) else: nc_seq.append(q) # append non-commutative marker, so we don't forget to # process scheduled non-commutative objects seq.append(NC_Marker) continue # 3 elif o.is_Number: if o is S.NaN or coeff is S.ComplexInfinity and o.is_zero: # we know for sure the result will be nan return [S.NaN], [], None elif coeff.is_Number or isinstance(coeff, AccumBounds): # it could be zoo coeff *= o if coeff is S.NaN: # we know for sure the result will be nan return [S.NaN], [], None continue elif isinstance(o, AccumBounds): coeff = o.__mul__(coeff) continue elif o is S.ComplexInfinity: if not coeff: # 0 * zoo = NaN return [S.NaN], [], None coeff = S.ComplexInfinity continue elif o is S.ImaginaryUnit: neg1e += S.Half continue elif o.is_commutative: # e # o = b b, e = o.as_base_exp() # y # 3 if o.is_Pow: if b.is_Number: # get all the factors with numeric base so they can be # combined below, but don't combine negatives unless # the exponent is an integer if e.is_Rational: if e.is_Integer: coeff *= Pow(b, e) # it is an unevaluated power continue elif e.is_negative: # also a sign of an unevaluated power seq.append(Pow(b, e)) continue elif b.is_negative: neg1e += e b = -b if b is not S.One: pnum_rat.setdefault(b, []).append(e) continue elif b.is_positive or e.is_integer: num_exp.append((b, e)) continue c_powers.append((b, e)) # NON-COMMUTATIVE # TODO: Make non-commutative exponents not combine automatically else: if o is not NC_Marker: nc_seq.append(o) # process nc_seq (if any) while nc_seq: o = nc_seq.pop(0) if not nc_part: nc_part.append(o) continue # b c b+c # try to combine last terms: a * a -> a o1 = nc_part.pop() b1, e1 = o1.as_base_exp() b2, e2 = o.as_base_exp() new_exp = e1 + e2 # Only allow powers to combine if the new exponent is # not an Add. This allow things like a**2*b**3 == a**5 # if a.is_commutative == False, but prohibits # a**x*a**y and x**a*x**b from combining (x,y commute). if b1 == b2 and (not new_exp.is_Add): o12 = b1 ** new_exp # now o12 could be a commutative object if o12.is_commutative: seq.append(o12) continue else: nc_seq.insert(0, o12) else: nc_part.append(o1) nc_part.append(o) # We do want a combined exponent if it would not be an Add, such as # y 2y 3y # x * x -> x # We determine if two exponents have the same term by using # as_coeff_Mul. # # Unfortunately, this isn't smart enough to consider combining into # exponents that might already be adds, so things like: # z - y y # x * x will be left alone. This is because checking every possible # combination can slow things down. # gather exponents of common bases... def _gather(c_powers): common_b = {} # b:e for b, e in c_powers: co = e.as_coeff_Mul() common_b.setdefault(b, {}).setdefault( co[1], []).append(co[0]) for b, d in common_b.items(): for di, li in d.items(): d[di] = Add(*li) new_c_powers = [] for b, e in common_b.items(): new_c_powers.extend([(b, c*t) for t, c in e.items()]) return new_c_powers # in c_powers c_powers = _gather(c_powers) # and in num_exp num_exp = _gather(num_exp) # --- PART 2 --- # # o process collected powers (x**0 -> 1; x**1 -> x; otherwise Pow) # o combine collected powers (2**x * 3**x -> 6**x) # with numeric base # ................................ 
# now we have: # - coeff: # - c_powers: (b, e) # - num_exp: (2, e) # - pnum_rat: {(1/3, [1/3, 2/3, 1/4])} # 0 1 # x -> 1 x -> x # this should only need to run twice; if it fails because # it needs to be run more times, perhaps this should be # changed to a "while True" loop -- the only reason it # isn't such now is to allow a less-than-perfect result to # be obtained rather than raising an error or entering an # infinite loop for i in range(2): new_c_powers = [] changed = False for b, e in c_powers: if e.is_zero: # canceling out infinities yields NaN if (b.is_Add or b.is_Mul) and any(infty in b.args for infty in (S.ComplexInfinity, S.Infinity, S.NegativeInfinity)): return [S.NaN], [], None continue if e is S.One: if b.is_Number: coeff *= b continue p = b if e is not S.One: p = Pow(b, e) # check to make sure that the base doesn't change # after exponentiation; to allow for unevaluated # Pow, we only do so if b is not already a Pow if p.is_Pow and not b.is_Pow: bi = b b, e = p.as_base_exp() if b != bi: changed = True c_part.append(p) new_c_powers.append((b, e)) # there might have been a change, but unless the base # matches some other base, there is nothing to do if changed and len({ b for b, e in new_c_powers}) != len(new_c_powers): # start over again c_part = [] c_powers = _gather(new_c_powers) else: break # x x x # 2 * 3 -> 6 inv_exp_dict = {} # exp:Mul(num-bases) x x # e.g. x:6 for ... * 2 * 3 * ... for b, e in num_exp: inv_exp_dict.setdefault(e, []).append(b) for e, b in inv_exp_dict.items(): inv_exp_dict[e] = cls(*b) c_part.extend([Pow(b, e) for e, b in inv_exp_dict.items() if e]) # b, e -> e' = sum(e), b # {(1/5, [1/3]), (1/2, [1/12, 1/4]} -> {(1/3, [1/5, 1/2])} comb_e = {} for b, e in pnum_rat.items(): comb_e.setdefault(Add(*e), []).append(b) del pnum_rat # process them, reducing exponents to values less than 1 # and updating coeff if necessary else adding them to # num_rat for further processing num_rat = [] for e, b in comb_e.items(): b = cls(*b) if e.q == 1: coeff *= Pow(b, e) continue if e.p > e.q: e_i, ep = divmod(e.p, e.q) coeff *= Pow(b, e_i) e = Rational(ep, e.q) num_rat.append((b, e)) del comb_e # extract gcd of bases in num_rat # 2**(1/3)*6**(1/4) -> 2**(1/3+1/4)*3**(1/4) pnew = defaultdict(list) i = 0 # steps through num_rat which may grow while i < len(num_rat): bi, ei = num_rat[i] grow = [] for j in range(i + 1, len(num_rat)): bj, ej = num_rat[j] g = bi.gcd(bj) if g is not S.One: # 4**r1*6**r2 -> 2**(r1+r2) * 2**r1 * 3**r2 # this might have a gcd with something else e = ei + ej if e.q == 1: coeff *= Pow(g, e) else: if e.p > e.q: e_i, ep = divmod(e.p, e.q) # change e in place coeff *= Pow(g, e_i) e = Rational(ep, e.q) grow.append((g, e)) # update the jth item num_rat[j] = (bj/g, ej) # update bi that we are checking with bi = bi/g if bi is S.One: break if bi is not S.One: obj = Pow(bi, ei) if obj.is_Number: coeff *= obj else: # changes like sqrt(12) -> 2*sqrt(3) for obj in Mul.make_args(obj): if obj.is_Number: coeff *= obj else: assert obj.is_Pow bi, ei = obj.args pnew[ei].append(bi) num_rat.extend(grow) i += 1 # combine bases of the new powers for e, b in pnew.items(): pnew[e] = cls(*b) # handle -1 and I if neg1e: # treat I as (-1)**(1/2) and compute -1's total exponent p, q = neg1e.as_numer_denom() # if the integer part is odd, extract -1 n, p = divmod(p, q) if n % 2: coeff = -coeff # if it's a multiple of 1/2 extract I if q == 2: c_part.append(S.ImaginaryUnit) elif p: # see if there is any positive base this power of # -1 can join neg1e = Rational(p, q) for e, b in 
pnew.items(): if e == neg1e and b.is_positive: pnew[e] = -b break else: # keep it separate; we've already evaluated it as # much as possible so evaluate=False c_part.append(Pow(S.NegativeOne, neg1e, evaluate=False)) # add all the pnew powers c_part.extend([Pow(b, e) for e, b in pnew.items()]) # oo, -oo if (coeff is S.Infinity) or (coeff is S.NegativeInfinity): def _handle_for_oo(c_part, coeff_sign): new_c_part = [] for t in c_part: if t.is_extended_positive: continue if t.is_extended_negative: coeff_sign *= -1 continue new_c_part.append(t) return new_c_part, coeff_sign c_part, coeff_sign = _handle_for_oo(c_part, 1) nc_part, coeff_sign = _handle_for_oo(nc_part, coeff_sign) coeff *= coeff_sign # zoo if coeff is S.ComplexInfinity: # zoo might be # infinite_real + bounded_im # bounded_real + infinite_im # infinite_real + infinite_im # and non-zero real or imaginary will not change that status. c_part = [c for c in c_part if not (fuzzy_not(c.is_zero) and c.is_extended_real is not None)] nc_part = [c for c in nc_part if not (fuzzy_not(c.is_zero) and c.is_extended_real is not None)] # 0 elif coeff.is_zero: # we know for sure the result will be 0 except the multiplicand # is infinity or a matrix if any(isinstance(c, MatrixExpr) for c in nc_part): return [coeff], nc_part, order_symbols if any(c.is_finite == False for c in c_part): return [S.NaN], [], order_symbols return [coeff], [], order_symbols # check for straggling Numbers that were produced _new = [] for i in c_part: if i.is_Number: coeff *= i else: _new.append(i) c_part = _new # order commutative part canonically _mulsort(c_part) # current code expects coeff to be always in slot-0 if coeff is not S.One: c_part.insert(0, coeff) # we are done if (global_parameters.distribute and not nc_part and len(c_part) == 2 and c_part[0].is_Number and c_part[0].is_finite and c_part[1].is_Add): # 2*(1+a) -> 2 + 2 * a coeff = c_part[0] c_part = [Add(*[coeff*f for f in c_part[1].args])] return c_part, nc_part, order_symbols def _eval_power(self, e): # don't break up NC terms: (A*B)**3 != A**3*B**3, it is A*B*A*B*A*B cargs, nc = self.args_cnc(split_1=False) if e.is_Integer: return Mul(*[Pow(b, e, evaluate=False) for b in cargs]) * \ Pow(Mul._from_args(nc), e, evaluate=False) if e.is_Rational and e.q == 2: from sympy.core.power import integer_nthroot from sympy.functions.elementary.complexes import sign if self.is_imaginary: a = self.as_real_imag()[1] if a.is_Rational: n, d = abs(a/2).as_numer_denom() n, t = integer_nthroot(n, 2) if t: d, t = integer_nthroot(d, 2) if t: r = sympify(n)/d return _unevaluated_Mul(r**e.p, (1 + sign(a)*S.ImaginaryUnit)**e.p) p = Pow(self, e, evaluate=False) if e.is_Rational or e.is_Float: return p._eval_expand_power_base() return p @classmethod def class_key(cls): return 3, 0, cls.__name__ def _eval_evalf(self, prec): c, m = self.as_coeff_Mul() if c is S.NegativeOne: if m.is_Mul: rv = -AssocOp._eval_evalf(m, prec) else: mnew = m._eval_evalf(prec) if mnew is not None: m = mnew rv = -m else: rv = AssocOp._eval_evalf(self, prec) if rv.is_number: return rv.expand() return rv @property def _mpc_(self): """ Convert self to an mpmath mpc if possible """ from sympy.core.numbers import I, Float im_part, imag_unit = self.as_coeff_Mul() if not imag_unit == I: # ValueError may seem more reasonable but since it's a @property, # we need to use AttributeError to keep from confusing things like # hasattr. raise AttributeError("Cannot convert Mul to mpc. 
Must be of the form Number*I") return (Float(0)._mpf_, Float(im_part)._mpf_) @cacheit def as_two_terms(self): """Return head and tail of self. This is the most efficient way to get the head and tail of an expression. - if you want only the head, use self.args[0]; - if you want to process the arguments of the tail then use self.as_coef_mul() which gives the head and a tuple containing the arguments of the tail when treated as a Mul. - if you want the coefficient when self is treated as an Add then use self.as_coeff_add()[0] Examples ======== >>> from sympy.abc import x, y >>> (3*x*y).as_two_terms() (3, x*y) """ args = self.args if len(args) == 1: return S.One, self elif len(args) == 2: return args else: return args[0], self._new_rawargs(*args[1:]) @cacheit def as_coefficients_dict(self): """Return a dictionary mapping terms to their coefficient. Since the dictionary is a defaultdict, inquiries about terms which were not present will return a coefficient of 0. The dictionary is considered to have a single term. Examples ======== >>> from sympy.abc import a, x >>> (3*a*x).as_coefficients_dict() {a*x: 3} >>> _[a] 0 """ d = defaultdict(int) args = self.args if len(args) == 1 or not args[0].is_Number: d[self] = S.One else: d[self._new_rawargs(*args[1:])] = args[0] return d @cacheit def as_coeff_mul(self, *deps, rational=True, **kwargs): if deps: from sympy.utilities.iterables import sift l1, l2 = sift(self.args, lambda x: x.has(*deps), binary=True) return self._new_rawargs(*l2), tuple(l1) args = self.args if args[0].is_Number: if not rational or args[0].is_Rational: return args[0], args[1:] elif args[0].is_extended_negative: return S.NegativeOne, (-args[0],) + args[1:] return S.One, args def as_coeff_Mul(self, rational=False): """ Efficiently extract the coefficient of a product. """ coeff, args = self.args[0], self.args[1:] if coeff.is_Number: if not rational or coeff.is_Rational: if len(args) == 1: return coeff, args[0] else: return coeff, self._new_rawargs(*args) elif coeff.is_extended_negative: return S.NegativeOne, self._new_rawargs(*((-coeff,) + args)) return S.One, self def as_real_imag(self, deep=True, **hints): from sympy import Abs, expand_mul, im, re other = [] coeffr = [] coeffi = [] addterms = S.One for a in self.args: r, i = a.as_real_imag() if i.is_zero: coeffr.append(r) elif r.is_zero: coeffi.append(i*S.ImaginaryUnit) elif a.is_commutative: # search for complex conjugate pairs: for i, x in enumerate(other): if x == a.conjugate(): coeffr.append(Abs(x)**2) del other[i] break else: if a.is_Add: addterms *= a else: other.append(a) else: other.append(a) m = self.func(*other) if hints.get('ignore') == m: return if len(coeffi) % 2: imco = im(coeffi.pop(0)) # all other pairs make a real factor; they will be # put into reco below else: imco = S.Zero reco = self.func(*(coeffr + coeffi)) r, i = (reco*re(m), reco*im(m)) if addterms == 1: if m == 1: if imco.is_zero: return (reco, S.Zero) else: return (S.Zero, reco*imco) if imco is S.Zero: return (r, i) return (-imco*i, imco*r) addre, addim = expand_mul(addterms, deep=False).as_real_imag() if imco is S.Zero: return (r*addre - i*addim, i*addre + r*addim) else: r, i = -imco*i, imco*r return (r*addre - i*addim, r*addim + i*addre) @staticmethod def _expandsums(sums): """ Helper function for _eval_expand_mul. sums must be a list of instances of Basic. 
""" L = len(sums) if L == 1: return sums[0].args terms = [] left = Mul._expandsums(sums[:L//2]) right = Mul._expandsums(sums[L//2:]) terms = [Mul(a, b) for a in left for b in right] added = Add(*terms) return Add.make_args(added) # it may have collapsed down to one term def _eval_expand_mul(self, **hints): from sympy import fraction # Handle things like 1/(x*(x + 1)), which are automatically converted # to 1/x*1/(x + 1) expr = self n, d = fraction(expr) if d.is_Mul: n, d = [i._eval_expand_mul(**hints) if i.is_Mul else i for i in (n, d)] expr = n/d if not expr.is_Mul: return expr plain, sums, rewrite = [], [], False for factor in expr.args: if factor.is_Add: sums.append(factor) rewrite = True else: if factor.is_commutative: plain.append(factor) else: sums.append(Basic(factor)) # Wrapper if not rewrite: return expr else: plain = self.func(*plain) if sums: deep = hints.get("deep", False) terms = self.func._expandsums(sums) args = [] for term in terms: t = self.func(plain, term) if t.is_Mul and any(a.is_Add for a in t.args) and deep: t = t._eval_expand_mul() args.append(t) return Add(*args) else: return plain @cacheit def _eval_derivative(self, s): args = list(self.args) terms = [] for i in range(len(args)): d = args[i].diff(s) if d: # Note: reduce is used in step of Mul as Mul is unable to # handle subtypes and operation priority: terms.append(reduce(lambda x, y: x*y, (args[:i] + [d] + args[i + 1:]), S.One)) return Add.fromiter(terms) @cacheit def _eval_derivative_n_times(self, s, n): from sympy import Integer, factorial, prod, Sum, Max from sympy.ntheory.multinomial import multinomial_coefficients_iterator from .function import AppliedUndef from .symbol import Symbol, symbols, Dummy if not isinstance(s, AppliedUndef) and not isinstance(s, Symbol): # other types of s may not be well behaved, e.g. # (cos(x)*sin(y)).diff([[x, y, z]]) return super()._eval_derivative_n_times(s, n) args = self.args m = len(args) if isinstance(n, (int, Integer)): # https://en.wikipedia.org/wiki/General_Leibniz_rule#More_than_two_factors terms = [] for kvals, c in multinomial_coefficients_iterator(m, n): p = prod([arg.diff((s, k)) for k, arg in zip(kvals, args)]) terms.append(c * p) return Add(*terms) kvals = symbols("k1:%i" % m, cls=Dummy) klast = n - sum(kvals) nfact = factorial(n) e, l = (# better to use the multinomial? nfact/prod(map(factorial, kvals))/factorial(klast)*\ prod([args[t].diff((s, kvals[t])) for t in range(m-1)])*\ args[-1].diff((s, Max(0, klast))), [(k, 0, n) for k in kvals]) return Sum(e, *l) def _eval_difference_delta(self, n, step): from sympy.series.limitseq import difference_delta as dd arg0 = self.args[0] rest = Mul(*self.args[1:]) return (arg0.subs(n, n + step) * dd(rest, n, step) + dd(arg0, n, step) * rest) def _matches_simple(self, expr, repl_dict): # handle (w*3).matches('x*5') -> {w: x*5/3} coeff, terms = self.as_coeff_Mul() terms = Mul.make_args(terms) if len(terms) == 1: newexpr = self.__class__._combine_inverse(expr, coeff) return terms[0].matches(newexpr, repl_dict) return def matches(self, expr, repl_dict={}, old=False): expr = sympify(expr) repl_dict = repl_dict.copy() if self.is_commutative and expr.is_commutative: return self._matches_commutative(expr, repl_dict, old) elif self.is_commutative is not expr.is_commutative: return None # Proceed only if both both expressions are non-commutative c1, nc1 = self.args_cnc() c2, nc2 = expr.args_cnc() c1, c2 = [c or [1] for c in [c1, c2]] # TODO: Should these be self.func? 
comm_mul_self = Mul(*c1) comm_mul_expr = Mul(*c2) repl_dict = comm_mul_self.matches(comm_mul_expr, repl_dict, old) # If the commutative arguments didn't match and aren't equal, then # then the expression as a whole doesn't match if repl_dict is None and c1 != c2: return None # Now match the non-commutative arguments, expanding powers to # multiplications nc1 = Mul._matches_expand_pows(nc1) nc2 = Mul._matches_expand_pows(nc2) repl_dict = Mul._matches_noncomm(nc1, nc2, repl_dict) return repl_dict or None @staticmethod def _matches_expand_pows(arg_list): new_args = [] for arg in arg_list: if arg.is_Pow and arg.exp > 0: new_args.extend([arg.base] * arg.exp) else: new_args.append(arg) return new_args @staticmethod def _matches_noncomm(nodes, targets, repl_dict={}): """Non-commutative multiplication matcher. `nodes` is a list of symbols within the matcher multiplication expression, while `targets` is a list of arguments in the multiplication expression being matched against. """ repl_dict = repl_dict.copy() # List of possible future states to be considered agenda = [] # The current matching state, storing index in nodes and targets state = (0, 0) node_ind, target_ind = state # Mapping between wildcard indices and the index ranges they match wildcard_dict = {} repl_dict = repl_dict.copy() while target_ind < len(targets) and node_ind < len(nodes): node = nodes[node_ind] if node.is_Wild: Mul._matches_add_wildcard(wildcard_dict, state) states_matches = Mul._matches_new_states(wildcard_dict, state, nodes, targets) if states_matches: new_states, new_matches = states_matches agenda.extend(new_states) if new_matches: for match in new_matches: repl_dict[match] = new_matches[match] if not agenda: return None else: state = agenda.pop() node_ind, target_ind = state return repl_dict @staticmethod def _matches_add_wildcard(dictionary, state): node_ind, target_ind = state if node_ind in dictionary: begin, end = dictionary[node_ind] dictionary[node_ind] = (begin, target_ind) else: dictionary[node_ind] = (target_ind, target_ind) @staticmethod def _matches_new_states(dictionary, state, nodes, targets): node_ind, target_ind = state node = nodes[node_ind] target = targets[target_ind] # Don't advance at all if we've exhausted the targets but not the nodes if target_ind >= len(targets) - 1 and node_ind < len(nodes) - 1: return None if node.is_Wild: match_attempt = Mul._matches_match_wilds(dictionary, node_ind, nodes, targets) if match_attempt: # If the same node has been matched before, don't return # anything if the current match is diverging from the previous # match other_node_inds = Mul._matches_get_other_nodes(dictionary, nodes, node_ind) for ind in other_node_inds: other_begin, other_end = dictionary[ind] curr_begin, curr_end = dictionary[node_ind] other_targets = targets[other_begin:other_end + 1] current_targets = targets[curr_begin:curr_end + 1] for curr, other in zip(current_targets, other_targets): if curr != other: return None # A wildcard node can match more than one target, so only the # target index is advanced new_state = [(node_ind, target_ind + 1)] # Only move on to the next node if there is one if node_ind < len(nodes) - 1: new_state.append((node_ind + 1, target_ind + 1)) return new_state, match_attempt else: # If we're not at a wildcard, then make sure we haven't exhausted # nodes but not targets, since in this case one node can only match # one target if node_ind >= len(nodes) - 1 and target_ind < len(targets) - 1: return None match_attempt = node.matches(target) if match_attempt: return 
[(node_ind + 1, target_ind + 1)], match_attempt elif node == target: return [(node_ind + 1, target_ind + 1)], None else: return None @staticmethod def _matches_match_wilds(dictionary, wildcard_ind, nodes, targets): """Determine matches of a wildcard with sub-expression in `target`.""" wildcard = nodes[wildcard_ind] begin, end = dictionary[wildcard_ind] terms = targets[begin:end + 1] # TODO: Should this be self.func? mul = Mul(*terms) if len(terms) > 1 else terms[0] return wildcard.matches(mul) @staticmethod def _matches_get_other_nodes(dictionary, nodes, node_ind): """Find other wildcards that may have already been matched.""" other_node_inds = [] for ind in dictionary: if nodes[ind] == nodes[node_ind]: other_node_inds.append(ind) return other_node_inds @staticmethod def _combine_inverse(lhs, rhs): """ Returns lhs/rhs, but treats arguments like symbols, so things like oo/oo return 1 (instead of a nan) and ``I`` behaves like a symbol instead of sqrt(-1). """ from .symbol import Dummy if lhs == rhs: return S.One def check(l, r): if l.is_Float and r.is_comparable: # if both objects are added to 0 they will share the same "normalization" # and are more likely to compare the same. Since Add(foo, 0) will not allow # the 0 to pass, we use __add__ directly. return l.__add__(0) == r.evalf().__add__(0) return False if check(lhs, rhs) or check(rhs, lhs): return S.One if any(i.is_Pow or i.is_Mul for i in (lhs, rhs)): # gruntz and limit wants a literal I to not combine # with a power of -1 d = Dummy('I') _i = {S.ImaginaryUnit: d} i_ = {d: S.ImaginaryUnit} a = lhs.xreplace(_i).as_powers_dict() b = rhs.xreplace(_i).as_powers_dict() blen = len(b) for bi in tuple(b.keys()): if bi in a: a[bi] -= b.pop(bi) if not a[bi]: a.pop(bi) if len(b) != blen: lhs = Mul(*[k**v for k, v in a.items()]).xreplace(i_) rhs = Mul(*[k**v for k, v in b.items()]).xreplace(i_) return lhs/rhs def as_powers_dict(self): d = defaultdict(int) for term in self.args: for b, e in term.as_powers_dict().items(): d[b] += e return d def as_numer_denom(self): # don't use _from_args to rebuild the numerators and denominators # as the order is not guaranteed to be the same once they have # been separated from each other numers, denoms = list(zip(*[f.as_numer_denom() for f in self.args])) return self.func(*numers), self.func(*denoms) def as_base_exp(self): e1 = None bases = [] nc = 0 for m in self.args: b, e = m.as_base_exp() if not b.is_commutative: nc += 1 if e1 is None: e1 = e elif e != e1 or nc > 1: return self, S.One bases.append(b) return self.func(*bases), e1 def _eval_is_polynomial(self, syms): return all(term._eval_is_polynomial(syms) for term in self.args) def _eval_is_rational_function(self, syms): return all(term._eval_is_rational_function(syms) for term in self.args) def _eval_is_meromorphic(self, x, a): return _fuzzy_group((arg.is_meromorphic(x, a) for arg in self.args), quick_exit=True) def _eval_is_algebraic_expr(self, syms): return all(term._eval_is_algebraic_expr(syms) for term in self.args) _eval_is_commutative = lambda self: _fuzzy_group( a.is_commutative for a in self.args) def _eval_is_complex(self): comp = _fuzzy_group(a.is_complex for a in self.args) if comp is False: if any(a.is_infinite for a in self.args): if any(a.is_zero is not False for a in self.args): return None return False return comp def _eval_is_finite(self): if all(a.is_finite for a in self.args): return True if any(a.is_infinite for a in self.args): if all(a.is_zero is False for a in self.args): return False def _eval_is_infinite(self): if any(a.is_infinite 
for a in self.args): if any(a.is_zero for a in self.args): return S.NaN.is_infinite if any(a.is_zero is None for a in self.args): return None return True def _eval_is_rational(self): r = _fuzzy_group((a.is_rational for a in self.args), quick_exit=True) if r: return r elif r is False: return self.is_zero def _eval_is_algebraic(self): r = _fuzzy_group((a.is_algebraic for a in self.args), quick_exit=True) if r: return r elif r is False: return self.is_zero def _eval_is_zero(self): zero = infinite = False for a in self.args: z = a.is_zero if z: if infinite: return # 0*oo is nan and nan.is_zero is None zero = True else: if not a.is_finite: if zero: return # 0*oo is nan and nan.is_zero is None infinite = True if zero is False and z is None: # trap None zero = None return zero # without involving odd/even checks this code would suffice: #_eval_is_integer = lambda self: _fuzzy_group( # (a.is_integer for a in self.args), quick_exit=True) def _eval_is_integer(self): is_rational = self._eval_is_rational() if is_rational is False: return False numerators = [] denominators = [] for a in self.args: if a.is_integer: numerators.append(a) elif a.is_Rational: n, d = a.as_numer_denom() numerators.append(n) denominators.append(d) elif a.is_Pow: b, e = a.as_base_exp() if not b.is_integer or not e.is_integer: return if e.is_negative: denominators.append(b) else: # for integer b and positive integer e: a = b**e would be integer assert not e.is_positive # for self being rational and e equal to zero: a = b**e would be 1 assert not e.is_zero return # sign of e unknown -> self.is_integer cannot be decided else: return if not denominators: return True odd = lambda ints: all(i.is_odd for i in ints) even = lambda ints: any(i.is_even for i in ints) if odd(numerators) and even(denominators): return False elif even(numerators) and denominators == [2]: return True def _eval_is_polar(self): has_polar = any(arg.is_polar for arg in self.args) return has_polar and \ all(arg.is_polar or arg.is_positive for arg in self.args) def _eval_is_extended_real(self): return self._eval_real_imag(True) def _eval_real_imag(self, real): zero = False t_not_re_im = None for t in self.args: if (t.is_complex or t.is_infinite) is False and t.is_extended_real is False: return False elif t.is_imaginary: # I real = not real elif t.is_extended_real: # 2 if not zero: z = t.is_zero if not z and zero is False: zero = z elif z: if all(a.is_finite for a in self.args): return True return elif t.is_extended_real is False: # symbolic or literal like `2 + I` or symbolic imaginary if t_not_re_im: return # complex terms might cancel t_not_re_im = t elif t.is_imaginary is False: # symbolic like `2` or `2 + I` if t_not_re_im: return # complex terms might cancel t_not_re_im = t else: return if t_not_re_im: if t_not_re_im.is_extended_real is False: if real: # like 3 return zero # 3*(smthng like 2 + I or i) is not real if t_not_re_im.is_imaginary is False: # symbolic 2 or 2 + I if not real: # like I return zero # I*(smthng like 2 or 2 + I) is not real elif zero is False: return real # can't be trumped by 0 elif real: return real # doesn't matter what zero is def _eval_is_imaginary(self): z = self.is_zero if z: return False if self.is_finite is False: return False elif z is False and self.is_finite is True: return self._eval_real_imag(False) def _eval_is_hermitian(self): return self._eval_herm_antiherm(True) def _eval_herm_antiherm(self, real): one_nc = zero = one_neither = False for t in self.args: if not t.is_commutative: if one_nc: return one_nc = True if 
t.is_antihermitian: real = not real elif t.is_hermitian: if not zero: z = t.is_zero if not z and zero is False: zero = z elif z: if all(a.is_finite for a in self.args): return True return elif t.is_hermitian is False: if one_neither: return one_neither = True else: return if one_neither: if real: return zero elif zero is False or real: return real def _eval_is_antihermitian(self): z = self.is_zero if z: return False elif z is False: return self._eval_herm_antiherm(False) def _eval_is_irrational(self): for t in self.args: a = t.is_irrational if a: others = list(self.args) others.remove(t) if all((x.is_rational and fuzzy_not(x.is_zero)) is True for x in others): return True return if a is None: return if all(x.is_real for x in self.args): return False def _eval_is_extended_positive(self): """Return True if self is positive, False if not, and None if it cannot be determined. Explanation =========== This algorithm is non-recursive and works by keeping track of the sign which changes when a negative or nonpositive is encountered. Whether a nonpositive or nonnegative is seen is also tracked since the presence of these makes it impossible to return True, but possible to return False if the end result is nonpositive. e.g. pos * neg * nonpositive -> pos or zero -> None is returned pos * neg * nonnegative -> neg or zero -> False is returned """ return self._eval_pos_neg(1) def _eval_pos_neg(self, sign): saw_NON = saw_NOT = False for t in self.args: if t.is_extended_positive: continue elif t.is_extended_negative: sign = -sign elif t.is_zero: if all(a.is_finite for a in self.args): return False return elif t.is_extended_nonpositive: sign = -sign saw_NON = True elif t.is_extended_nonnegative: saw_NON = True # FIXME: is_positive/is_negative is False doesn't take account of # Symbol('x', infinite=True, extended_real=True) which has # e.g. is_positive is False but has uncertain sign. elif t.is_positive is False: sign = -sign if saw_NOT: return saw_NOT = True elif t.is_negative is False: if saw_NOT: return saw_NOT = True else: return if sign == 1 and saw_NON is False and saw_NOT is False: return True if sign < 0: return False def _eval_is_extended_negative(self): return self._eval_pos_neg(-1) def _eval_is_odd(self): is_integer = self.is_integer if is_integer: r, acc = True, 1 for t in self.args: if not t.is_integer: return None elif t.is_even: r = False elif t.is_integer: if r is False: pass elif acc != 1 and (acc + t).is_odd: r = False elif t.is_odd is None: r = None acc = t return r # !integer -> !odd elif is_integer is False: return False def _eval_is_even(self): is_integer = self.is_integer if is_integer: return fuzzy_not(self.is_odd) elif is_integer is False: return False def _eval_is_composite(self): """ Here we count the number of arguments that have a minimum value greater than two. If there are more than one of such a symbol then the result is composite. Else, the result cannot be determined. 
""" number_of_args = 0 # count of symbols with minimum value greater than one for arg in self.args: if not (arg.is_integer and arg.is_positive): return None if (arg-1).is_positive: number_of_args += 1 if number_of_args > 1: return True def _eval_subs(self, old, new): from sympy.functions.elementary.complexes import sign from sympy.ntheory.factor_ import multiplicity from sympy.simplify.powsimp import powdenest from sympy.simplify.radsimp import fraction if not old.is_Mul: return None # try keep replacement literal so -2*x doesn't replace 4*x if old.args[0].is_Number and old.args[0] < 0: if self.args[0].is_Number: if self.args[0] < 0: return self._subs(-old, -new) return None def base_exp(a): # if I and -1 are in a Mul, they get both end up with # a -1 base (see issue 6421); all we want here are the # true Pow or exp separated into base and exponent from sympy import exp if a.is_Pow or isinstance(a, exp): return a.as_base_exp() return a, S.One def breakup(eq): """break up powers of eq when treated as a Mul: b**(Rational*e) -> b**e, Rational commutatives come back as a dictionary {b**e: Rational} noncommutatives come back as a list [(b**e, Rational)] """ (c, nc) = (defaultdict(int), list()) for a in Mul.make_args(eq): a = powdenest(a) (b, e) = base_exp(a) if e is not S.One: (co, _) = e.as_coeff_mul() b = Pow(b, e/co) e = co if a.is_commutative: c[b] += e else: nc.append([b, e]) return (c, nc) def rejoin(b, co): """ Put rational back with exponent; in general this is not ok, but since we took it from the exponent for analysis, it's ok to put it back. """ (b, e) = base_exp(b) return Pow(b, e*co) def ndiv(a, b): """if b divides a in an extractive way (like 1/4 divides 1/2 but not vice versa, and 2/5 does not divide 1/3) then return the integer number of times it divides, else return 0. """ if not b.q % a.q or not a.q % b.q: return int(a/b) return 0 # give Muls in the denominator a chance to be changed (see issue 5651) # rv will be the default return value rv = None n, d = fraction(self) self2 = self if d is not S.One: self2 = n._subs(old, new)/d._subs(old, new) if not self2.is_Mul: return self2._subs(old, new) if self2 != self: rv = self2 # Now continue with regular substitution. # handle the leading coefficient and use it to decide if anything # should even be started; we always know where to find the Rational # so it's a quick test co_self = self2.args[0] co_old = old.args[0] co_xmul = None if co_old.is_Rational and co_self.is_Rational: # if coeffs are the same there will be no updating to do # below after breakup() step; so skip (and keep co_xmul=None) if co_old != co_self: co_xmul = co_self.extract_multiplicatively(co_old) elif co_old.is_Rational: return rv # break self and old into factors (c, nc) = breakup(self2) (old_c, old_nc) = breakup(old) # update the coefficients if we had an extraction # e.g. 
if co_self were 2*(3/35*x)**2 and co_old = 3/5 # then co_self in c is replaced by (3/5)**2 and co_residual # is 2*(1/7)**2 if co_xmul and co_xmul.is_Rational and abs(co_old) != 1: mult = S(multiplicity(abs(co_old), co_self)) c.pop(co_self) if co_old in c: c[co_old] += mult else: c[co_old] = mult co_residual = co_self/co_old**mult else: co_residual = 1 # do quick tests to see if we can't succeed ok = True if len(old_nc) > len(nc): # more non-commutative terms ok = False elif len(old_c) > len(c): # more commutative terms ok = False elif {i[0] for i in old_nc}.difference({i[0] for i in nc}): # unmatched non-commutative bases ok = False elif set(old_c).difference(set(c)): # unmatched commutative terms ok = False elif any(sign(c[b]) != sign(old_c[b]) for b in old_c): # differences in sign ok = False if not ok: return rv if not old_c: cdid = None else: rat = [] for (b, old_e) in old_c.items(): c_e = c[b] rat.append(ndiv(c_e, old_e)) if not rat[-1]: return rv cdid = min(rat) if not old_nc: ncdid = None for i in range(len(nc)): nc[i] = rejoin(*nc[i]) else: ncdid = 0 # number of nc replacements we did take = len(old_nc) # how much to look at each time limit = cdid or S.Infinity # max number that we can take failed = [] # failed terms will need subs if other terms pass i = 0 while limit and i + take <= len(nc): hit = False # the bases must be equivalent in succession, and # the powers must be extractively compatible on the # first and last factor but equal in between. rat = [] for j in range(take): if nc[i + j][0] != old_nc[j][0]: break elif j == 0: rat.append(ndiv(nc[i + j][1], old_nc[j][1])) elif j == take - 1: rat.append(ndiv(nc[i + j][1], old_nc[j][1])) elif nc[i + j][1] != old_nc[j][1]: break else: rat.append(1) j += 1 else: ndo = min(rat) if ndo: if take == 1: if cdid: ndo = min(cdid, ndo) nc[i] = Pow(new, ndo)*rejoin(nc[i][0], nc[i][1] - ndo*old_nc[0][1]) else: ndo = 1 # the left residual l = rejoin(nc[i][0], nc[i][1] - ndo* old_nc[0][1]) # eliminate all middle terms mid = new # the right residual (which may be the same as the middle if take == 2) ir = i + take - 1 r = (nc[ir][0], nc[ir][1] - ndo* old_nc[-1][1]) if r[1]: if i + take < len(nc): nc[i:i + take] = [l*mid, r] else: r = rejoin(*r) nc[i:i + take] = [l*mid*r] else: # there was nothing left on the right nc[i:i + take] = [l*mid] limit -= ndo ncdid += ndo hit = True if not hit: # do the subs on this failing factor failed.append(i) i += 1 else: if not ncdid: return rv # although we didn't fail, certain nc terms may have # failed so we rebuild them after attempting a partial # subs on them failed.extend(range(i, len(nc))) for i in failed: nc[i] = rejoin(*nc[i]).subs(old, new) # rebuild the expression if cdid is None: do = ncdid elif ncdid is None: do = cdid else: do = min(ncdid, cdid) margs = [] for b in c: if b in old_c: # calculate the new exponent e = c[b] - old_c[b]*do margs.append(rejoin(b, e)) else: margs.append(rejoin(b.subs(old, new), c[b])) if cdid and not ncdid: # in case we are replacing commutative with non-commutative, # we want the new term to come at the front just like the # rest of this routine margs = [Pow(new, cdid)] + margs return co_residual*self2.func(*margs)*self2.func(*nc) def _eval_nseries(self, x, n, logx, cdir=0): from sympy import degree, Mul, Order, ceiling, powsimp, PolynomialError from itertools import product def coeff_exp(term, x): coeff, exp = S.One, S.Zero for factor in Mul.make_args(term): if factor.has(x): base, exp = factor.as_base_exp() if base != x: try: return term.leadterm(x) except ValueError: 
return term, S.Zero else: coeff *= factor return coeff, exp ords = [] try: for t in self.args: coeff, exp = t.leadterm(x) if not coeff.has(x): ords.append((t, exp)) else: raise ValueError n0 = sum(t[1] for t in ords) facs = [] for t, m in ords: n1 = ceiling(n - n0 + m) s = t.nseries(x, n=n1, logx=logx, cdir=cdir) ns = s.getn() if ns is not None: if ns < n1: # less than expected n -= n1 - ns # reduce n facs.append(s.removeO()) except (ValueError, NotImplementedError, TypeError, AttributeError): facs = [t.nseries(x, n=n, logx=logx, cdir=cdir) for t in self.args] res = powsimp(self.func(*facs).expand(), combine='exp', deep=True) if res.has(Order): res += Order(x**n, x) return res res = 0 ords2 = [Add.make_args(factor) for factor in facs] for fac in product(*ords2): ords3 = [coeff_exp(term, x) for term in fac] coeffs, powers = zip(*ords3) power = sum(powers) if power < n: res += Mul(*coeffs)*(x**power) if self.is_polynomial(x): try: if degree(self, x) != degree(res, x): res += Order(x**n, x) except PolynomialError: pass else: return res for i in (1, 2, 3): if (res - self).subs(x, i) is not S.Zero: res += Order(x**n, x) break return res def _eval_as_leading_term(self, x, cdir=0): return self.func(*[t.as_leading_term(x, cdir=cdir) for t in self.args]) def _eval_conjugate(self): return self.func(*[t.conjugate() for t in self.args]) def _eval_transpose(self): return self.func(*[t.transpose() for t in self.args[::-1]]) def _eval_adjoint(self): return self.func(*[t.adjoint() for t in self.args[::-1]]) def _sage_(self): s = 1 for x in self.args: s *= x._sage_() return s def as_content_primitive(self, radical=False, clear=True): """Return the tuple (R, self/R) where R is the positive Rational extracted from self. Examples ======== >>> from sympy import sqrt >>> (-3*sqrt(2)*(2 - 2*sqrt(2))).as_content_primitive() (6, -sqrt(2)*(1 - sqrt(2))) See docstring of Expr.as_content_primitive for more examples. """ coef = S.One args = [] for i, a in enumerate(self.args): c, p = a.as_content_primitive(radical=radical, clear=clear) coef *= c if p is not S.One: args.append(p) # don't use self._from_args here to reconstruct args # since there may be identical args now that should be combined # e.g. (2+2*x)*(3+3*x) should be (6, (1 + x)**2) not (6, (1+x)*(1+x)) return coef, self.func(*args) def as_ordered_factors(self, order=None): """Transform an expression into an ordered list of factors. Examples ======== >>> from sympy import sin, cos >>> from sympy.abc import x, y >>> (2*x*y*sin(x)*cos(x)).as_ordered_factors() [2, x, y, sin(x), cos(x)] """ cpart, ncpart = self.args_cnc() cpart.sort(key=lambda expr: expr.sort_key(order=order)) return cpart + ncpart @property def _sorted_args(self): return tuple(self.as_ordered_factors()) mul = AssocOpDispatcher('mul') def prod(a, start=1): """Return product of elements of a. Start with int 1 so if only ints are included then an int result is returned. Examples ======== >>> from sympy import prod, S >>> prod(range(3)) 0 >>> type(_) is int True >>> prod([S(2), 3]) 6 >>> _.is_Integer True You can start the product at something other than 1: >>> prod([1, 2], 3) 6 """ return reduce(operator.mul, a, start) def _keep_coeff(coeff, factors, clear=True, sign=False): """Return ``coeff*factors`` unevaluated if necessary. If ``clear`` is False, do not keep the coefficient as a factor if it can be distributed on a single factor such that one or more terms will still have integer coefficients. If ``sign`` is True, allow a coefficient of -1 to remain factored out. 
Examples ======== >>> from sympy.core.mul import _keep_coeff >>> from sympy.abc import x, y >>> from sympy import S >>> _keep_coeff(S.Half, x + 2) (x + 2)/2 >>> _keep_coeff(S.Half, x + 2, clear=False) x/2 + 1 >>> _keep_coeff(S.Half, (x + 2)*y, clear=False) y*(x + 2)/2 >>> _keep_coeff(S(-1), x + y) -x - y >>> _keep_coeff(S(-1), x + y, sign=True) -(x + y) """ if not coeff.is_Number: if factors.is_Number: factors, coeff = coeff, factors else: return coeff*factors if coeff is S.One: return factors elif coeff is S.NegativeOne and not sign: return -factors elif factors.is_Add: if not clear and coeff.is_Rational and coeff.q != 1: q = S(coeff.q) for i in factors.args: c, t = i.as_coeff_Mul() r = c/q if r == int(r): return coeff*factors return Mul(coeff, factors, evaluate=False) elif factors.is_Mul: margs = list(factors.args) if margs[0].is_Number: margs[0] *= coeff if margs[0] == 1: margs.pop(0) else: margs.insert(0, coeff) return Mul._from_args(margs) else: return coeff*factors def expand_2arg(e): from sympy.simplify.simplify import bottom_up def do(e): if e.is_Mul: c, r = e.as_coeff_Mul() if c.is_Number and r.is_Add: return _unevaluated_Add(*[c*ri for ri in r.args]) return e return bottom_up(e, do) from .numbers import Rational from .power import Pow from .add import Add, _addsort, _unevaluated_Add
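# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal check of
# how the Mul assumption handlers above behave on concrete symbols.  The
# expected values, shown only as comments, follow the sign-tracking
# description in _eval_is_extended_positive and the numerator/denominator
# reasoning in _eval_is_integer; nothing here is asserted or relied upon.
if __name__ == '__main__':
    from sympy import symbols

    p = symbols('p', positive=True)
    n = symbols('n', negative=True)
    q = symbols('q', nonpositive=True)
    r = symbols('r', nonnegative=True)

    print((p*n).is_positive)     # expected False: one sign flip, no possible zero
    print((p*n*q).is_positive)   # expected None:  "positive or zero" is undecidable
    print((p*n*r).is_positive)   # expected False: "negative or zero" rules it out

    k = symbols('k', odd=True)
    m = symbols('m', even=True)
    print((k/2).is_integer)      # expected False: odd numerator over even denominator
    print((m/2).is_integer)      # expected True:  even numerator over exactly 2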
"""Tools for setting up printing in interactive sessions. """ import sys from distutils.version import LooseVersion as V from io import BytesIO from sympy import latex as default_latex from sympy import preview from sympy.utilities.misc import debug from sympy.printing.defaults import Printable def _init_python_printing(stringify_func, **settings): """Setup printing in Python interactive session. """ import sys import builtins def _displayhook(arg): """Python's pretty-printer display hook. This function was adapted from: http://www.python.org/dev/peps/pep-0217/ """ if arg is not None: builtins._ = None print(stringify_func(arg, **settings)) builtins._ = arg sys.displayhook = _displayhook def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor, backcolor, fontsize, latex_mode, print_builtin, latex_printer, scale, **settings): """Setup printing in IPython interactive session. """ try: from IPython.lib.latextools import latex_to_png except ImportError: pass # Guess best font color if none was given based on the ip.colors string. # From the IPython documentation: # It has four case-insensitive values: 'nocolor', 'neutral', 'linux', # 'lightbg'. The default is neutral, which should be legible on either # dark or light terminal backgrounds. linux is optimised for dark # backgrounds and lightbg for light ones. if forecolor is None: color = ip.colors.lower() if color == 'lightbg': forecolor = 'Black' elif color == 'linux': forecolor = 'White' else: # No idea, go with gray. forecolor = 'Gray' debug("init_printing: Automatic foreground color:", forecolor) preamble = "\\documentclass[varwidth,%s]{standalone}\n" \ "\\usepackage{amsmath,amsfonts}%s\\begin{document}" if euler: addpackages = '\\usepackage{euler}' else: addpackages = '' if use_latex == "svg": addpackages = addpackages + "\n\\special{color %s}" % forecolor preamble = preamble % (fontsize, addpackages) imagesize = 'tight' offset = "0cm,0cm" resolution = round(150*scale) dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % ( imagesize, resolution, backcolor, forecolor, offset) dvioptions = dvi.split() svg_scale = 150/72*scale dvioptions_svg = ["--no-fonts", "--scale={}".format(svg_scale)] debug("init_printing: DVIOPTIONS:", dvioptions) debug("init_printing: DVIOPTIONS_SVG:", dvioptions_svg) debug("init_printing: PREAMBLE:", preamble) latex = latex_printer or default_latex def _print_plain(arg, p, cycle): """caller for pretty, for use in IPython 0.11""" if _can_print(arg): p.text(stringify_func(arg)) else: p.text(IPython.lib.pretty.pretty(arg)) def _preview_wrapper(o): exprbuffer = BytesIO() try: preview(o, output='png', viewer='BytesIO', outputbuffer=exprbuffer, preamble=preamble, dvioptions=dvioptions) except Exception as e: # IPython swallows exceptions debug("png printing:", "_preview_wrapper exception raised:", repr(e)) raise return exprbuffer.getvalue() def _svg_wrapper(o): exprbuffer = BytesIO() try: preview(o, output='svg', viewer='BytesIO', outputbuffer=exprbuffer, preamble=preamble, dvioptions=dvioptions_svg) except Exception as e: # IPython swallows exceptions debug("svg printing:", "_preview_wrapper exception raised:", repr(e)) raise return exprbuffer.getvalue().decode('utf-8') def _matplotlib_wrapper(o): # mathtext does not understand certain latex flags, so we try to # replace them with suitable subs o = o.replace(r'\operatorname', '') o = o.replace(r'\overline', r'\bar') # mathtext can't render some LaTeX commands. For example, it can't # render any LaTeX environments such as array or matrix. 
So here we # ensure that if mathtext fails to render, we return None. try: try: return latex_to_png(o, color=forecolor, scale=scale) except TypeError: # Old IPython version without color and scale return latex_to_png(o) except ValueError as e: debug('matplotlib exception caught:', repr(e)) return None # Hook methods for builtin sympy printers printing_hooks = ('_latex', '_sympystr', '_pretty', '_sympyrepr') def _can_print(o): """Return True if type o can be printed with one of the sympy printers. If o is a container type, this is True if and only if every element of o can be printed in this way. """ try: # If you're adding another type, make sure you add it to printable_types # later in this file as well builtin_types = (list, tuple, set, frozenset) if isinstance(o, builtin_types): # If the object is a custom subclass with a custom str or # repr, use that instead. if (type(o).__str__ not in (i.__str__ for i in builtin_types) or type(o).__repr__ not in (i.__repr__ for i in builtin_types)): return False return all(_can_print(i) for i in o) elif isinstance(o, dict): return all(_can_print(i) and _can_print(o[i]) for i in o) elif isinstance(o, bool): return False elif isinstance(o, Printable): # types known to sympy return True elif any(hasattr(o, hook) for hook in printing_hooks): # types which add support themselves return True elif isinstance(o, (float, int)) and print_builtin: return True return False except RuntimeError: return False # This is in case maximum recursion depth is reached. # Since RecursionError is for versions of Python 3.5+ # so this is to guard against RecursionError for older versions. def _print_latex_png(o): """ A function that returns a png rendered by an external latex distribution, falling back to matplotlib rendering """ if _can_print(o): s = latex(o, mode=latex_mode, **settings) if latex_mode == 'plain': s = '$\\displaystyle %s$' % s try: return _preview_wrapper(s) except RuntimeError as e: debug('preview failed with:', repr(e), ' Falling back to matplotlib backend') if latex_mode != 'inline': s = latex(o, mode='inline', **settings) return _matplotlib_wrapper(s) def _print_latex_svg(o): """ A function that returns a svg rendered by an external latex distribution, no fallback available. """ if _can_print(o): s = latex(o, mode=latex_mode, **settings) if latex_mode == 'plain': s = '$\\displaystyle %s$' % s try: return _svg_wrapper(s) except RuntimeError as e: debug('preview failed with:', repr(e), ' No fallback available.') def _print_latex_matplotlib(o): """ A function that returns a png rendered by mathtext """ if _can_print(o): s = latex(o, mode='inline', **settings) return _matplotlib_wrapper(s) def _print_latex_text(o): """ A function to generate the latex representation of sympy expressions. """ if _can_print(o): s = latex(o, mode=latex_mode, **settings) if latex_mode == 'plain': return '$\\displaystyle %s$' % s return s def _result_display(self, arg): """IPython's pretty-printer display hook, for use in IPython 0.10 This function was adapted from: ipython/IPython/hooks.py:155 """ if self.rc.pprint: out = stringify_func(arg) if '\n' in out: print() print(out) else: print(repr(arg)) import IPython if V(IPython.__version__) >= '0.11': # Printable is our own type, so we handle it with methods instead of # the approach required by builtin types. This allows downstream # packages to override the methods in their own subclasses of Printable, # which avoids the effects of gh-16002. 
printable_types = [float, tuple, list, set, frozenset, dict, int] plaintext_formatter = ip.display_formatter.formatters['text/plain'] # Exception to the rule above: IPython has better dispatching rules # for plaintext printing (xref ipython/ipython#8938), and we can't # use `_repr_pretty_` without hitting a recursion error in _print_plain. for cls in printable_types + [Printable]: plaintext_formatter.for_type(cls, _print_plain) svg_formatter = ip.display_formatter.formatters['image/svg+xml'] if use_latex in ('svg', ): debug("init_printing: using svg formatter") for cls in printable_types: svg_formatter.for_type(cls, _print_latex_svg) Printable._repr_svg_ = _print_latex_svg else: debug("init_printing: not using any svg formatter") for cls in printable_types: # Better way to set this, but currently does not work in IPython #png_formatter.for_type(cls, None) if cls in svg_formatter.type_printers: svg_formatter.type_printers.pop(cls) Printable._repr_svg_ = Printable._repr_disabled png_formatter = ip.display_formatter.formatters['image/png'] if use_latex in (True, 'png'): debug("init_printing: using png formatter") for cls in printable_types: png_formatter.for_type(cls, _print_latex_png) Printable._repr_png_ = _print_latex_png elif use_latex == 'matplotlib': debug("init_printing: using matplotlib formatter") for cls in printable_types: png_formatter.for_type(cls, _print_latex_matplotlib) Printable._repr_png_ = _print_latex_matplotlib else: debug("init_printing: not using any png formatter") for cls in printable_types: # Better way to set this, but currently does not work in IPython #png_formatter.for_type(cls, None) if cls in png_formatter.type_printers: png_formatter.type_printers.pop(cls) Printable._repr_png_ = Printable._repr_disabled latex_formatter = ip.display_formatter.formatters['text/latex'] if use_latex in (True, 'mathjax'): debug("init_printing: using mathjax formatter") for cls in printable_types: latex_formatter.for_type(cls, _print_latex_text) Printable._repr_latex_ = _print_latex_text else: debug("init_printing: not using text/latex formatter") for cls in printable_types: # Better way to set this, but currently does not work in IPython #latex_formatter.for_type(cls, None) if cls in latex_formatter.type_printers: latex_formatter.type_printers.pop(cls) Printable._repr_latex_ = Printable._repr_disabled else: ip.set_hook('result_display', _result_display) def _is_ipython(shell): """Is a shell instance an IPython shell?""" # shortcut, so we don't import IPython if we don't have to if 'IPython' not in sys.modules: return False try: from IPython.core.interactiveshell import InteractiveShell except ImportError: # IPython < 0.11 try: from IPython.iplib import InteractiveShell except ImportError: # Reaching this points means IPython has changed in a backward-incompatible way # that we don't know about. Warn? return False return isinstance(shell, InteractiveShell) # Used by the doctester to override the default for no_global NO_GLOBAL = False def init_printing(pretty_print=True, order=None, use_unicode=None, use_latex=None, wrap_line=None, num_columns=None, no_global=False, ip=None, euler=False, forecolor=None, backcolor='Transparent', fontsize='10pt', latex_mode='plain', print_builtin=True, str_printer=None, pretty_printer=None, latex_printer=None, scale=1.0, **settings): r""" Initializes pretty-printer depending on the environment. 
Parameters ========== pretty_print : boolean, default=True If True, use pretty_print to stringify or the provided pretty printer; if False, use sstrrepr to stringify or the provided string printer. order : string or None, default='lex' There are a few different settings for this parameter: lex (default), which is lexographic order; grlex, which is graded lexographic order; grevlex, which is reversed graded lexographic order; old, which is used for compatibility reasons and for long expressions; None, which sets it to lex. use_unicode : boolean or None, default=None If True, use unicode characters; if False, do not use unicode characters; if None, make a guess based on the environment. use_latex : string, boolean, or None, default=None If True, use default LaTeX rendering in GUI interfaces (png and mathjax); if False, do not use LaTeX rendering; if None, make a guess based on the environment; if 'png', enable latex rendering with an external latex compiler, falling back to matplotlib if external compilation fails; if 'matplotlib', enable LaTeX rendering with matplotlib; if 'mathjax', enable LaTeX text generation, for example MathJax rendering in IPython notebook or text rendering in LaTeX documents; if 'svg', enable LaTeX rendering with an external latex compiler, no fallback wrap_line : boolean If True, lines will wrap at the end; if False, they will not wrap but continue as one line. This is only relevant if ``pretty_print`` is True. num_columns : int or None, default=None If int, number of columns before wrapping is set to num_columns; if None, number of columns before wrapping is set to terminal width. This is only relevant if ``pretty_print`` is True. no_global : boolean, default=False If True, the settings become system wide; if False, use just for this console/session. ip : An interactive console This can either be an instance of IPython, or a class that derives from code.InteractiveConsole. euler : boolean, optional, default=False Loads the euler package in the LaTeX preamble for handwritten style fonts (http://www.ctan.org/pkg/euler). forecolor : string or None, optional, default=None DVI setting for foreground color. None means that either 'Black', 'White', or 'Gray' will be selected based on a guess of the IPython terminal color setting. See notes. backcolor : string, optional, default='Transparent' DVI setting for background color. See notes. fontsize : string, optional, default='10pt' A font size to pass to the LaTeX documentclass function in the preamble. Note that the options are limited by the documentclass. Consider using scale instead. latex_mode : string, optional, default='plain' The mode used in the LaTeX printer. Can be one of: {'inline'|'plain'|'equation'|'equation*'}. print_builtin : boolean, optional, default=True If ``True`` then floats and integers will be printed. If ``False`` the printer will only print SymPy types. str_printer : function, optional, default=None A custom string printer function. This should mimic sympy.printing.sstrrepr(). pretty_printer : function, optional, default=None A custom pretty printer. This should mimic sympy.printing.pretty(). latex_printer : function, optional, default=None A custom LaTeX printer. This should mimic sympy.printing.latex(). scale : float, optional, default=1.0 Scale the LaTeX output when using the ``png`` or ``svg`` backends. Useful for high dpi screens. settings : Any additional settings for the ``latex`` and ``pretty`` commands can be used to fine-tune the output. 
Examples ======== >>> from sympy.interactive import init_printing >>> from sympy import Symbol, sqrt >>> from sympy.abc import x, y >>> sqrt(5) sqrt(5) >>> init_printing(pretty_print=True) # doctest: +SKIP >>> sqrt(5) # doctest: +SKIP ___ \/ 5 >>> theta = Symbol('theta') # doctest: +SKIP >>> init_printing(use_unicode=True) # doctest: +SKIP >>> theta # doctest: +SKIP \u03b8 >>> init_printing(use_unicode=False) # doctest: +SKIP >>> theta # doctest: +SKIP theta >>> init_printing(order='lex') # doctest: +SKIP >>> str(y + x + y**2 + x**2) # doctest: +SKIP x**2 + x + y**2 + y >>> init_printing(order='grlex') # doctest: +SKIP >>> str(y + x + y**2 + x**2) # doctest: +SKIP x**2 + x + y**2 + y >>> init_printing(order='grevlex') # doctest: +SKIP >>> str(y * x**2 + x * y**2) # doctest: +SKIP x**2*y + x*y**2 >>> init_printing(order='old') # doctest: +SKIP >>> str(x**2 + y**2 + x + y) # doctest: +SKIP x**2 + x + y**2 + y >>> init_printing(num_columns=10) # doctest: +SKIP >>> x**2 + x + y**2 + y # doctest: +SKIP x + y + x**2 + y**2 Notes ===== The foreground and background colors can be selected when using 'png' or 'svg' LaTeX rendering. Note that before the ``init_printing`` command is executed, the LaTeX rendering is handled by the IPython console and not SymPy. The colors can be selected among the 68 standard colors known to ``dvips``, for a list see [1]_. In addition, the background color can be set to 'Transparent' (which is the default value). When using the 'Auto' foreground color, the guess is based on the ``colors`` variable in the IPython console, see [2]_. Hence, if that variable is set correctly in your IPython console, there is a high chance that the output will be readable, although manual settings may be needed. References ========== .. [1] https://en.wikibooks.org/wiki/LaTeX/Colors#The_68_standard_colors_known_to_dvips .. [2] https://ipython.readthedocs.io/en/stable/config/details.html#terminal-colors See Also ======== sympy.printing.latex sympy.printing.pretty """ import sys from sympy.printing.printer import Printer if pretty_print: if pretty_printer is not None: stringify_func = pretty_printer else: from sympy.printing import pretty as stringify_func else: if str_printer is not None: stringify_func = str_printer else: from sympy.printing import sstrrepr as stringify_func # Even if ip is not passed, double check that not in IPython shell in_ipython = False if ip is None: try: ip = get_ipython() except NameError: pass else: in_ipython = (ip is not None) if ip and not in_ipython: in_ipython = _is_ipython(ip) if in_ipython and pretty_print: try: import IPython # IPython 1.0 deprecates the frontend module, so we import directly # from the terminal module to prevent a deprecation message from being # shown. 
if V(IPython.__version__) >= '1.0': from IPython.terminal.interactiveshell import TerminalInteractiveShell else: from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell from code import InteractiveConsole except ImportError: pass else: # This will be True if we are in the qtconsole or notebook if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \ and 'ipython-console' not in ''.join(sys.argv): if use_unicode is None: debug("init_printing: Setting use_unicode to True") use_unicode = True if use_latex is None: debug("init_printing: Setting use_latex to True") use_latex = True if not NO_GLOBAL and not no_global: Printer.set_global_settings(order=order, use_unicode=use_unicode, wrap_line=wrap_line, num_columns=num_columns) else: _stringify_func = stringify_func if pretty_print: stringify_func = lambda expr, **settings: \ _stringify_func(expr, order=order, use_unicode=use_unicode, wrap_line=wrap_line, num_columns=num_columns, **settings) else: stringify_func = \ lambda expr, **settings: _stringify_func( expr, order=order, **settings) if in_ipython: mode_in_settings = settings.pop("mode", None) if mode_in_settings: debug("init_printing: Mode is not able to be set due to internals" "of IPython printing") _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor, backcolor, fontsize, latex_mode, print_builtin, latex_printer, scale, **settings) else: _init_python_printing(stringify_func, **settings)
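# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): configuring
# printing outside of IPython, using only parameters documented in the
# init_printing docstring above.  Outside IPython, init_printing installs a
# displayhook via _init_python_printing, so the hook is invoked directly
# here; at an interactive prompt this happens automatically for every
# evaluated expression.
if __name__ == '__main__':
    import sys
    from sympy import init_printing, sqrt, Symbol

    init_printing(pretty_print=True, use_unicode=True, num_columns=80)

    x = Symbol('x')
    sys.displayhook(sqrt(x) + 1/x)   # rendered by the configured pretty printer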
"""Tools for setting up interactive sessions. """ from distutils.version import LooseVersion as V from sympy.interactive.printing import init_printing preexec_source = """\ from __future__ import division from sympy import * x, y, z, t = symbols('x y z t') k, m, n = symbols('k m n', integer=True) f, g, h = symbols('f g h', cls=Function) init_printing() """ verbose_message = """\ These commands were executed: %(source)s Documentation can be found at https://docs.sympy.org/%(version)s """ no_ipython = """\ Couldn't locate IPython. Having IPython installed is greatly recommended. See http://ipython.scipy.org for more details. If you use Debian/Ubuntu, just install the 'ipython' package and start isympy again. """ def _make_message(ipython=True, quiet=False, source=None): """Create a banner for an interactive session. """ from sympy import __version__ as sympy_version from sympy.polys.domains import GROUND_TYPES from sympy.utilities.misc import ARCH from sympy import SYMPY_DEBUG import sys import os if quiet: return "" python_version = "%d.%d.%d" % sys.version_info[:3] if ipython: shell_name = "IPython" else: shell_name = "Python" info = ['ground types: %s' % GROUND_TYPES] cache = os.getenv('SYMPY_USE_CACHE') if cache is not None and cache.lower() == 'no': info.append('cache: off') if SYMPY_DEBUG: info.append('debugging: on') args = shell_name, sympy_version, python_version, ARCH, ', '.join(info) message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args if source is None: source = preexec_source _source = "" for line in source.split('\n')[:-1]: if not line: _source += '\n' else: _source += '>>> ' + line + '\n' doc_version = sympy_version if 'dev' in doc_version: doc_version = "dev" else: doc_version = "%s/" % doc_version message += '\n' + verbose_message % {'source': _source, 'version': doc_version} return message def int_to_Integer(s): """ Wrap integer literals with Integer. This is based on the decistmt example from http://docs.python.org/library/tokenize.html. Only integer literals are converted. Float literals are left alone. Examples ======== >>> from __future__ import division >>> from sympy import Integer # noqa: F401 >>> from sympy.interactive.session import int_to_Integer >>> s = '1.2 + 1/2 - 0x12 + a1' >>> int_to_Integer(s) '1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 ' >>> s = 'print (1/2)' >>> int_to_Integer(s) 'print (Integer (1 )/Integer (2 ))' >>> exec(s) 0.5 >>> exec(int_to_Integer(s)) 1/2 """ from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP from io import StringIO def _is_int(num): """ Returns true if string value num (with token NUMBER) represents an integer. """ # XXX: Is there something in the standard library that will do this? if '.' in num or 'j' in num.lower() or 'e' in num.lower(): return False return True result = [] g = generate_tokens(StringIO(s).readline) # tokenize the string for toknum, tokval, _, _, _ in g: if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens result.extend([ (NAME, 'Integer'), (OP, '('), (NUMBER, tokval), (OP, ')') ]) else: result.append((toknum, tokval)) return untokenize(result) def enable_automatic_int_sympification(shell): """ Allow IPython to automatically convert integer literals to Integer. """ import ast old_run_cell = shell.run_cell def my_run_cell(cell, *args, **kwargs): try: # Check the cell for syntax errors. This way, the syntax error # will show the original input, not the transformed input. 
The # downside here is that IPython magic like %timeit will not work # with transformed input (but on the other hand, IPython magic # that doesn't expect transformed input will continue to work). ast.parse(cell) except SyntaxError: pass else: cell = int_to_Integer(cell) old_run_cell(cell, *args, **kwargs) shell.run_cell = my_run_cell def enable_automatic_symbols(shell): """Allow IPython to automatically create symbols (``isympy -a``). """ # XXX: This should perhaps use tokenize, like int_to_Integer() above. # This would avoid re-executing the code, which can lead to subtle # issues. For example: # # In [1]: a = 1 # # In [2]: for i in range(10): # ...: a += 1 # ...: # # In [3]: a # Out[3]: 11 # # In [4]: a = 1 # # In [5]: for i in range(10): # ...: a += 1 # ...: print b # ...: # b # b # b # b # b # b # b # b # b # b # # In [6]: a # Out[6]: 12 # # Note how the for loop is executed again because `b` was not defined, but `a` # was already incremented once, so the result is that it is incremented # multiple times. import re re_nameerror = re.compile( "name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined") def _handler(self, etype, value, tb, tb_offset=None): """Handle :exc:`NameError` exception and allow injection of missing symbols. """ if etype is NameError and tb.tb_next and not tb.tb_next.tb_next: match = re_nameerror.match(str(value)) if match is not None: # XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion. self.run_cell("%(symbol)s = Symbol('%(symbol)s')" % {'symbol': match.group("symbol")}, store_history=False) try: code = self.user_ns['In'][-1] except (KeyError, IndexError): pass else: self.run_cell(code, store_history=False) return None finally: self.run_cell("del %s" % match.group("symbol"), store_history=False) stb = self.InteractiveTB.structured_traceback( etype, value, tb, tb_offset=tb_offset) self._showtraceback(etype, value, stb) shell.set_custom_exc((NameError,), _handler) def init_ipython_session(shell=None, argv=[], auto_symbols=False, auto_int_to_Integer=False): """Construct new IPython session. """ import IPython if V(IPython.__version__) >= '0.11': if not shell: # use an app to parse the command line, and init config # IPython 1.0 deprecates the frontend module, so we import directly # from the terminal module to prevent a deprecation message from being # shown. if V(IPython.__version__) >= '1.0': from IPython.terminal import ipapp else: from IPython.frontend.terminal import ipapp app = ipapp.TerminalIPythonApp() # don't draw IPython banner during initialization: app.display_banner = False app.initialize(argv) shell = app.shell if auto_symbols: enable_automatic_symbols(shell) if auto_int_to_Integer: enable_automatic_int_sympification(shell) return shell else: from IPython.Shell import make_IPython return make_IPython(argv) def init_python_session(): """Construct new Python session. """ from code import InteractiveConsole class SymPyConsole(InteractiveConsole): """An interactive console with readline support. 
""" def __init__(self): InteractiveConsole.__init__(self) try: import readline except ImportError: pass else: import os import atexit readline.parse_and_bind('tab: complete') if hasattr(readline, 'read_history_file'): history = os.path.expanduser('~/.sympy-history') try: readline.read_history_file(history) except OSError: pass atexit.register(readline.write_history_file, history) return SymPyConsole() def init_session(ipython=None, pretty_print=True, order=None, use_unicode=None, use_latex=None, quiet=False, auto_symbols=False, auto_int_to_Integer=False, str_printer=None, pretty_printer=None, latex_printer=None, argv=[]): """ Initialize an embedded IPython or Python session. The IPython session is initiated with the --pylab option, without the numpy imports, so that matplotlib plotting can be interactive. Parameters ========== pretty_print: boolean If True, use pretty_print to stringify; if False, use sstrrepr to stringify. order: string or None There are a few different settings for this parameter: lex (default), which is lexographic order; grlex, which is graded lexographic order; grevlex, which is reversed graded lexographic order; old, which is used for compatibility reasons and for long expressions; None, which sets it to lex. use_unicode: boolean or None If True, use unicode characters; if False, do not use unicode characters. use_latex: boolean or None If True, use latex rendering if IPython GUI's; if False, do not use latex rendering. quiet: boolean If True, init_session will not print messages regarding its status; if False, init_session will print messages regarding its status. auto_symbols: boolean If True, IPython will automatically create symbols for you. If False, it will not. The default is False. auto_int_to_Integer: boolean If True, IPython will automatically wrap int literals with Integer, so that things like 1/2 give Rational(1, 2). If False, it will not. The default is False. ipython: boolean or None If True, printing will initialize for an IPython console; if False, printing will initialize for a normal console; The default is None, which automatically determines whether we are in an ipython instance or not. str_printer: function, optional, default=None A custom string printer function. This should mimic sympy.printing.sstrrepr(). pretty_printer: function, optional, default=None A custom pretty printer. This should mimic sympy.printing.pretty(). latex_printer: function, optional, default=None A custom LaTeX printer. This should mimic sympy.printing.latex() This should mimic sympy.printing.latex(). argv: list of arguments for IPython See sympy.bin.isympy for options that can be used to initialize IPython. See Also ======== sympy.interactive.printing.init_printing: for examples and the rest of the parameters. 
Examples ======== >>> from sympy import init_session, Symbol, sin, sqrt >>> sin(x) #doctest: +SKIP NameError: name 'x' is not defined >>> init_session() #doctest: +SKIP >>> sin(x) #doctest: +SKIP sin(x) >>> sqrt(5) #doctest: +SKIP ___ \\/ 5 >>> init_session(pretty_print=False) #doctest: +SKIP >>> sqrt(5) #doctest: +SKIP sqrt(5) >>> y + x + y**2 + x**2 #doctest: +SKIP x**2 + x + y**2 + y >>> init_session(order='grlex') #doctest: +SKIP >>> y + x + y**2 + x**2 #doctest: +SKIP x**2 + y**2 + x + y >>> init_session(order='grevlex') #doctest: +SKIP >>> y * x**2 + x * y**2 #doctest: +SKIP x**2*y + x*y**2 >>> init_session(order='old') #doctest: +SKIP >>> x**2 + y**2 + x + y #doctest: +SKIP x + y + x**2 + y**2 >>> theta = Symbol('theta') #doctest: +SKIP >>> theta #doctest: +SKIP theta >>> init_session(use_unicode=True) #doctest: +SKIP >>> theta # doctest: +SKIP \u03b8 """ import sys in_ipython = False if ipython is not False: try: import IPython except ImportError: if ipython is True: raise RuntimeError("IPython is not available on this system") ip = None else: try: from IPython import get_ipython ip = get_ipython() except ImportError: ip = None in_ipython = bool(ip) if ipython is None: ipython = in_ipython if ipython is False: ip = init_python_session() mainloop = ip.interact else: ip = init_ipython_session(ip, argv=argv, auto_symbols=auto_symbols, auto_int_to_Integer=auto_int_to_Integer) if V(IPython.__version__) >= '0.11': # runsource is gone, use run_cell instead, which doesn't # take a symbol arg. The second arg is `store_history`, # and False means don't add the line to IPython's history. ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False) #Enable interactive plotting using pylab. try: ip.enable_pylab(import_all=False) except Exception: # Causes an import error if matplotlib is not installed. # Causes other errors (depending on the backend) if there # is no display, or if there is some problem in the # backend, so we have a bare "except Exception" here pass if not in_ipython: mainloop = ip.mainloop if auto_symbols and (not ipython or V(IPython.__version__) < '0.11'): raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above") if auto_int_to_Integer and (not ipython or V(IPython.__version__) < '0.11'): raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above") _preexec_source = preexec_source ip.runsource(_preexec_source, symbol='exec') init_printing(pretty_print=pretty_print, order=order, use_unicode=use_unicode, use_latex=use_latex, ip=ip, str_printer=str_printer, pretty_printer=pretty_printer, latex_printer=latex_printer) message = _make_message(ipython, quiet, _preexec_source) if not in_ipython: print(message) mainloop() sys.exit('Exiting ...') else: print(message) import atexit atexit.register(lambda: print("Exiting ...\n"))
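# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): exercising
# int_to_Integer outside of an IPython session.  The expected outputs match
# the doctests in its docstring above; init_session(auto_int_to_Integer=True)
# applies the same rewriting to every cell through
# enable_automatic_int_sympification.
if __name__ == '__main__':
    from sympy import Integer  # noqa: F401 -- needed by the transformed source

    src = 'print(1/2)'
    exec(src)                    # 0.5  -- plain Python true division
    exec(int_to_Integer(src))    # 1/2  -- integer literals wrapped, Rational result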
"""User-friendly public interface to polynomial functions. """ from functools import wraps, reduce from operator import mul from sympy.core import ( S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple ) from sympy.core.basic import preorder_traversal from sympy.core.compatibility import iterable, ordered from sympy.core.decorators import _sympifyit from sympy.core.evalf import pure_complex from sympy.core.function import Derivative from sympy.core.mul import _keep_coeff from sympy.core.relational import Relational from sympy.core.symbol import Symbol from sympy.core.sympify import sympify, _sympify from sympy.logic.boolalg import BooleanAtom from sympy.polys import polyoptions as options from sympy.polys.constructor import construct_domain from sympy.polys.domains import FF, QQ, ZZ from sympy.polys.domains.domainelement import DomainElement from sympy.polys.fglmtools import matrix_fglm from sympy.polys.groebnertools import groebner as _groebner from sympy.polys.monomials import Monomial from sympy.polys.orderings import monomial_key from sympy.polys.polyclasses import DMP, DMF, ANP from sympy.polys.polyerrors import ( OperationNotSupported, DomainError, CoercionFailed, UnificationFailed, GeneratorsNeeded, PolynomialError, MultivariatePolynomialError, ExactQuotientFailed, PolificationFailed, ComputationFailed, GeneratorsError, ) from sympy.polys.polyutils import ( basic_from_dict, _sort_gens, _unify_gens, _dict_reorder, _dict_from_expr, _parallel_dict_from_expr, ) from sympy.polys.rationaltools import together from sympy.polys.rootisolation import dup_isolate_real_roots_list from sympy.utilities import group, sift, public, filldedent from sympy.utilities.exceptions import SymPyDeprecationWarning # Required to avoid errors import sympy.polys import mpmath from mpmath.libmp.libhyper import NoConvergence def _polifyit(func): @wraps(func) def wrapper(f, g): g = _sympify(g) if isinstance(g, Poly): return func(f, g) elif isinstance(g, Expr): try: g = f.from_expr(g, *f.gens) except PolynomialError: if g.is_Matrix: return NotImplemented expr_method = getattr(f.as_expr(), func.__name__) result = expr_method(g) if result is not NotImplemented: SymPyDeprecationWarning( feature="Mixing Poly with non-polynomial expressions in binary operations", issue=18613, deprecated_since_version="1.6", useinstead="the as_expr or as_poly method to convert types").warn() return result else: return func(f, g) else: return NotImplemented return wrapper @public class Poly(Basic): """ Generic class for representing and operating on polynomial expressions. Poly is a subclass of Basic rather than Expr but instances can be converted to Expr with the ``as_expr`` method. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y Create a univariate polynomial: >>> Poly(x*(x**2 + x - 1)**2) Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ') Create a univariate polynomial with specific domain: >>> from sympy import sqrt >>> Poly(x**2 + 2*x + sqrt(3), domain='R') Poly(1.0*x**2 + 2.0*x + 1.73205080756888, x, domain='RR') Create a multivariate polynomial: >>> Poly(y*x**2 + x*y + 1) Poly(x**2*y + x*y + 1, x, y, domain='ZZ') Create a univariate polynomial, where y is a constant: >>> Poly(y*x**2 + x*y + 1,x) Poly(y*x**2 + y*x + 1, x, domain='ZZ[y]') You can evaluate the above polynomial as a function of y: >>> Poly(y*x**2 + x*y + 1,x).eval(2) 6*y + 1 See Also ======== sympy.core.expr.Expr """ __slots__ = ('rep', 'gens') is_commutative = True is_Poly = True _op_priority = 10.001 def __new__(cls, rep, *gens, **args): """Create a new polynomial instance out of something useful. """ opt = options.build_options(gens, args) if 'order' in opt: raise NotImplementedError("'order' keyword is not implemented yet") if isinstance(rep, (DMP, DMF, ANP, DomainElement)): return cls._from_domain_element(rep, opt) elif iterable(rep, exclude=str): if isinstance(rep, dict): return cls._from_dict(rep, opt) else: return cls._from_list(list(rep), opt) else: rep = sympify(rep) if rep.is_Poly: return cls._from_poly(rep, opt) else: return cls._from_expr(rep, opt) # Poly does not pass its args to Basic.__new__ to be stored in _args so we # have to emulate them here with an args property that derives from rep # and gens which are instance attributes. This also means we need to # define _hashable_content. The _hashable_content is rep and gens but args # uses expr instead of rep (expr is the Basic version of rep). Passing # expr in args means that Basic methods like subs should work. Using rep # otherwise means that Poly can remain more efficient than Basic by # avoiding creating a Basic instance just to be hashable. @classmethod def new(cls, rep, *gens): """Construct :class:`Poly` instance from raw representation. """ if not isinstance(rep, DMP): raise PolynomialError( "invalid polynomial representation: %s" % rep) elif rep.lev != len(gens) - 1: raise PolynomialError("invalid arguments: %s, %s" % (rep, gens)) obj = Basic.__new__(cls) obj.rep = rep obj.gens = gens return obj @property def expr(self): return basic_from_dict(self.rep.to_sympy_dict(), *self.gens) @property def args(self): return (self.expr,) + self.gens def _hashable_content(self): return (self.rep,) + self.gens @classmethod def from_dict(cls, rep, *gens, **args): """Construct a polynomial from a ``dict``. """ opt = options.build_options(gens, args) return cls._from_dict(rep, opt) @classmethod def from_list(cls, rep, *gens, **args): """Construct a polynomial from a ``list``. """ opt = options.build_options(gens, args) return cls._from_list(rep, opt) @classmethod def from_poly(cls, rep, *gens, **args): """Construct a polynomial from a polynomial. """ opt = options.build_options(gens, args) return cls._from_poly(rep, opt) @classmethod def from_expr(cls, rep, *gens, **args): """Construct a polynomial from an expression. """ opt = options.build_options(gens, args) return cls._from_expr(rep, opt) @classmethod def _from_dict(cls, rep, opt): """Construct a polynomial from a ``dict``. 
""" gens = opt.gens if not gens: raise GeneratorsNeeded( "can't initialize from 'dict' without generators") level = len(gens) - 1 domain = opt.domain if domain is None: domain, rep = construct_domain(rep, opt=opt) else: for monom, coeff in rep.items(): rep[monom] = domain.convert(coeff) return cls.new(DMP.from_dict(rep, level, domain), *gens) @classmethod def _from_list(cls, rep, opt): """Construct a polynomial from a ``list``. """ gens = opt.gens if not gens: raise GeneratorsNeeded( "can't initialize from 'list' without generators") elif len(gens) != 1: raise MultivariatePolynomialError( "'list' representation not supported") level = len(gens) - 1 domain = opt.domain if domain is None: domain, rep = construct_domain(rep, opt=opt) else: rep = list(map(domain.convert, rep)) return cls.new(DMP.from_list(rep, level, domain), *gens) @classmethod def _from_poly(cls, rep, opt): """Construct a polynomial from a polynomial. """ if cls != rep.__class__: rep = cls.new(rep.rep, *rep.gens) gens = opt.gens field = opt.field domain = opt.domain if gens and rep.gens != gens: if set(rep.gens) != set(gens): return cls._from_expr(rep.as_expr(), opt) else: rep = rep.reorder(*gens) if 'domain' in opt and domain: rep = rep.set_domain(domain) elif field is True: rep = rep.to_field() return rep @classmethod def _from_expr(cls, rep, opt): """Construct a polynomial from an expression. """ rep, opt = _dict_from_expr(rep, opt) return cls._from_dict(rep, opt) @classmethod def _from_domain_element(cls, rep, opt): gens = opt.gens domain = opt.domain level = len(gens) - 1 rep = [domain.convert(rep)] return cls.new(DMP.from_list(rep, level, domain), *gens) def __hash__(self): return super().__hash__() @property def free_symbols(self): """ Free symbols of a polynomial expression. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y, z >>> Poly(x**2 + 1).free_symbols {x} >>> Poly(x**2 + y).free_symbols {x, y} >>> Poly(x**2 + y, x).free_symbols {x, y} >>> Poly(x**2 + y, x, z).free_symbols {x, y} """ symbols = set() gens = self.gens for i in range(len(gens)): for monom in self.monoms(): if monom[i]: symbols |= gens[i].free_symbols break return symbols | self.free_symbols_in_domain @property def free_symbols_in_domain(self): """ Free symbols of the domain of ``self``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 1).free_symbols_in_domain set() >>> Poly(x**2 + y).free_symbols_in_domain set() >>> Poly(x**2 + y, x).free_symbols_in_domain {y} """ domain, symbols = self.rep.dom, set() if domain.is_Composite: for gen in domain.symbols: symbols |= gen.free_symbols elif domain.is_EX: for coeff in self.coeffs(): symbols |= coeff.free_symbols return symbols @property def gen(self): """ Return the principal generator. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).gen x """ return self.gens[0] @property def domain(self): """Get the ground domain of ``self``. """ return self.get_domain() @property def zero(self): """Return zero polynomial with ``self``'s properties. """ return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens) @property def one(self): """Return one polynomial with ``self``'s properties. """ return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens) @property def unit(self): """Return unit polynomial with ``self``'s properties. """ return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens) def unify(f, g): """ Make ``f`` and ``g`` belong to the same domain. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f, g = Poly(x/2 + 1), Poly(2*x + 1) >>> f Poly(1/2*x + 1, x, domain='QQ') >>> g Poly(2*x + 1, x, domain='ZZ') >>> F, G = f.unify(g) >>> F Poly(1/2*x + 1, x, domain='QQ') >>> G Poly(2*x + 1, x, domain='QQ') """ _, per, F, G = f._unify(g) return per(F), per(G) def _unify(f, g): g = sympify(g) if not g.is_Poly: try: return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g)) except CoercionFailed: raise UnificationFailed("can't unify %s with %s" % (f, g)) if isinstance(f.rep, DMP) and isinstance(g.rep, DMP): gens = _unify_gens(f.gens, g.gens) dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1 if f.gens != gens: f_monoms, f_coeffs = _dict_reorder( f.rep.to_dict(), f.gens, gens) if f.rep.dom != dom: f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs] F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev) else: F = f.rep.convert(dom) if g.gens != gens: g_monoms, g_coeffs = _dict_reorder( g.rep.to_dict(), g.gens, gens) if g.rep.dom != dom: g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs] G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev) else: G = g.rep.convert(dom) else: raise UnificationFailed("can't unify %s with %s" % (f, g)) cls = f.__class__ def per(rep, dom=dom, gens=gens, remove=None): if remove is not None: gens = gens[:remove] + gens[remove + 1:] if not gens: return dom.to_sympy(rep) return cls.new(rep, *gens) return dom, per, F, G def per(f, rep, gens=None, remove=None): """ Create a Poly out of the given representation. Examples ======== >>> from sympy import Poly, ZZ >>> from sympy.abc import x, y >>> from sympy.polys.polyclasses import DMP >>> a = Poly(x**2 + 1) >>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y]) Poly(y + 1, y, domain='ZZ') """ if gens is None: gens = f.gens if remove is not None: gens = gens[:remove] + gens[remove + 1:] if not gens: return f.rep.dom.to_sympy(rep) return f.__class__.new(rep, *gens) def set_domain(f, domain): """Set the ground domain of ``f``. """ opt = options.build_options(f.gens, {'domain': domain}) return f.per(f.rep.convert(opt.domain)) def get_domain(f): """Get the ground domain of ``f``. """ return f.rep.dom def set_modulus(f, modulus): """ Set the modulus of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2) Poly(x**2 + 1, x, modulus=2) """ modulus = options.Modulus.preprocess(modulus) return f.set_domain(FF(modulus)) def get_modulus(f): """ Get the modulus of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, modulus=2).get_modulus() 2 """ domain = f.get_domain() if domain.is_FiniteField: return Integer(domain.characteristic()) else: raise PolynomialError("not a polynomial over a Galois field") def _eval_subs(f, old, new): """Internal implementation of :func:`subs`. """ if old in f.gens: if new.is_number: return f.eval(old, new) else: try: return f.replace(old, new) except PolynomialError: pass return f.as_expr().subs(old, new) def exclude(f): """ Remove unnecessary generators from ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import a, b, c, d, x >>> Poly(a + x, a, b, c, d, x).exclude() Poly(a + x, a, x, domain='ZZ') """ J, new = f.rep.exclude() gens = [] for j in range(len(f.gens)): if j not in J: gens.append(f.gens[j]) return f.per(new, gens=gens) def replace(f, x, y=None, **_ignore): # XXX this does not match Basic's signature """ Replace ``x`` with ``y`` in generators list. 
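# A hedged sketch of two behaviours implemented above: ``_unify`` merges the
# generator tuples as well as the ground domains, and ``_eval_subs`` routes a
# substitution either to ``eval`` (numeric value) or ``replace`` (symbol):
#
# >>> from sympy import Poly
# >>> from sympy.abc import x, y
# >>> Poly(x + 1, x).unify(Poly(y + 1, y))
# (Poly(x + 1, x, y, domain='ZZ'), Poly(y + 1, x, y, domain='ZZ'))
# >>> Poly(x**2 + 1, x).subs(x, 2)     # numeric value -> eval()
# 5
# >>> Poly(x**2 + 1, x).subs(x, y)     # symbol -> replace()
# Poly(y**2 + 1, y, domain='ZZ')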
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 1, x).replace(x, y) Poly(y**2 + 1, y, domain='ZZ') """ if y is None: if f.is_univariate: x, y = f.gen, x else: raise PolynomialError( "syntax supported only in univariate case") if x == y or x not in f.gens: return f if x in f.gens and y not in f.gens: dom = f.get_domain() if not dom.is_Composite or y not in dom.symbols: gens = list(f.gens) gens[gens.index(x)] = y return f.per(f.rep, gens=gens) raise PolynomialError("can't replace %s with %s in %s" % (x, y, f)) def match(f, *args, **kwargs): """Match expression from Poly. See Basic.match()""" return f.as_expr().match(*args, **kwargs) def reorder(f, *gens, **args): """ Efficiently apply new order of generators. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + x*y**2, x, y).reorder(y, x) Poly(y**2*x + x**2, y, x, domain='ZZ') """ opt = options.Options((), args) if not gens: gens = _sort_gens(f.gens, opt=opt) elif set(f.gens) != set(gens): raise PolynomialError( "generators list can differ only up to order of elements") rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens)))) return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens) def ltrim(f, gen): """ Remove dummy generators from ``f`` that are to the left of specified ``gen`` in the generators as ordered. When ``gen`` is an integer, it refers to the generator located at that position within the tuple of generators of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y, z >>> Poly(y**2 + y*z**2, x, y, z).ltrim(y) Poly(y**2 + y*z**2, y, z, domain='ZZ') >>> Poly(z, x, y, z).ltrim(-1) Poly(z, z, domain='ZZ') """ rep = f.as_dict(native=True) j = f._gen_to_level(gen) terms = {} for monom, coeff in rep.items(): if any(monom[:j]): # some generator is used in the portion to be trimmed raise PolynomialError("can't left trim %s" % f) terms[monom[j:]] = coeff gens = f.gens[j:] return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens) def has_only_gens(f, *gens): """ Return ``True`` if ``Poly(f, *gens)`` retains ground domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y, z >>> Poly(x*y + 1, x, y, z).has_only_gens(x, y) True >>> Poly(x*y + z, x, y, z).has_only_gens(x, y) False """ indices = set() for gen in gens: try: index = f.gens.index(gen) except ValueError: raise GeneratorsError( "%s doesn't have %s as generator" % (f, gen)) else: indices.add(index) for monom in f.monoms(): for i, elt in enumerate(monom): if i not in indices and elt: return False return True def to_ring(f): """ Make the ground domain a ring. Examples ======== >>> from sympy import Poly, QQ >>> from sympy.abc import x >>> Poly(x**2 + 1, domain=QQ).to_ring() Poly(x**2 + 1, x, domain='ZZ') """ if hasattr(f.rep, 'to_ring'): result = f.rep.to_ring() else: # pragma: no cover raise OperationNotSupported(f, 'to_ring') return f.per(result) def to_field(f): """ Make the ground domain a field. Examples ======== >>> from sympy import Poly, ZZ >>> from sympy.abc import x >>> Poly(x**2 + 1, x, domain=ZZ).to_field() Poly(x**2 + 1, x, domain='QQ') """ if hasattr(f.rep, 'to_field'): result = f.rep.to_field() else: # pragma: no cover raise OperationNotSupported(f, 'to_field') return f.per(result) def to_exact(f): """ Make the ground domain exact. 
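# A hedged sketch of ``match`` above, which simply delegates to
# ``Expr.match`` on ``as_expr()`` (the exact printed form of the result dict
# is assumed here):
#
# >>> from sympy import Poly, Wild
# >>> from sympy.abc import x
# >>> a = Wild('a')
# >>> Poly(3*x**2 + 5, x).match(a*x**2 + 5)
# {a_: 3}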
Examples ======== >>> from sympy import Poly, RR >>> from sympy.abc import x >>> Poly(x**2 + 1.0, x, domain=RR).to_exact() Poly(x**2 + 1, x, domain='QQ') """ if hasattr(f.rep, 'to_exact'): result = f.rep.to_exact() else: # pragma: no cover raise OperationNotSupported(f, 'to_exact') return f.per(result) def retract(f, field=None): """ Recalculate the ground domain of a polynomial. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = Poly(x**2 + 1, x, domain='QQ[y]') >>> f Poly(x**2 + 1, x, domain='QQ[y]') >>> f.retract() Poly(x**2 + 1, x, domain='ZZ') >>> f.retract(field=True) Poly(x**2 + 1, x, domain='QQ') """ dom, rep = construct_domain(f.as_dict(zero=True), field=field, composite=f.domain.is_Composite or None) return f.from_dict(rep, f.gens, domain=dom) def slice(f, x, m, n=None): """Take a continuous subsequence of terms of ``f``. """ if n is None: j, m, n = 0, x, m else: j = f._gen_to_level(x) m, n = int(m), int(n) if hasattr(f.rep, 'slice'): result = f.rep.slice(m, n, j) else: # pragma: no cover raise OperationNotSupported(f, 'slice') return f.per(result) def coeffs(f, order=None): """ Returns all non-zero coefficients from ``f`` in lex order. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 + 2*x + 3, x).coeffs() [1, 2, 3] See Also ======== all_coeffs coeff_monomial nth """ return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)] def monoms(f, order=None): """ Returns all non-zero monomials from ``f`` in lex order. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms() [(2, 0), (1, 2), (1, 1), (0, 1)] See Also ======== all_monoms """ return f.rep.monoms(order=order) def terms(f, order=None): """ Returns all non-zero terms from ``f`` in lex order. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms() [((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)] See Also ======== all_terms """ return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)] def all_coeffs(f): """ Returns all coefficients from a univariate polynomial ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 + 2*x - 1, x).all_coeffs() [1, 0, 2, -1] """ return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()] def all_monoms(f): """ Returns all monomials from a univariate polynomial ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 + 2*x - 1, x).all_monoms() [(3,), (2,), (1,), (0,)] See Also ======== all_terms """ return f.rep.all_monoms() def all_terms(f): """ Returns all terms from a univariate polynomial ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 + 2*x - 1, x).all_terms() [((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)] """ return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()] def termwise(f, func, *gens, **args): """ Apply a function to all terms of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> def func(k, coeff): ... k = k[0] ... 
return coeff//10**(2-k) >>> Poly(x**2 + 20*x + 400).termwise(func) Poly(x**2 + 2*x + 4, x, domain='ZZ') """ terms = {} for monom, coeff in f.terms(): result = func(monom, coeff) if isinstance(result, tuple): monom, coeff = result else: coeff = result if coeff: if monom not in terms: terms[monom] = coeff else: raise PolynomialError( "%s monomial was generated twice" % monom) return f.from_dict(terms, *(gens or f.gens), **args) def length(f): """ Returns the number of non-zero terms in ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 2*x - 1).length() 3 """ return len(f.as_dict()) def as_dict(f, native=False, zero=False): """ Switch to a ``dict`` representation. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict() {(0, 1): -1, (1, 2): 2, (2, 0): 1} """ if native: return f.rep.to_dict(zero=zero) else: return f.rep.to_sympy_dict(zero=zero) def as_list(f, native=False): """Switch to a ``list`` representation. """ if native: return f.rep.to_list() else: return f.rep.to_sympy_list() def as_expr(f, *gens): """ Convert a Poly instance to an Expr instance. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = Poly(x**2 + 2*x*y**2 - y, x, y) >>> f.as_expr() x**2 + 2*x*y**2 - y >>> f.as_expr({x: 5}) 10*y**2 - y + 25 >>> f.as_expr(5, 6) 379 """ if not gens: return f.expr if len(gens) == 1 and isinstance(gens[0], dict): mapping = gens[0] gens = list(f.gens) for gen, value in mapping.items(): try: index = gens.index(gen) except ValueError: raise GeneratorsError( "%s doesn't have %s as generator" % (f, gen)) else: gens[index] = value return basic_from_dict(f.rep.to_sympy_dict(), *gens) def as_poly(self, *gens, **args): """Converts ``self`` to a polynomial or returns ``None``. >>> from sympy import sin >>> from sympy.abc import x, y >>> print((x**2 + x*y).as_poly()) Poly(x**2 + x*y, x, y, domain='ZZ') >>> print((x**2 + x*y).as_poly(x, y)) Poly(x**2 + x*y, x, y, domain='ZZ') >>> print((x**2 + sin(y)).as_poly(x, y)) None """ try: poly = Poly(self, *gens, **args) if not poly.is_Poly: return None else: return poly except PolynomialError: return None def lift(f): """ Convert algebraic coefficients to rationals. Examples ======== >>> from sympy import Poly, I >>> from sympy.abc import x >>> Poly(x**2 + I*x + 1, x, extension=I).lift() Poly(x**4 + 3*x**2 + 1, x, domain='QQ') """ if hasattr(f.rep, 'lift'): result = f.rep.lift() else: # pragma: no cover raise OperationNotSupported(f, 'lift') return f.per(result) def deflate(f): """ Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate() ((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ')) """ if hasattr(f.rep, 'deflate'): J, result = f.rep.deflate() else: # pragma: no cover raise OperationNotSupported(f, 'deflate') return J, f.per(result) def inject(f, front=False): """ Inject ground domain generators into ``f``. 
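# A minimal sketch of ``as_list`` above, which has no doctest: it returns the
# dense coefficient list, highest degree first, with nested lists in the
# multivariate case (expected output, assuming the usual conversion):
#
# >>> from sympy import Poly
# >>> from sympy.abc import x, y
# >>> Poly(x**2 + 2*x - 1, x).as_list()
# [1, 2, -1]
# >>> Poly(x*y + 1, x, y).as_list()
# [[1, 0], [1]]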
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = Poly(x**2*y + x*y**3 + x*y + 1, x) >>> f.inject() Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ') >>> f.inject(front=True) Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ') """ dom = f.rep.dom if dom.is_Numerical: return f elif not dom.is_Poly: raise DomainError("can't inject generators over %s" % dom) if hasattr(f.rep, 'inject'): result = f.rep.inject(front=front) else: # pragma: no cover raise OperationNotSupported(f, 'inject') if front: gens = dom.symbols + f.gens else: gens = f.gens + dom.symbols return f.new(result, *gens) def eject(f, *gens): """ Eject selected generators into the ground domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y) >>> f.eject(x) Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]') >>> f.eject(y) Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]') """ dom = f.rep.dom if not dom.is_Numerical: raise DomainError("can't eject generators over %s" % dom) k = len(gens) if f.gens[:k] == gens: _gens, front = f.gens[k:], True elif f.gens[-k:] == gens: _gens, front = f.gens[:-k], False else: raise NotImplementedError( "can only eject front or back generators") dom = dom.inject(*gens) if hasattr(f.rep, 'eject'): result = f.rep.eject(dom, front=front) else: # pragma: no cover raise OperationNotSupported(f, 'eject') return f.new(result, *_gens) def terms_gcd(f): """ Remove GCD of terms from the polynomial ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd() ((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ')) """ if hasattr(f.rep, 'terms_gcd'): J, result = f.rep.terms_gcd() else: # pragma: no cover raise OperationNotSupported(f, 'terms_gcd') return J, f.per(result) def add_ground(f, coeff): """ Add an element of the ground domain to ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x + 1).add_ground(2) Poly(x + 3, x, domain='ZZ') """ if hasattr(f.rep, 'add_ground'): result = f.rep.add_ground(coeff) else: # pragma: no cover raise OperationNotSupported(f, 'add_ground') return f.per(result) def sub_ground(f, coeff): """ Subtract an element of the ground domain from ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x + 1).sub_ground(2) Poly(x - 1, x, domain='ZZ') """ if hasattr(f.rep, 'sub_ground'): result = f.rep.sub_ground(coeff) else: # pragma: no cover raise OperationNotSupported(f, 'sub_ground') return f.per(result) def mul_ground(f, coeff): """ Multiply ``f`` by a an element of the ground domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x + 1).mul_ground(2) Poly(2*x + 2, x, domain='ZZ') """ if hasattr(f.rep, 'mul_ground'): result = f.rep.mul_ground(coeff) else: # pragma: no cover raise OperationNotSupported(f, 'mul_ground') return f.per(result) def quo_ground(f, coeff): """ Quotient of ``f`` by a an element of the ground domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x + 4).quo_ground(2) Poly(x + 2, x, domain='ZZ') >>> Poly(2*x + 3).quo_ground(2) Poly(x + 1, x, domain='ZZ') """ if hasattr(f.rep, 'quo_ground'): result = f.rep.quo_ground(coeff) else: # pragma: no cover raise OperationNotSupported(f, 'quo_ground') return f.per(result) def exquo_ground(f, coeff): """ Exact quotient of ``f`` by a an element of the ground domain. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x + 4).exquo_ground(2) Poly(x + 2, x, domain='ZZ') >>> Poly(2*x + 3).exquo_ground(2) Traceback (most recent call last): ... ExactQuotientFailed: 2 does not divide 3 in ZZ """ if hasattr(f.rep, 'exquo_ground'): result = f.rep.exquo_ground(coeff) else: # pragma: no cover raise OperationNotSupported(f, 'exquo_ground') return f.per(result) def abs(f): """ Make all coefficients in ``f`` positive. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).abs() Poly(x**2 + 1, x, domain='ZZ') """ if hasattr(f.rep, 'abs'): result = f.rep.abs() else: # pragma: no cover raise OperationNotSupported(f, 'abs') return f.per(result) def neg(f): """ Negate all coefficients in ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).neg() Poly(-x**2 + 1, x, domain='ZZ') >>> -Poly(x**2 - 1, x) Poly(-x**2 + 1, x, domain='ZZ') """ if hasattr(f.rep, 'neg'): result = f.rep.neg() else: # pragma: no cover raise OperationNotSupported(f, 'neg') return f.per(result) def add(f, g): """ Add two polynomials ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).add(Poly(x - 2, x)) Poly(x**2 + x - 1, x, domain='ZZ') >>> Poly(x**2 + 1, x) + Poly(x - 2, x) Poly(x**2 + x - 1, x, domain='ZZ') """ g = sympify(g) if not g.is_Poly: return f.add_ground(g) _, per, F, G = f._unify(g) if hasattr(f.rep, 'add'): result = F.add(G) else: # pragma: no cover raise OperationNotSupported(f, 'add') return per(result) def sub(f, g): """ Subtract two polynomials ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).sub(Poly(x - 2, x)) Poly(x**2 - x + 3, x, domain='ZZ') >>> Poly(x**2 + 1, x) - Poly(x - 2, x) Poly(x**2 - x + 3, x, domain='ZZ') """ g = sympify(g) if not g.is_Poly: return f.sub_ground(g) _, per, F, G = f._unify(g) if hasattr(f.rep, 'sub'): result = F.sub(G) else: # pragma: no cover raise OperationNotSupported(f, 'sub') return per(result) def mul(f, g): """ Multiply two polynomials ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).mul(Poly(x - 2, x)) Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ') >>> Poly(x**2 + 1, x)*Poly(x - 2, x) Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ') """ g = sympify(g) if not g.is_Poly: return f.mul_ground(g) _, per, F, G = f._unify(g) if hasattr(f.rep, 'mul'): result = F.mul(G) else: # pragma: no cover raise OperationNotSupported(f, 'mul') return per(result) def sqr(f): """ Square a polynomial ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x - 2, x).sqr() Poly(x**2 - 4*x + 4, x, domain='ZZ') >>> Poly(x - 2, x)**2 Poly(x**2 - 4*x + 4, x, domain='ZZ') """ if hasattr(f.rep, 'sqr'): result = f.rep.sqr() else: # pragma: no cover raise OperationNotSupported(f, 'sqr') return f.per(result) def pow(f, n): """ Raise ``f`` to a non-negative power ``n``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x - 2, x).pow(3) Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ') >>> Poly(x - 2, x)**3 Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ') """ n = int(n) if hasattr(f.rep, 'pow'): result = f.rep.pow(n) else: # pragma: no cover raise OperationNotSupported(f, 'pow') return f.per(result) def pdiv(f, g): """ Polynomial pseudo-division of ``f`` by ``g``. 
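# A minimal sketch of the scalar branch shown in ``add``/``sub``/``mul``
# above: a non-Poly argument is sympified and dispatched to the matching
# ``*_ground`` method:
#
# >>> from sympy import Poly
# >>> from sympy.abc import x
# >>> Poly(x**2 + 1, x).add(2)     # dispatches to add_ground(2)
# Poly(x**2 + 3, x, domain='ZZ')
# >>> Poly(x**2 + 1, x).mul(2)     # dispatches to mul_ground(2)
# Poly(2*x**2 + 2, x, domain='ZZ')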
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x)) (Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ')) """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'pdiv'): q, r = F.pdiv(G) else: # pragma: no cover raise OperationNotSupported(f, 'pdiv') return per(q), per(r) def prem(f, g): """ Polynomial pseudo-remainder of ``f`` by ``g``. Caveat: The function prem(f, g, x) can be safely used to compute in Z[x] _only_ subresultant polynomial remainder sequences (prs's). To safely compute Euclidean and Sturmian prs's in Z[x] employ any one of the corresponding functions found in the module sympy.polys.subresultants_qq_zz. The functions in the module with suffix _pg compute prs's in Z[x] employing rem(f, g, x), whereas the functions with suffix _amv compute prs's in Z[x] employing rem_z(f, g, x). The function rem_z(f, g, x) differs from prem(f, g, x) in that, to compute the remainder polynomials in Z[x], it premultiplies the dividend by the absolute value of the leading coefficient of the divisor raised to the power degree(f, x) - degree(g, x) + 1. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x)) Poly(20, x, domain='ZZ') """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'prem'): result = F.prem(G) else: # pragma: no cover raise OperationNotSupported(f, 'prem') return per(result) def pquo(f, g): """ Polynomial pseudo-quotient of ``f`` by ``g``. See the Caveat note in the function prem(f, g). Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x)) Poly(2*x + 4, x, domain='ZZ') >>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x)) Poly(2*x + 2, x, domain='ZZ') """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'pquo'): result = F.pquo(G) else: # pragma: no cover raise OperationNotSupported(f, 'pquo') return per(result) def pexquo(f, g): """ Polynomial exact pseudo-quotient of ``f`` by ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x)) Poly(2*x + 2, x, domain='ZZ') >>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x)) Traceback (most recent call last): ... ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1 """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'pexquo'): try: result = F.pexquo(G) except ExactQuotientFailed as exc: raise exc.new(f.as_expr(), g.as_expr()) else: # pragma: no cover raise OperationNotSupported(f, 'pexquo') return per(result) def div(f, g, auto=True): """ Polynomial division with remainder of ``f`` by ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x)) (Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ')) >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False) (Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ')) """ dom, per, F, G = f._unify(g) retract = False if auto and dom.is_Ring and not dom.is_Field: F, G = F.to_field(), G.to_field() retract = True if hasattr(f.rep, 'div'): q, r = F.div(G) else: # pragma: no cover raise OperationNotSupported(f, 'div') if retract: try: Q, R = q.to_ring(), r.to_ring() except CoercionFailed: pass else: q, r = Q, R return per(q), per(r) def rem(f, g, auto=True): """ Computes the polynomial remainder of ``f`` by ``g``. 
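# A hedged doctest-style check of the pseudo-division identity behind
# ``pdiv``/``prem``/``pquo`` above, LC(g)**(deg(f) - deg(g) + 1) * f == g*q + r,
# using the same values as the pdiv/prem examples:
#
# >>> from sympy import Poly
# >>> from sympy.abc import x
# >>> f, g = Poly(x**2 + 1, x), Poly(2*x - 4, x)
# >>> q, r = f.pdiv(g)
# >>> g.mul(q).add(r) == f.mul_ground(2**2)   # LC(g) = 2, deg(f) - deg(g) + 1 = 2
# True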
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x)) Poly(5, x, domain='ZZ') >>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False) Poly(x**2 + 1, x, domain='ZZ') """ dom, per, F, G = f._unify(g) retract = False if auto and dom.is_Ring and not dom.is_Field: F, G = F.to_field(), G.to_field() retract = True if hasattr(f.rep, 'rem'): r = F.rem(G) else: # pragma: no cover raise OperationNotSupported(f, 'rem') if retract: try: r = r.to_ring() except CoercionFailed: pass return per(r) def quo(f, g, auto=True): """ Computes polynomial quotient of ``f`` by ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x)) Poly(1/2*x + 1, x, domain='QQ') >>> Poly(x**2 - 1, x).quo(Poly(x - 1, x)) Poly(x + 1, x, domain='ZZ') """ dom, per, F, G = f._unify(g) retract = False if auto and dom.is_Ring and not dom.is_Field: F, G = F.to_field(), G.to_field() retract = True if hasattr(f.rep, 'quo'): q = F.quo(G) else: # pragma: no cover raise OperationNotSupported(f, 'quo') if retract: try: q = q.to_ring() except CoercionFailed: pass return per(q) def exquo(f, g, auto=True): """ Computes polynomial exact quotient of ``f`` by ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x)) Poly(x + 1, x, domain='ZZ') >>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x)) Traceback (most recent call last): ... ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1 """ dom, per, F, G = f._unify(g) retract = False if auto and dom.is_Ring and not dom.is_Field: F, G = F.to_field(), G.to_field() retract = True if hasattr(f.rep, 'exquo'): try: q = F.exquo(G) except ExactQuotientFailed as exc: raise exc.new(f.as_expr(), g.as_expr()) else: # pragma: no cover raise OperationNotSupported(f, 'exquo') if retract: try: q = q.to_ring() except CoercionFailed: pass return per(q) def _gen_to_level(f, gen): """Returns level associated with the given generator. """ if isinstance(gen, int): length = len(f.gens) if -length <= gen < length: if gen < 0: return length + gen else: return gen else: raise PolynomialError("-%s <= gen < %s expected, got %s" % (length, length, gen)) else: try: return f.gens.index(sympify(gen)) except ValueError: raise PolynomialError( "a valid generator expected, got %s" % gen) def degree(f, gen=0): """ Returns degree of ``f`` in ``x_j``. The degree of 0 is negative infinity. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + y*x + 1, x, y).degree() 2 >>> Poly(x**2 + y*x + y, x, y).degree(y) 1 >>> Poly(0, x).degree() -oo """ j = f._gen_to_level(gen) if hasattr(f.rep, 'degree'): return f.rep.degree(j) else: # pragma: no cover raise OperationNotSupported(f, 'degree') def degree_list(f): """ Returns a list of degrees of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + y*x + 1, x, y).degree_list() (2, 1) """ if hasattr(f.rep, 'degree_list'): return f.rep.degree_list() else: # pragma: no cover raise OperationNotSupported(f, 'degree_list') def total_degree(f): """ Returns the total degree of ``f``. 
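# A minimal sketch of the negative-index support implemented in
# ``_gen_to_level`` above; any method that accepts a generator index, such as
# ``degree``, counts negative indices from the end of the generators tuple:
#
# >>> from sympy import Poly
# >>> from sympy.abc import x, y
# >>> Poly(x**2*y**3 + 1, x, y).degree(-1)    # degree in the last generator, y
# 3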
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + y*x + 1, x, y).total_degree() 2 >>> Poly(x + y**5, x, y).total_degree() 5 """ if hasattr(f.rep, 'total_degree'): return f.rep.total_degree() else: # pragma: no cover raise OperationNotSupported(f, 'total_degree') def homogenize(f, s): """ Returns the homogeneous polynomial of ``f``. A homogeneous polynomial is a polynomial all of whose monomials with non-zero coefficients have the same total degree. If you only want to check if a polynomial is homogeneous, then use :func:`Poly.is_homogeneous`. If you want not only to check if a polynomial is homogeneous but also compute its homogeneous order, then use :func:`Poly.homogeneous_order`. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y, z >>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3) >>> f.homogenize(z) Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ') """ if not isinstance(s, Symbol): raise TypeError("``Symbol`` expected, got %s" % type(s)) if s in f.gens: i = f.gens.index(s) gens = f.gens else: i = len(f.gens) gens = f.gens + (s,) if hasattr(f.rep, 'homogenize'): return f.per(f.rep.homogenize(i), gens=gens) raise OperationNotSupported(f, 'homogenize') def homogeneous_order(f): """ Returns the homogeneous order of ``f``. A homogeneous polynomial is a polynomial all of whose monomials with non-zero coefficients have the same total degree. This degree is the homogeneous order of ``f``. If you only want to check if a polynomial is homogeneous, then use :func:`Poly.is_homogeneous`. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4) >>> f.homogeneous_order() 5 """ if hasattr(f.rep, 'homogeneous_order'): return f.rep.homogeneous_order() else: # pragma: no cover raise OperationNotSupported(f, 'homogeneous_order') def LC(f, order=None): """ Returns the leading coefficient of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC() 4 """ if order is not None: return f.coeffs(order)[0] if hasattr(f.rep, 'LC'): result = f.rep.LC() else: # pragma: no cover raise OperationNotSupported(f, 'LC') return f.rep.dom.to_sympy(result) def TC(f): """ Returns the trailing coefficient of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 + 2*x**2 + 3*x, x).TC() 0 """ if hasattr(f.rep, 'TC'): result = f.rep.TC() else: # pragma: no cover raise OperationNotSupported(f, 'TC') return f.rep.dom.to_sympy(result) def EC(f, order=None): """ Returns the last non-zero coefficient of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 + 2*x**2 + 3*x, x).EC() 3 """ if hasattr(f.rep, 'coeffs'): return f.coeffs(order)[-1] else: # pragma: no cover raise OperationNotSupported(f, 'EC') def coeff_monomial(f, monom): """ Returns the coefficient of ``monom`` in ``f`` if it appears in ``f``, else zero. 
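# A hedged cross-check tying together ``homogenize`` and
# ``homogeneous_order`` above: homogenizing the docstring's example should
# yield a polynomial of homogeneous order 5:
#
# >>> from sympy import Poly
# >>> from sympy.abc import x, y, z
# >>> Poly(x**5 + 2*x**2*y**2 + 9*x*y**3).homogenize(z).homogeneous_order()
# 5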
Examples ======== >>> from sympy import Poly, exp >>> from sympy.abc import x, y >>> p = Poly(24*x*y*exp(8) + 23*x, x, y) >>> p.coeff_monomial(x) 23 >>> p.coeff_monomial(y) 0 >>> p.coeff_monomial(x*y) 24*exp(8) Note that ``Expr.coeff()`` behaves differently, collecting terms if possible; the Poly must be converted to an Expr to use that method, however: >>> p.as_expr().coeff(x) 24*y*exp(8) + 23 >>> p.as_expr().coeff(y) 24*x*exp(8) >>> p.as_expr().coeff(x*y) 24*exp(8) See Also ======== nth: more efficient query using exponents of the monomial's generators """ return f.nth(*Monomial(monom, f.gens).exponents) def nth(f, *N): """ Returns the ``n``-th coefficient of ``f`` where ``N`` are the exponents of the generators in the term of interest. Examples ======== >>> from sympy import Poly, sqrt >>> from sympy.abc import x, y >>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2) 2 >>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2) 2 >>> Poly(4*sqrt(x)*y) Poly(4*y*(sqrt(x)), y, sqrt(x), domain='ZZ') >>> _.nth(1, 1) 4 See Also ======== coeff_monomial """ if hasattr(f.rep, 'nth'): if len(N) != len(f.gens): raise ValueError('exponent of each generator must be specified') result = f.rep.nth(*list(map(int, N))) else: # pragma: no cover raise OperationNotSupported(f, 'nth') return f.rep.dom.to_sympy(result) def coeff(f, x, n=1, right=False): # the semantics of coeff_monomial and Expr.coeff are different; # if someone is working with a Poly, they should be aware of the # differences and chose the method best suited for the query. # Alternatively, a pure-polys method could be written here but # at this time the ``right`` keyword would be ignored because Poly # doesn't work with non-commutatives. raise NotImplementedError( 'Either convert to Expr with `as_expr` method ' 'to use Expr\'s coeff method or else use the ' '`coeff_monomial` method of Polys.') def LM(f, order=None): """ Returns the leading monomial of ``f``. The Leading monomial signifies the monomial having the highest power of the principal generator in the expression f. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM() x**2*y**0 """ return Monomial(f.monoms(order)[0], f.gens) def EM(f, order=None): """ Returns the last non-zero monomial of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM() x**0*y**1 """ return Monomial(f.monoms(order)[-1], f.gens) def LT(f, order=None): """ Returns the leading term of ``f``. The Leading term signifies the term having the highest power of the principal generator in the expression f along with its coefficient. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT() (x**2*y**0, 4) """ monom, coeff = f.terms(order)[0] return Monomial(monom, f.gens), coeff def ET(f, order=None): """ Returns the last non-zero term of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET() (x**0*y**1, 3) """ monom, coeff = f.terms(order)[-1] return Monomial(monom, f.gens), coeff def max_norm(f): """ Returns maximum norm of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(-x**2 + 2*x - 3, x).max_norm() 3 """ if hasattr(f.rep, 'max_norm'): result = f.rep.max_norm() else: # pragma: no cover raise OperationNotSupported(f, 'max_norm') return f.rep.dom.to_sympy(result) def l1_norm(f): """ Returns l1 norm of ``f``. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(-x**2 + 2*x - 3, x).l1_norm() 6 """ if hasattr(f.rep, 'l1_norm'): result = f.rep.l1_norm() else: # pragma: no cover raise OperationNotSupported(f, 'l1_norm') return f.rep.dom.to_sympy(result) def clear_denoms(self, convert=False): """ Clear denominators, but keep the ground domain. Examples ======== >>> from sympy import Poly, S, QQ >>> from sympy.abc import x >>> f = Poly(x/2 + S(1)/3, x, domain=QQ) >>> f.clear_denoms() (6, Poly(3*x + 2, x, domain='QQ')) >>> f.clear_denoms(convert=True) (6, Poly(3*x + 2, x, domain='ZZ')) """ f = self if not f.rep.dom.is_Field: return S.One, f dom = f.get_domain() if dom.has_assoc_Ring: dom = f.rep.dom.get_ring() if hasattr(f.rep, 'clear_denoms'): coeff, result = f.rep.clear_denoms() else: # pragma: no cover raise OperationNotSupported(f, 'clear_denoms') coeff, f = dom.to_sympy(coeff), f.per(result) if not convert or not dom.has_assoc_Ring: return coeff, f else: return coeff, f.to_ring() def rat_clear_denoms(self, g): """ Clear denominators in a rational function ``f/g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = Poly(x**2/y + 1, x) >>> g = Poly(x**3 + y, x) >>> p, q = f.rat_clear_denoms(g) >>> p Poly(x**2 + y, x, domain='ZZ[y]') >>> q Poly(y*x**3 + y**2, x, domain='ZZ[y]') """ f = self dom, per, f, g = f._unify(g) f = per(f) g = per(g) if not (dom.is_Field and dom.has_assoc_Ring): return f, g a, f = f.clear_denoms(convert=True) b, g = g.clear_denoms(convert=True) f = f.mul_ground(b) g = g.mul_ground(a) return f, g def integrate(self, *specs, **args): """ Computes indefinite integral of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 2*x + 1, x).integrate() Poly(1/3*x**3 + x**2 + x, x, domain='QQ') >>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0)) Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ') """ f = self if args.get('auto', True) and f.rep.dom.is_Ring: f = f.to_field() if hasattr(f.rep, 'integrate'): if not specs: return f.per(f.rep.integrate(m=1)) rep = f.rep for spec in specs: if type(spec) is tuple: gen, m = spec else: gen, m = spec, 1 rep = rep.integrate(int(m), f._gen_to_level(gen)) return f.per(rep) else: # pragma: no cover raise OperationNotSupported(f, 'integrate') def diff(f, *specs, **kwargs): """ Computes partial derivative of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + 2*x + 1, x).diff() Poly(2*x + 2, x, domain='ZZ') >>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1)) Poly(2*x*y, x, y, domain='ZZ') """ if not kwargs.get('evaluate', True): return Derivative(f, *specs, **kwargs) if hasattr(f.rep, 'diff'): if not specs: return f.per(f.rep.diff(m=1)) rep = f.rep for spec in specs: if type(spec) is tuple: gen, m = spec else: gen, m = spec, 1 rep = rep.diff(int(m), f._gen_to_level(gen)) return f.per(rep) else: # pragma: no cover raise OperationNotSupported(f, 'diff') _eval_derivative = diff def eval(self, x, a=None, auto=True): """ Evaluate ``f`` at ``a`` in the given variable. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y, z >>> Poly(x**2 + 2*x + 3, x).eval(2) 11 >>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2) Poly(5*y + 8, y, domain='ZZ') >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z) >>> f.eval({x: 2}) Poly(5*y + 2*z + 6, y, z, domain='ZZ') >>> f.eval({x: 2, y: 5}) Poly(2*z + 31, z, domain='ZZ') >>> f.eval({x: 2, y: 5, z: 7}) 45 >>> f.eval((2, 5)) Poly(2*z + 31, z, domain='ZZ') >>> f(2, 5) Poly(2*z + 31, z, domain='ZZ') """ f = self if a is None: if isinstance(x, dict): mapping = x for gen, value in mapping.items(): f = f.eval(gen, value) return f elif isinstance(x, (tuple, list)): values = x if len(values) > len(f.gens): raise ValueError("too many values provided") for gen, value in zip(f.gens, values): f = f.eval(gen, value) return f else: j, a = 0, x else: j = f._gen_to_level(x) if not hasattr(f.rep, 'eval'): # pragma: no cover raise OperationNotSupported(f, 'eval') try: result = f.rep.eval(a, j) except CoercionFailed: if not auto: raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom)) else: a_domain, [a] = construct_domain([a]) new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens) f = f.set_domain(new_domain) a = new_domain.convert(a, a_domain) result = f.rep.eval(a, j) return f.per(result, remove=j) def __call__(f, *values): """ Evaluate ``f`` at the give values. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y, z >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z) >>> f(2) Poly(5*y + 2*z + 6, y, z, domain='ZZ') >>> f(2, 5) Poly(2*z + 31, z, domain='ZZ') >>> f(2, 5, 7) 45 """ return f.eval(values) def half_gcdex(f, g, auto=True): """ Half extended Euclidean algorithm of ``f`` and ``g``. Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15 >>> g = x**3 + x**2 - 4*x - 4 >>> Poly(f).half_gcdex(Poly(g)) (Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ')) """ dom, per, F, G = f._unify(g) if auto and dom.is_Ring: F, G = F.to_field(), G.to_field() if hasattr(f.rep, 'half_gcdex'): s, h = F.half_gcdex(G) else: # pragma: no cover raise OperationNotSupported(f, 'half_gcdex') return per(s), per(h) def gcdex(f, g, auto=True): """ Extended Euclidean algorithm of ``f`` and ``g``. Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15 >>> g = x**3 + x**2 - 4*x - 4 >>> Poly(f).gcdex(Poly(g)) (Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'), Poly(x + 1, x, domain='QQ')) """ dom, per, F, G = f._unify(g) if auto and dom.is_Ring: F, G = F.to_field(), G.to_field() if hasattr(f.rep, 'gcdex'): s, t, h = F.gcdex(G) else: # pragma: no cover raise OperationNotSupported(f, 'gcdex') return per(s), per(t), per(h) def invert(f, g, auto=True): """ Invert ``f`` modulo ``g`` when possible. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x)) Poly(-4/3, x, domain='QQ') >>> Poly(x**2 - 1, x).invert(Poly(x - 1, x)) Traceback (most recent call last): ... 
NotInvertible: zero divisor """ dom, per, F, G = f._unify(g) if auto and dom.is_Ring: F, G = F.to_field(), G.to_field() if hasattr(f.rep, 'invert'): result = F.invert(G) else: # pragma: no cover raise OperationNotSupported(f, 'invert') return per(result) def revert(f, n): """ Compute ``f**(-1)`` mod ``x**n``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(1, x).revert(2) Poly(1, x, domain='ZZ') >>> Poly(1 + x, x).revert(1) Poly(1, x, domain='ZZ') >>> Poly(x**2 - 1, x).revert(1) Traceback (most recent call last): ... NotReversible: only unity is reversible in a ring >>> Poly(1/x, x).revert(1) Traceback (most recent call last): ... PolynomialError: 1/x contains an element of the generators set """ if hasattr(f.rep, 'revert'): result = f.rep.revert(int(n)) else: # pragma: no cover raise OperationNotSupported(f, 'revert') return f.per(result) def subresultants(f, g): """ Computes the subresultant PRS of ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x)) [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'), Poly(-2, x, domain='ZZ')] """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'subresultants'): result = F.subresultants(G) else: # pragma: no cover raise OperationNotSupported(f, 'subresultants') return list(map(per, result)) def resultant(f, g, includePRS=False): """ Computes the resultant of ``f`` and ``g`` via PRS. If includePRS=True, it includes the subresultant PRS in the result. Because the PRS is used to calculate the resultant, this is more efficient than calling :func:`subresultants` separately. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = Poly(x**2 + 1, x) >>> f.resultant(Poly(x**2 - 1, x)) 4 >>> f.resultant(Poly(x**2 - 1, x), includePRS=True) (4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'), Poly(-2, x, domain='ZZ')]) """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'resultant'): if includePRS: result, R = F.resultant(G, includePRS=includePRS) else: result = F.resultant(G) else: # pragma: no cover raise OperationNotSupported(f, 'resultant') if includePRS: return (per(result, remove=0), list(map(per, R))) return per(result, remove=0) def discriminant(f): """ Computes the discriminant of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + 2*x + 3, x).discriminant() -8 """ if hasattr(f.rep, 'discriminant'): result = f.rep.discriminant() else: # pragma: no cover raise OperationNotSupported(f, 'discriminant') return f.per(result, remove=0) def dispersionset(f, g=None): r"""Compute the *dispersion set* of two polynomials. For two polynomials `f(x)` and `g(x)` with `\deg f > 0` and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as: .. math:: \operatorname{J}(f, g) & := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\ & = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\} For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`. 
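# A hedged sketch of ``revert`` above with a non-trivial input (the doctests
# only cover constant cases); over ZZ the trailing coefficient must be a
# unit, and the result is the truncated inverse power series:
#
# >>> from sympy import Poly
# >>> from sympy.abc import x
# >>> Poly(1 - x, x).revert(4)     # 1/(1 - x) = 1 + x + x**2 + x**3 mod x**4
# Poly(x**3 + x**2 + x + 1, x, domain='ZZ')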
Examples ======== >>> from sympy import poly >>> from sympy.polys.dispersion import dispersion, dispersionset >>> from sympy.abc import x Dispersion set and dispersion of a simple polynomial: >>> fp = poly((x - 3)*(x + 3), x) >>> sorted(dispersionset(fp)) [0, 6] >>> dispersion(fp) 6 Note that the definition of the dispersion is not symmetric: >>> fp = poly(x**4 - 3*x**2 + 1, x) >>> gp = fp.shift(-3) >>> sorted(dispersionset(fp, gp)) [2, 3, 4] >>> dispersion(fp, gp) 4 >>> sorted(dispersionset(gp, fp)) [] >>> dispersion(gp, fp) -oo Computing the dispersion also works over field extensions: >>> from sympy import sqrt >>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>') >>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>') >>> sorted(dispersionset(fp, gp)) [2] >>> sorted(dispersionset(gp, fp)) [1, 4] We can even perform the computations for polynomials having symbolic coefficients: >>> from sympy.abc import a >>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x) >>> sorted(dispersionset(fp)) [0, 1] See Also ======== dispersion References ========== 1. [ManWright94]_ 2. [Koepf98]_ 3. [Abramov71]_ 4. [Man93]_ """ from sympy.polys.dispersion import dispersionset return dispersionset(f, g) def dispersion(f, g=None): r"""Compute the *dispersion* of polynomials. For two polynomials `f(x)` and `g(x)` with `\deg f > 0` and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as: .. math:: \operatorname{dis}(f, g) & := \max\{ J(f,g) \cup \{0\} \} \\ & = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \} and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`. Examples ======== >>> from sympy import poly >>> from sympy.polys.dispersion import dispersion, dispersionset >>> from sympy.abc import x Dispersion set and dispersion of a simple polynomial: >>> fp = poly((x - 3)*(x + 3), x) >>> sorted(dispersionset(fp)) [0, 6] >>> dispersion(fp) 6 Note that the definition of the dispersion is not symmetric: >>> fp = poly(x**4 - 3*x**2 + 1, x) >>> gp = fp.shift(-3) >>> sorted(dispersionset(fp, gp)) [2, 3, 4] >>> dispersion(fp, gp) 4 >>> sorted(dispersionset(gp, fp)) [] >>> dispersion(gp, fp) -oo Computing the dispersion also works over field extensions: >>> from sympy import sqrt >>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>') >>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>') >>> sorted(dispersionset(fp, gp)) [2] >>> sorted(dispersionset(gp, fp)) [1, 4] We can even perform the computations for polynomials having symbolic coefficients: >>> from sympy.abc import a >>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x) >>> sorted(dispersionset(fp)) [0, 1] See Also ======== dispersionset References ========== 1. [ManWright94]_ 2. [Koepf98]_ 3. [Abramov71]_ 4. [Man93]_ """ from sympy.polys.dispersion import dispersion return dispersion(f, g) def cofactors(f, g): """ Returns the GCD of ``f`` and ``g`` and their cofactors. Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors of ``f`` and ``g``. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x)) (Poly(x - 1, x, domain='ZZ'), Poly(x + 1, x, domain='ZZ'), Poly(x - 2, x, domain='ZZ')) """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'cofactors'): h, cff, cfg = F.cofactors(G) else: # pragma: no cover raise OperationNotSupported(f, 'cofactors') return per(h), per(cff), per(cfg) def gcd(f, g): """ Returns the polynomial GCD of ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x)) Poly(x - 1, x, domain='ZZ') """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'gcd'): result = F.gcd(G) else: # pragma: no cover raise OperationNotSupported(f, 'gcd') return per(result) def lcm(f, g): """ Returns polynomial LCM of ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x)) Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ') """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'lcm'): result = F.lcm(G) else: # pragma: no cover raise OperationNotSupported(f, 'lcm') return per(result) def trunc(f, p): """ Reduce ``f`` modulo a constant ``p``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3) Poly(-x**3 - x + 1, x, domain='ZZ') """ p = f.rep.dom.convert(p) if hasattr(f.rep, 'trunc'): result = f.rep.trunc(p) else: # pragma: no cover raise OperationNotSupported(f, 'trunc') return f.per(result) def monic(self, auto=True): """ Divides all coefficients by ``LC(f)``. Examples ======== >>> from sympy import Poly, ZZ >>> from sympy.abc import x >>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic() Poly(x**2 + 2*x + 3, x, domain='QQ') >>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic() Poly(x**2 + 4/3*x + 2/3, x, domain='QQ') """ f = self if auto and f.rep.dom.is_Ring: f = f.to_field() if hasattr(f.rep, 'monic'): result = f.rep.monic() else: # pragma: no cover raise OperationNotSupported(f, 'monic') return f.per(result) def content(f): """ Returns the GCD of polynomial coefficients. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(6*x**2 + 8*x + 12, x).content() 2 """ if hasattr(f.rep, 'content'): result = f.rep.content() else: # pragma: no cover raise OperationNotSupported(f, 'content') return f.rep.dom.to_sympy(result) def primitive(f): """ Returns the content and a primitive form of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x**2 + 8*x + 12, x).primitive() (2, Poly(x**2 + 4*x + 6, x, domain='ZZ')) """ if hasattr(f.rep, 'primitive'): cont, result = f.rep.primitive() else: # pragma: no cover raise OperationNotSupported(f, 'primitive') return f.rep.dom.to_sympy(cont), f.per(result) def compose(f, g): """ Computes the functional composition of ``f`` and ``g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + x, x).compose(Poly(x - 1, x)) Poly(x**2 - x, x, domain='ZZ') """ _, per, F, G = f._unify(g) if hasattr(f.rep, 'compose'): result = F.compose(G) else: # pragma: no cover raise OperationNotSupported(f, 'compose') return per(result) def decompose(f): """ Computes a functional decomposition of ``f``. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose() [Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')] """ if hasattr(f.rep, 'decompose'): result = f.rep.decompose() else: # pragma: no cover raise OperationNotSupported(f, 'decompose') return list(map(f.per, result)) def shift(f, a): """ Efficiently compute Taylor shift ``f(x + a)``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 2*x + 1, x).shift(2) Poly(x**2 + 2*x + 1, x, domain='ZZ') """ if hasattr(f.rep, 'shift'): result = f.rep.shift(a) else: # pragma: no cover raise OperationNotSupported(f, 'shift') return f.per(result) def transform(f, p, q): """ Efficiently evaluate the functional transformation ``q**n * f(p/q)``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 2*x + 1, x).transform(Poly(x + 1, x), Poly(x - 1, x)) Poly(4, x, domain='ZZ') """ P, Q = p.unify(q) F, P = f.unify(P) F, Q = F.unify(Q) if hasattr(F.rep, 'transform'): result = F.rep.transform(P.rep, Q.rep) else: # pragma: no cover raise OperationNotSupported(F, 'transform') return F.per(result) def sturm(self, auto=True): """ Computes the Sturm sequence of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 - 2*x**2 + x - 3, x).sturm() [Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'), Poly(3*x**2 - 4*x + 1, x, domain='QQ'), Poly(2/9*x + 25/9, x, domain='QQ'), Poly(-2079/4, x, domain='QQ')] """ f = self if auto and f.rep.dom.is_Ring: f = f.to_field() if hasattr(f.rep, 'sturm'): result = f.rep.sturm() else: # pragma: no cover raise OperationNotSupported(f, 'sturm') return list(map(f.per, result)) def gff_list(f): """ Computes greatest factorial factorization of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = x**5 + 2*x**4 - x**3 - 2*x**2 >>> Poly(f).gff_list() [(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)] """ if hasattr(f.rep, 'gff_list'): result = f.rep.gff_list() else: # pragma: no cover raise OperationNotSupported(f, 'gff_list') return [(f.per(g), k) for g, k in result] def norm(f): """ Computes the product, ``Norm(f)``, of the conjugates of a polynomial ``f`` defined over a number field ``K``. Examples ======== >>> from sympy import Poly, sqrt >>> from sympy.abc import x >>> a, b = sqrt(2), sqrt(3) A polynomial over a quadratic extension. Two conjugates x - a and x + a. >>> f = Poly(x - a, x, extension=a) >>> f.norm() Poly(x**2 - 2, x, domain='QQ') A polynomial over a quartic extension. Four conjugates x - a, x - a, x + a and x + a. >>> f = Poly(x - a, x, extension=(a, b)) >>> f.norm() Poly(x**4 - 4*x**2 + 4, x, domain='QQ') """ if hasattr(f.rep, 'norm'): r = f.rep.norm() else: # pragma: no cover raise OperationNotSupported(f, 'norm') return f.per(r) def sqf_norm(f): """ Computes square-free norm of ``f``. Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``, where ``a`` is the algebraic extension of the ground domain. 
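# A hedged round-trip check linking ``compose`` and ``decompose`` above,
# using the polynomial from the decompose docstring:
#
# >>> from sympy import Poly
# >>> from sympy.abc import x
# >>> f = Poly(x**4 + 2*x**3 - x - 1, x)
# >>> g, h = f.decompose()
# >>> g.compose(h) == f
# True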
Examples ======== >>> from sympy import Poly, sqrt >>> from sympy.abc import x >>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm() >>> s 1 >>> f Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>') >>> r Poly(x**4 - 4*x**2 + 16, x, domain='QQ') """ if hasattr(f.rep, 'sqf_norm'): s, g, r = f.rep.sqf_norm() else: # pragma: no cover raise OperationNotSupported(f, 'sqf_norm') return s, f.per(g), f.per(r) def sqf_part(f): """ Computes square-free part of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**3 - 3*x - 2, x).sqf_part() Poly(x**2 - x - 2, x, domain='ZZ') """ if hasattr(f.rep, 'sqf_part'): result = f.rep.sqf_part() else: # pragma: no cover raise OperationNotSupported(f, 'sqf_part') return f.per(result) def sqf_list(f, all=False): """ Returns a list of square-free factors of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16 >>> Poly(f).sqf_list() (2, [(Poly(x + 1, x, domain='ZZ'), 2), (Poly(x + 2, x, domain='ZZ'), 3)]) >>> Poly(f).sqf_list(all=True) (2, [(Poly(1, x, domain='ZZ'), 1), (Poly(x + 1, x, domain='ZZ'), 2), (Poly(x + 2, x, domain='ZZ'), 3)]) """ if hasattr(f.rep, 'sqf_list'): coeff, factors = f.rep.sqf_list(all) else: # pragma: no cover raise OperationNotSupported(f, 'sqf_list') return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors] def sqf_list_include(f, all=False): """ Returns a list of square-free factors of ``f``. Examples ======== >>> from sympy import Poly, expand >>> from sympy.abc import x >>> f = expand(2*(x + 1)**3*x**4) >>> f 2*x**7 + 6*x**6 + 6*x**5 + 2*x**4 >>> Poly(f).sqf_list_include() [(Poly(2, x, domain='ZZ'), 1), (Poly(x + 1, x, domain='ZZ'), 3), (Poly(x, x, domain='ZZ'), 4)] >>> Poly(f).sqf_list_include(all=True) [(Poly(2, x, domain='ZZ'), 1), (Poly(1, x, domain='ZZ'), 2), (Poly(x + 1, x, domain='ZZ'), 3), (Poly(x, x, domain='ZZ'), 4)] """ if hasattr(f.rep, 'sqf_list_include'): factors = f.rep.sqf_list_include(all) else: # pragma: no cover raise OperationNotSupported(f, 'sqf_list_include') return [(f.per(g), k) for g, k in factors] def factor_list(f): """ Returns a list of irreducible factors of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y >>> Poly(f).factor_list() (2, [(Poly(x + y, x, y, domain='ZZ'), 1), (Poly(x**2 + 1, x, y, domain='ZZ'), 2)]) """ if hasattr(f.rep, 'factor_list'): try: coeff, factors = f.rep.factor_list() except DomainError: return S.One, [(f, 1)] else: # pragma: no cover raise OperationNotSupported(f, 'factor_list') return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors] def factor_list_include(f): """ Returns a list of irreducible factors of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y >>> Poly(f).factor_list_include() [(Poly(2*x + 2*y, x, y, domain='ZZ'), 1), (Poly(x**2 + 1, x, y, domain='ZZ'), 2)] """ if hasattr(f.rep, 'factor_list_include'): try: factors = f.rep.factor_list_include() except DomainError: return [(f, 1)] else: # pragma: no cover raise OperationNotSupported(f, 'factor_list_include') return [(f.per(g), k) for g, k in factors] def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False): """ Compute isolating intervals for roots of ``f``. For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used. 
References ========== .. [#] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root Isolation Methods . Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005. .. [#] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 3, x).intervals() [((-2, -1), 1), ((1, 2), 1)] >>> Poly(x**2 - 3, x).intervals(eps=1e-2) [((-26/15, -19/11), 1), ((19/11, 26/15), 1)] """ if eps is not None: eps = QQ.convert(eps) if eps <= 0: raise ValueError("'eps' must be a positive rational") if inf is not None: inf = QQ.convert(inf) if sup is not None: sup = QQ.convert(sup) if hasattr(f.rep, 'intervals'): result = f.rep.intervals( all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf) else: # pragma: no cover raise OperationNotSupported(f, 'intervals') if sqf: def _real(interval): s, t = interval return (QQ.to_sympy(s), QQ.to_sympy(t)) if not all: return list(map(_real, result)) def _complex(rectangle): (u, v), (s, t) = rectangle return (QQ.to_sympy(u) + I*QQ.to_sympy(v), QQ.to_sympy(s) + I*QQ.to_sympy(t)) real_part, complex_part = result return list(map(_real, real_part)), list(map(_complex, complex_part)) else: def _real(interval): (s, t), k = interval return ((QQ.to_sympy(s), QQ.to_sympy(t)), k) if not all: return list(map(_real, result)) def _complex(rectangle): ((u, v), (s, t)), k = rectangle return ((QQ.to_sympy(u) + I*QQ.to_sympy(v), QQ.to_sympy(s) + I*QQ.to_sympy(t)), k) real_part, complex_part = result return list(map(_real, real_part)), list(map(_complex, complex_part)) def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False): """ Refine an isolating interval of a root to the given precision. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2) (19/11, 26/15) """ if check_sqf and not f.is_sqf: raise PolynomialError("only square-free polynomials supported") s, t = QQ.convert(s), QQ.convert(t) if eps is not None: eps = QQ.convert(eps) if eps <= 0: raise ValueError("'eps' must be a positive rational") if steps is not None: steps = int(steps) elif eps is None: steps = 1 if hasattr(f.rep, 'refine_root'): S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast) else: # pragma: no cover raise OperationNotSupported(f, 'refine_root') return QQ.to_sympy(S), QQ.to_sympy(T) def count_roots(f, inf=None, sup=None): """ Return the number of roots of ``f`` in ``[inf, sup]`` interval. 
Examples ======== >>> from sympy import Poly, I >>> from sympy.abc import x >>> Poly(x**4 - 4, x).count_roots(-3, 3) 2 >>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I) 1 """ inf_real, sup_real = True, True if inf is not None: inf = sympify(inf) if inf is S.NegativeInfinity: inf = None else: re, im = inf.as_real_imag() if not im: inf = QQ.convert(inf) else: inf, inf_real = list(map(QQ.convert, (re, im))), False if sup is not None: sup = sympify(sup) if sup is S.Infinity: sup = None else: re, im = sup.as_real_imag() if not im: sup = QQ.convert(sup) else: sup, sup_real = list(map(QQ.convert, (re, im))), False if inf_real and sup_real: if hasattr(f.rep, 'count_real_roots'): count = f.rep.count_real_roots(inf=inf, sup=sup) else: # pragma: no cover raise OperationNotSupported(f, 'count_real_roots') else: if inf_real and inf is not None: inf = (inf, QQ.zero) if sup_real and sup is not None: sup = (sup, QQ.zero) if hasattr(f.rep, 'count_complex_roots'): count = f.rep.count_complex_roots(inf=inf, sup=sup) else: # pragma: no cover raise OperationNotSupported(f, 'count_complex_roots') return Integer(count) def root(f, index, radicals=True): """ Get an indexed root of a polynomial. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4) >>> f.root(0) -1/2 >>> f.root(1) 2 >>> f.root(2) 2 >>> f.root(3) Traceback (most recent call last): ... IndexError: root index out of [-3, 2] range, got 3 >>> Poly(x**5 + x + 1).root(0) CRootOf(x**3 - x**2 + 1, 0) """ return sympy.polys.rootoftools.rootof(f, index, radicals=radicals) def real_roots(f, multiple=True, radicals=True): """ Return a list of real roots with multiplicities. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots() [-1/2, 2, 2] >>> Poly(x**3 + x + 1).real_roots() [CRootOf(x**3 + x + 1, 0)] """ reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals) if multiple: return reals else: return group(reals, multiple=False) def all_roots(f, multiple=True, radicals=True): """ Return a list of real and complex roots with multiplicities. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots() [-1/2, 2, 2] >>> Poly(x**3 + x + 1).all_roots() [CRootOf(x**3 + x + 1, 0), CRootOf(x**3 + x + 1, 1), CRootOf(x**3 + x + 1, 2)] """ roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals) if multiple: return roots else: return group(roots, multiple=False) def nroots(f, n=15, maxsteps=50, cleanup=True): """ Compute numerical approximations of roots of ``f``. Parameters ========== n ... the number of digits to calculate maxsteps ... the maximum number of iterations to do If the accuracy `n` cannot be reached in `maxsteps`, it will raise an exception. You need to rerun with higher maxsteps. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 3).nroots(n=15) [-1.73205080756888, 1.73205080756888] >>> Poly(x**2 - 3).nroots(n=30) [-1.73205080756887729352744634151, 1.73205080756887729352744634151] """ from sympy.functions.elementary.complexes import sign if f.is_multivariate: raise MultivariatePolynomialError( "can't compute numerical roots of %s" % f) if f.degree() <= 0: return [] # For integer and rational coefficients, convert them to integers only # (for accuracy). Otherwise just try to convert the coefficients to # mpmath.mpc and raise an exception if the conversion fails. 
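        # For example, a polynomial over QQ with coefficients [1/2, 0, -1/3]
        # has denominator lcm 6, so the integer coefficient list used for
        # root finding below becomes [3, 0, -2].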
if f.rep.dom is ZZ: coeffs = [int(coeff) for coeff in f.all_coeffs()] elif f.rep.dom is QQ: denoms = [coeff.q for coeff in f.all_coeffs()] from sympy.core.numbers import ilcm fac = ilcm(*denoms) coeffs = [int(coeff*fac) for coeff in f.all_coeffs()] else: coeffs = [coeff.evalf(n=n).as_real_imag() for coeff in f.all_coeffs()] try: coeffs = [mpmath.mpc(*coeff) for coeff in coeffs] except TypeError: raise DomainError("Numerical domain expected, got %s" % \ f.rep.dom) dps = mpmath.mp.dps mpmath.mp.dps = n try: # We need to add extra precision to guard against losing accuracy. # 10 times the degree of the polynomial seems to work well. roots = mpmath.polyroots(coeffs, maxsteps=maxsteps, cleanup=cleanup, error=False, extraprec=f.degree()*10) # Mpmath puts real roots first, then complex ones (as does all_roots) # so we make sure this convention holds here, too. roots = list(map(sympify, sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, abs(r.imag), sign(r.imag))))) except NoConvergence: raise NoConvergence( 'convergence to root failed; try n < %s or maxsteps > %s' % ( n, maxsteps)) finally: mpmath.mp.dps = dps return roots def ground_roots(f): """ Compute roots of ``f`` by factorization in the ground domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots() {0: 2, 1: 2} """ if f.is_multivariate: raise MultivariatePolynomialError( "can't compute ground roots of %s" % f) roots = {} for factor, k in f.factor_list()[1]: if factor.is_linear: a, b = factor.all_coeffs() roots[-b/a] = k return roots def nth_power_roots_poly(f, n): """ Construct a polynomial with n-th powers of roots of ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = Poly(x**4 - x**2 + 1) >>> f.nth_power_roots_poly(2) Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ') >>> f.nth_power_roots_poly(3) Poly(x**4 + 2*x**2 + 1, x, domain='ZZ') >>> f.nth_power_roots_poly(4) Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ') >>> f.nth_power_roots_poly(12) Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ') """ if f.is_multivariate: raise MultivariatePolynomialError( "must be a univariate polynomial") N = sympify(n) if N.is_Integer and N >= 1: n = int(N) else: raise ValueError("'n' must an integer and n >= 1, got %s" % n) x = f.gen t = Dummy('t') r = f.resultant(f.__class__.from_expr(x**n - t, x, t)) return r.replace(t, x) def cancel(f, g, include=False): """ Cancel common factors in a rational function ``f/g``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x)) (1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ')) >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True) (Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ')) """ dom, per, F, G = f._unify(g) if hasattr(F, 'cancel'): result = F.cancel(G, include=include) else: # pragma: no cover raise OperationNotSupported(f, 'cancel') if not include: if dom.has_assoc_Ring: dom = dom.get_ring() cp, cq, p, q = result cp = dom.to_sympy(cp) cq = dom.to_sympy(cq) return cp/cq, per(p), per(q) else: return tuple(map(per, result)) @property def is_zero(f): """ Returns ``True`` if ``f`` is a zero polynomial. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(0, x).is_zero True >>> Poly(1, x).is_zero False """ return f.rep.is_zero @property def is_one(f): """ Returns ``True`` if ``f`` is a unit polynomial. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(0, x).is_one False >>> Poly(1, x).is_one True """ return f.rep.is_one @property def is_sqf(f): """ Returns ``True`` if ``f`` is a square-free polynomial. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 - 2*x + 1, x).is_sqf False >>> Poly(x**2 - 1, x).is_sqf True """ return f.rep.is_sqf @property def is_monic(f): """ Returns ``True`` if the leading coefficient of ``f`` is one. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x + 2, x).is_monic True >>> Poly(2*x + 2, x).is_monic False """ return f.rep.is_monic @property def is_primitive(f): """ Returns ``True`` if GCD of the coefficients of ``f`` is one. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(2*x**2 + 6*x + 12, x).is_primitive False >>> Poly(x**2 + 3*x + 6, x).is_primitive True """ return f.rep.is_primitive @property def is_ground(f): """ Returns ``True`` if ``f`` is an element of the ground domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x, x).is_ground False >>> Poly(2, x).is_ground True >>> Poly(y, x).is_ground True """ return f.rep.is_ground @property def is_linear(f): """ Returns ``True`` if ``f`` is linear in all its variables. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x + y + 2, x, y).is_linear True >>> Poly(x*y + 2, x, y).is_linear False """ return f.rep.is_linear @property def is_quadratic(f): """ Returns ``True`` if ``f`` is quadratic in all its variables. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x*y + 2, x, y).is_quadratic True >>> Poly(x*y**2 + 2, x, y).is_quadratic False """ return f.rep.is_quadratic @property def is_monomial(f): """ Returns ``True`` if ``f`` is zero or has only one term. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(3*x**2, x).is_monomial True >>> Poly(3*x**2 + 1, x).is_monomial False """ return f.rep.is_monomial @property def is_homogeneous(f): """ Returns ``True`` if ``f`` is a homogeneous polynomial. A homogeneous polynomial is a polynomial whose all monomials with non-zero coefficients have the same total degree. If you want not only to check if a polynomial is homogeneous but also compute its homogeneous order, then use :func:`Poly.homogeneous_order`. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + x*y, x, y).is_homogeneous True >>> Poly(x**3 + x*y, x, y).is_homogeneous False """ return f.rep.is_homogeneous @property def is_irreducible(f): """ Returns ``True`` if ``f`` has no factors over its domain. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible True >>> Poly(x**2 + 1, x, modulus=2).is_irreducible False """ return f.rep.is_irreducible @property def is_univariate(f): """ Returns ``True`` if ``f`` is a univariate polynomial. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + x + 1, x).is_univariate True >>> Poly(x*y**2 + x*y + 1, x, y).is_univariate False >>> Poly(x*y**2 + x*y + 1, x).is_univariate True >>> Poly(x**2 + x + 1, x, y).is_univariate False """ return len(f.gens) == 1 @property def is_multivariate(f): """ Returns ``True`` if ``f`` is a multivariate polynomial. 
Examples ======== >>> from sympy import Poly >>> from sympy.abc import x, y >>> Poly(x**2 + x + 1, x).is_multivariate False >>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate True >>> Poly(x*y**2 + x*y + 1, x).is_multivariate False >>> Poly(x**2 + x + 1, x, y).is_multivariate True """ return len(f.gens) != 1 @property def is_cyclotomic(f): """ Returns ``True`` if ``f`` is a cyclotomic polnomial. Examples ======== >>> from sympy import Poly >>> from sympy.abc import x >>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1 >>> Poly(f).is_cyclotomic False >>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1 >>> Poly(g).is_cyclotomic True """ return f.rep.is_cyclotomic def __abs__(f): return f.abs() def __neg__(f): return f.neg() @_polifyit def __add__(f, g): return f.add(g) @_polifyit def __radd__(f, g): return g.add(f) @_polifyit def __sub__(f, g): return f.sub(g) @_polifyit def __rsub__(f, g): return g.sub(f) @_polifyit def __mul__(f, g): return f.mul(g) @_polifyit def __rmul__(f, g): return g.mul(f) @_sympifyit('n', NotImplemented) def __pow__(f, n): if n.is_Integer and n >= 0: return f.pow(n) else: return NotImplemented @_polifyit def __divmod__(f, g): return f.div(g) @_polifyit def __rdivmod__(f, g): return g.div(f) @_polifyit def __mod__(f, g): return f.rem(g) @_polifyit def __rmod__(f, g): return g.rem(f) @_polifyit def __floordiv__(f, g): return f.quo(g) @_polifyit def __rfloordiv__(f, g): return g.quo(f) @_sympifyit('g', NotImplemented) def __truediv__(f, g): return f.as_expr()/g.as_expr() @_sympifyit('g', NotImplemented) def __rtruediv__(f, g): return g.as_expr()/f.as_expr() @_sympifyit('other', NotImplemented) def __eq__(self, other): f, g = self, other if not g.is_Poly: try: g = f.__class__(g, f.gens, domain=f.get_domain()) except (PolynomialError, DomainError, CoercionFailed): return False if f.gens != g.gens: return False if f.rep.dom != g.rep.dom: return False return f.rep == g.rep @_sympifyit('g', NotImplemented) def __ne__(f, g): return not f == g def __bool__(f): return not f.is_zero def eq(f, g, strict=False): if not strict: return f == g else: return f._strict_eq(sympify(g)) def ne(f, g, strict=False): return not f.eq(g, strict=strict) def _strict_eq(f, g): return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True) @public class PurePoly(Poly): """Class for representing pure polynomials. """ def _hashable_content(self): """Allow SymPy to hash Poly instances. """ return (self.rep,) def __hash__(self): return super().__hash__() @property def free_symbols(self): """ Free symbols of a polynomial. 
Examples ======== >>> from sympy import PurePoly >>> from sympy.abc import x, y >>> PurePoly(x**2 + 1).free_symbols set() >>> PurePoly(x**2 + y).free_symbols set() >>> PurePoly(x**2 + y, x).free_symbols {y} """ return self.free_symbols_in_domain @_sympifyit('other', NotImplemented) def __eq__(self, other): f, g = self, other if not g.is_Poly: try: g = f.__class__(g, f.gens, domain=f.get_domain()) except (PolynomialError, DomainError, CoercionFailed): return False if len(f.gens) != len(g.gens): return False if f.rep.dom != g.rep.dom: try: dom = f.rep.dom.unify(g.rep.dom, f.gens) except UnificationFailed: return False f = f.set_domain(dom) g = g.set_domain(dom) return f.rep == g.rep def _strict_eq(f, g): return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True) def _unify(f, g): g = sympify(g) if not g.is_Poly: try: return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g)) except CoercionFailed: raise UnificationFailed("can't unify %s with %s" % (f, g)) if len(f.gens) != len(g.gens): raise UnificationFailed("can't unify %s with %s" % (f, g)) if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)): raise UnificationFailed("can't unify %s with %s" % (f, g)) cls = f.__class__ gens = f.gens dom = f.rep.dom.unify(g.rep.dom, gens) F = f.rep.convert(dom) G = g.rep.convert(dom) def per(rep, dom=dom, gens=gens, remove=None): if remove is not None: gens = gens[:remove] + gens[remove + 1:] if not gens: return dom.to_sympy(rep) return cls.new(rep, *gens) return dom, per, F, G @public def poly_from_expr(expr, *gens, **args): """Construct a polynomial from an expression. """ opt = options.build_options(gens, args) return _poly_from_expr(expr, opt) def _poly_from_expr(expr, opt): """Construct a polynomial from an expression. """ orig, expr = expr, sympify(expr) if not isinstance(expr, Basic): raise PolificationFailed(opt, orig, expr) elif expr.is_Poly: poly = expr.__class__._from_poly(expr, opt) opt.gens = poly.gens opt.domain = poly.domain if opt.polys is None: opt.polys = True return poly, opt elif opt.expand: expr = expr.expand() rep, opt = _dict_from_expr(expr, opt) if not opt.gens: raise PolificationFailed(opt, orig, expr) monoms, coeffs = list(zip(*list(rep.items()))) domain = opt.domain if domain is None: opt.domain, coeffs = construct_domain(coeffs, opt=opt) else: coeffs = list(map(domain.from_sympy, coeffs)) rep = dict(list(zip(monoms, coeffs))) poly = Poly._from_dict(rep, opt) if opt.polys is None: opt.polys = False return poly, opt @public def parallel_poly_from_expr(exprs, *gens, **args): """Construct polynomials from expressions. """ opt = options.build_options(gens, args) return _parallel_poly_from_expr(exprs, opt) def _parallel_poly_from_expr(exprs, opt): """Construct polynomials from expressions. 
""" from sympy.functions.elementary.piecewise import Piecewise if len(exprs) == 2: f, g = exprs if isinstance(f, Poly) and isinstance(g, Poly): f = f.__class__._from_poly(f, opt) g = g.__class__._from_poly(g, opt) f, g = f.unify(g) opt.gens = f.gens opt.domain = f.domain if opt.polys is None: opt.polys = True return [f, g], opt origs, exprs = list(exprs), [] _exprs, _polys = [], [] failed = False for i, expr in enumerate(origs): expr = sympify(expr) if isinstance(expr, Basic): if expr.is_Poly: _polys.append(i) else: _exprs.append(i) if opt.expand: expr = expr.expand() else: failed = True exprs.append(expr) if failed: raise PolificationFailed(opt, origs, exprs, True) if _polys: # XXX: this is a temporary solution for i in _polys: exprs[i] = exprs[i].as_expr() reps, opt = _parallel_dict_from_expr(exprs, opt) if not opt.gens: raise PolificationFailed(opt, origs, exprs, True) for k in opt.gens: if isinstance(k, Piecewise): raise PolynomialError("Piecewise generators do not make sense") coeffs_list, lengths = [], [] all_monoms = [] all_coeffs = [] for rep in reps: monoms, coeffs = list(zip(*list(rep.items()))) coeffs_list.extend(coeffs) all_monoms.append(monoms) lengths.append(len(coeffs)) domain = opt.domain if domain is None: opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt) else: coeffs_list = list(map(domain.from_sympy, coeffs_list)) for k in lengths: all_coeffs.append(coeffs_list[:k]) coeffs_list = coeffs_list[k:] polys = [] for monoms, coeffs in zip(all_monoms, all_coeffs): rep = dict(list(zip(monoms, coeffs))) poly = Poly._from_dict(rep, opt) polys.append(poly) if opt.polys is None: opt.polys = bool(_polys) return polys, opt def _update_args(args, key, value): """Add a new ``(key, value)`` pair to arguments ``dict``. """ args = dict(args) if key not in args: args[key] = value return args @public def degree(f, gen=0): """ Return the degree of ``f`` in the given variable. The degree of 0 is negative infinity. Examples ======== >>> from sympy import degree >>> from sympy.abc import x, y >>> degree(x**2 + y*x + 1, gen=x) 2 >>> degree(x**2 + y*x + 1, gen=y) 1 >>> degree(0, x) -oo See also ======== sympy.polys.polytools.Poly.total_degree degree_list """ f = sympify(f, strict=True) gen_is_Num = sympify(gen, strict=True).is_Number if f.is_Poly: p = f isNum = p.as_expr().is_Number else: isNum = f.is_Number if not isNum: if gen_is_Num: p, _ = poly_from_expr(f) else: p, _ = poly_from_expr(f, gen) if isNum: return S.Zero if f else S.NegativeInfinity if not gen_is_Num: if f.is_Poly and gen not in p.gens: # try recast without explicit gens p, _ = poly_from_expr(f.as_expr()) if gen not in p.gens: return S.Zero elif not f.is_Poly and len(f.free_symbols) > 1: raise TypeError(filldedent(''' A symbolic generator of interest is required for a multivariate expression like func = %s, e.g. degree(func, gen = %s) instead of degree(func, gen = %s). ''' % (f, next(ordered(f.free_symbols)), gen))) result = p.degree(gen) return Integer(result) if isinstance(result, int) else S.NegativeInfinity @public def total_degree(f, *gens): """ Return the total_degree of ``f`` in the given variables. 
Examples ======== >>> from sympy import total_degree, Poly >>> from sympy.abc import x, y >>> total_degree(1) 0 >>> total_degree(x + x*y) 2 >>> total_degree(x + x*y, x) 1 If the expression is a Poly and no variables are given then the generators of the Poly will be used: >>> p = Poly(x + x*y, y) >>> total_degree(p) 1 To deal with the underlying expression of the Poly, convert it to an Expr: >>> total_degree(p.as_expr()) 2 This is done automatically if any variables are given: >>> total_degree(p, x) 1 See also ======== degree """ p = sympify(f) if p.is_Poly: p = p.as_expr() if p.is_Number: rv = 0 else: if f.is_Poly: gens = gens or f.gens rv = Poly(p, gens).total_degree() return Integer(rv) @public def degree_list(f, *gens, **args): """ Return a list of degrees of ``f`` in all variables. Examples ======== >>> from sympy import degree_list >>> from sympy.abc import x, y >>> degree_list(x**2 + y*x + 1) (2, 1) """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('degree_list', 1, exc) degrees = F.degree_list() return tuple(map(Integer, degrees)) @public def LC(f, *gens, **args): """ Return the leading coefficient of ``f``. Examples ======== >>> from sympy import LC >>> from sympy.abc import x, y >>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y) 4 """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('LC', 1, exc) return F.LC(order=opt.order) @public def LM(f, *gens, **args): """ Return the leading monomial of ``f``. Examples ======== >>> from sympy import LM >>> from sympy.abc import x, y >>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y) x**2 """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('LM', 1, exc) monom = F.LM(order=opt.order) return monom.as_expr() @public def LT(f, *gens, **args): """ Return the leading term of ``f``. Examples ======== >>> from sympy import LT >>> from sympy.abc import x, y >>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y) 4*x**2 """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('LT', 1, exc) monom, coeff = F.LT(order=opt.order) return coeff*monom.as_expr() @public def pdiv(f, g, *gens, **args): """ Compute polynomial pseudo-division of ``f`` and ``g``. Examples ======== >>> from sympy import pdiv >>> from sympy.abc import x >>> pdiv(x**2 + 1, 2*x - 4) (2*x + 4, 20) """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('pdiv', 2, exc) q, r = F.pdiv(G) if not opt.polys: return q.as_expr(), r.as_expr() else: return q, r @public def prem(f, g, *gens, **args): """ Compute polynomial pseudo-remainder of ``f`` and ``g``. Examples ======== >>> from sympy import prem >>> from sympy.abc import x >>> prem(x**2 + 1, 2*x - 4) 20 """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('prem', 2, exc) r = F.prem(G) if not opt.polys: return r.as_expr() else: return r @public def pquo(f, g, *gens, **args): """ Compute polynomial pseudo-quotient of ``f`` and ``g``. 
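    Together with the pseudo-remainder, the pseudo-quotient satisfies the
    pseudo-division identity
    ``LC(g)**(degree(f) - degree(g) + 1)*f = g*pquo(f, g) + prem(f, g)``,
    which can be checked directly, e.g.:

    >>> from sympy import pquo, prem, LC, degree, expand
    >>> from sympy.abc import x
    >>> f, g = x**2 + 1, 2*x - 4
    >>> expand(LC(g)**(degree(f) - degree(g) + 1)*f - g*pquo(f, g)) == prem(f, g)
    True
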
Examples ======== >>> from sympy import pquo >>> from sympy.abc import x >>> pquo(x**2 + 1, 2*x - 4) 2*x + 4 >>> pquo(x**2 - 1, 2*x - 1) 2*x + 1 """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('pquo', 2, exc) try: q = F.pquo(G) except ExactQuotientFailed: raise ExactQuotientFailed(f, g) if not opt.polys: return q.as_expr() else: return q @public def pexquo(f, g, *gens, **args): """ Compute polynomial exact pseudo-quotient of ``f`` and ``g``. Examples ======== >>> from sympy import pexquo >>> from sympy.abc import x >>> pexquo(x**2 - 1, 2*x - 2) 2*x + 2 >>> pexquo(x**2 + 1, 2*x - 4) Traceback (most recent call last): ... ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1 """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('pexquo', 2, exc) q = F.pexquo(G) if not opt.polys: return q.as_expr() else: return q @public def div(f, g, *gens, **args): """ Compute polynomial division of ``f`` and ``g``. Examples ======== >>> from sympy import div, ZZ, QQ >>> from sympy.abc import x >>> div(x**2 + 1, 2*x - 4, domain=ZZ) (0, x**2 + 1) >>> div(x**2 + 1, 2*x - 4, domain=QQ) (x/2 + 1, 5) """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('div', 2, exc) q, r = F.div(G, auto=opt.auto) if not opt.polys: return q.as_expr(), r.as_expr() else: return q, r @public def rem(f, g, *gens, **args): """ Compute polynomial remainder of ``f`` and ``g``. Examples ======== >>> from sympy import rem, ZZ, QQ >>> from sympy.abc import x >>> rem(x**2 + 1, 2*x - 4, domain=ZZ) x**2 + 1 >>> rem(x**2 + 1, 2*x - 4, domain=QQ) 5 """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('rem', 2, exc) r = F.rem(G, auto=opt.auto) if not opt.polys: return r.as_expr() else: return r @public def quo(f, g, *gens, **args): """ Compute polynomial quotient of ``f`` and ``g``. Examples ======== >>> from sympy import quo >>> from sympy.abc import x >>> quo(x**2 + 1, 2*x - 4) x/2 + 1 >>> quo(x**2 - 1, x - 1) x + 1 """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('quo', 2, exc) q = F.quo(G, auto=opt.auto) if not opt.polys: return q.as_expr() else: return q @public def exquo(f, g, *gens, **args): """ Compute polynomial exact quotient of ``f`` and ``g``. Examples ======== >>> from sympy import exquo >>> from sympy.abc import x >>> exquo(x**2 - 1, x - 1) x + 1 >>> exquo(x**2 + 1, 2*x - 4) Traceback (most recent call last): ... ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1 """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('exquo', 2, exc) q = F.exquo(G, auto=opt.auto) if not opt.polys: return q.as_expr() else: return q @public def half_gcdex(f, g, *gens, **args): """ Half extended Euclidean algorithm of ``f`` and ``g``. Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``. 
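    For instance, the relation ``s*f = h (mod g)`` can be verified with
    :func:`rem`:

    >>> from sympy import half_gcdex, rem
    >>> from sympy.abc import x
    >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
    >>> g = x**3 + x**2 - 4*x - 4
    >>> s, h = half_gcdex(f, g)
    >>> rem(s*f, g) == h
    True
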
Examples ======== >>> from sympy import half_gcdex >>> from sympy.abc import x >>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4) (3/5 - x/5, x + 1) """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: domain, (a, b) = construct_domain(exc.exprs) try: s, h = domain.half_gcdex(a, b) except NotImplementedError: raise ComputationFailed('half_gcdex', 2, exc) else: return domain.to_sympy(s), domain.to_sympy(h) s, h = F.half_gcdex(G, auto=opt.auto) if not opt.polys: return s.as_expr(), h.as_expr() else: return s, h @public def gcdex(f, g, *gens, **args): """ Extended Euclidean algorithm of ``f`` and ``g``. Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``. Examples ======== >>> from sympy import gcdex >>> from sympy.abc import x >>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4) (3/5 - x/5, x**2/5 - 6*x/5 + 2, x + 1) """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: domain, (a, b) = construct_domain(exc.exprs) try: s, t, h = domain.gcdex(a, b) except NotImplementedError: raise ComputationFailed('gcdex', 2, exc) else: return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h) s, t, h = F.gcdex(G, auto=opt.auto) if not opt.polys: return s.as_expr(), t.as_expr(), h.as_expr() else: return s, t, h @public def invert(f, g, *gens, **args): """ Invert ``f`` modulo ``g`` when possible. Examples ======== >>> from sympy import invert, S >>> from sympy.core.numbers import mod_inverse >>> from sympy.abc import x >>> invert(x**2 - 1, 2*x - 1) -4/3 >>> invert(x**2 - 1, x - 1) Traceback (most recent call last): ... NotInvertible: zero divisor For more efficient inversion of Rationals, use the :obj:`~.mod_inverse` function: >>> mod_inverse(3, 5) 2 >>> (S(2)/5).invert(S(7)/3) 5/2 See Also ======== sympy.core.numbers.mod_inverse """ options.allowed_flags(args, ['auto', 'polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: domain, (a, b) = construct_domain(exc.exprs) try: return domain.to_sympy(domain.invert(a, b)) except NotImplementedError: raise ComputationFailed('invert', 2, exc) h = F.invert(G, auto=opt.auto) if not opt.polys: return h.as_expr() else: return h @public def subresultants(f, g, *gens, **args): """ Compute subresultant PRS of ``f`` and ``g``. Examples ======== >>> from sympy import subresultants >>> from sympy.abc import x >>> subresultants(x**2 + 1, x**2 - 1) [x**2 + 1, x**2 - 1, -2] """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('subresultants', 2, exc) result = F.subresultants(G) if not opt.polys: return [r.as_expr() for r in result] else: return result @public def resultant(f, g, *gens, includePRS=False, **args): """ Compute resultant of ``f`` and ``g``. 
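    The resultant vanishes exactly when ``f`` and ``g`` have a common root;
    for instance ``x**2 - 1`` and ``x - 1`` share the root ``x = 1``:

    >>> from sympy import resultant
    >>> from sympy.abc import x
    >>> resultant(x**2 - 1, x - 1)
    0
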
Examples ======== >>> from sympy import resultant >>> from sympy.abc import x >>> resultant(x**2 + 1, x**2 - 1) 4 """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('resultant', 2, exc) if includePRS: result, R = F.resultant(G, includePRS=includePRS) else: result = F.resultant(G) if not opt.polys: if includePRS: return result.as_expr(), [r.as_expr() for r in R] return result.as_expr() else: if includePRS: return result, R return result @public def discriminant(f, *gens, **args): """ Compute discriminant of ``f``. Examples ======== >>> from sympy import discriminant >>> from sympy.abc import x >>> discriminant(x**2 + 2*x + 3) -8 """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('discriminant', 1, exc) result = F.discriminant() if not opt.polys: return result.as_expr() else: return result @public def cofactors(f, g, *gens, **args): """ Compute GCD and cofactors of ``f`` and ``g``. Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors of ``f`` and ``g``. Examples ======== >>> from sympy import cofactors >>> from sympy.abc import x >>> cofactors(x**2 - 1, x**2 - 3*x + 2) (x - 1, x + 1, x - 2) """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: domain, (a, b) = construct_domain(exc.exprs) try: h, cff, cfg = domain.cofactors(a, b) except NotImplementedError: raise ComputationFailed('cofactors', 2, exc) else: return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg) h, cff, cfg = F.cofactors(G) if not opt.polys: return h.as_expr(), cff.as_expr(), cfg.as_expr() else: return h, cff, cfg @public def gcd_list(seq, *gens, **args): """ Compute GCD of a list of polynomials. Examples ======== >>> from sympy import gcd_list >>> from sympy.abc import x >>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2]) x - 1 """ seq = sympify(seq) def try_non_polynomial_gcd(seq): if not gens and not args: domain, numbers = construct_domain(seq) if not numbers: return domain.zero elif domain.is_Numerical: result, numbers = numbers[0], numbers[1:] for number in numbers: result = domain.gcd(result, number) if domain.is_one(result): break return domain.to_sympy(result) return None result = try_non_polynomial_gcd(seq) if result is not None: return result options.allowed_flags(args, ['polys']) try: polys, opt = parallel_poly_from_expr(seq, *gens, **args) # gcd for domain Q[irrational] (purely algebraic irrational) if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq): a = seq[-1] lst = [ (a/elt).ratsimp() for elt in seq[:-1] ] if all(frc.is_rational for frc in lst): lc = 1 for frc in lst: lc = lcm(lc, frc.as_numer_denom()[0]) # abs ensures that the gcd is always non-negative return abs(a/lc) except PolificationFailed as exc: result = try_non_polynomial_gcd(exc.exprs) if result is not None: return result else: raise ComputationFailed('gcd_list', len(seq), exc) if not polys: if not opt.polys: return S.Zero else: return Poly(0, opt=opt) result, polys = polys[0], polys[1:] for poly in polys: result = result.gcd(poly) if result.is_one: break if not opt.polys: return result.as_expr() else: return result @public def gcd(f, g=None, *gens, **args): """ Compute GCD of ``f`` and ``g``. 
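    A sequence of polynomials may also be given as a single argument, in
    which case their collective GCD is computed via :func:`gcd_list`:

    >>> from sympy import gcd
    >>> from sympy.abc import x
    >>> gcd([x**2 - 1, x - 1, x**3 - 1])
    x - 1
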
Examples ======== >>> from sympy import gcd >>> from sympy.abc import x >>> gcd(x**2 - 1, x**2 - 3*x + 2) x - 1 """ if hasattr(f, '__iter__'): if g is not None: gens = (g,) + gens return gcd_list(f, *gens, **args) elif g is None: raise TypeError("gcd() takes 2 arguments or a sequence of arguments") options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) # gcd for domain Q[irrational] (purely algebraic irrational) a, b = map(sympify, (f, g)) if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational: frc = (a/b).ratsimp() if frc.is_rational: # abs ensures that the returned gcd is always non-negative return abs(a/frc.as_numer_denom()[0]) except PolificationFailed as exc: domain, (a, b) = construct_domain(exc.exprs) try: return domain.to_sympy(domain.gcd(a, b)) except NotImplementedError: raise ComputationFailed('gcd', 2, exc) result = F.gcd(G) if not opt.polys: return result.as_expr() else: return result @public def lcm_list(seq, *gens, **args): """ Compute LCM of a list of polynomials. Examples ======== >>> from sympy import lcm_list >>> from sympy.abc import x >>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2]) x**5 - x**4 - 2*x**3 - x**2 + x + 2 """ seq = sympify(seq) def try_non_polynomial_lcm(seq): if not gens and not args: domain, numbers = construct_domain(seq) if not numbers: return domain.one elif domain.is_Numerical: result, numbers = numbers[0], numbers[1:] for number in numbers: result = domain.lcm(result, number) return domain.to_sympy(result) return None result = try_non_polynomial_lcm(seq) if result is not None: return result options.allowed_flags(args, ['polys']) try: polys, opt = parallel_poly_from_expr(seq, *gens, **args) # lcm for domain Q[irrational] (purely algebraic irrational) if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq): a = seq[-1] lst = [ (a/elt).ratsimp() for elt in seq[:-1] ] if all(frc.is_rational for frc in lst): lc = 1 for frc in lst: lc = lcm(lc, frc.as_numer_denom()[1]) return a*lc except PolificationFailed as exc: result = try_non_polynomial_lcm(exc.exprs) if result is not None: return result else: raise ComputationFailed('lcm_list', len(seq), exc) if not polys: if not opt.polys: return S.One else: return Poly(1, opt=opt) result, polys = polys[0], polys[1:] for poly in polys: result = result.lcm(poly) if not opt.polys: return result.as_expr() else: return result @public def lcm(f, g=None, *gens, **args): """ Compute LCM of ``f`` and ``g``. Examples ======== >>> from sympy import lcm >>> from sympy.abc import x >>> lcm(x**2 - 1, x**2 - 3*x + 2) x**3 - 2*x**2 - x + 2 """ if hasattr(f, '__iter__'): if g is not None: gens = (g,) + gens return lcm_list(f, *gens, **args) elif g is None: raise TypeError("lcm() takes 2 arguments or a sequence of arguments") options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) # lcm for domain Q[irrational] (purely algebraic irrational) a, b = map(sympify, (f, g)) if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational: frc = (a/b).ratsimp() if frc.is_rational: return a*frc.as_numer_denom()[1] except PolificationFailed as exc: domain, (a, b) = construct_domain(exc.exprs) try: return domain.to_sympy(domain.lcm(a, b)) except NotImplementedError: raise ComputationFailed('lcm', 2, exc) result = F.lcm(G) if not opt.polys: return result.as_expr() else: return result @public def terms_gcd(f, *gens, **args): """ Remove GCD of terms from ``f``. 
If the ``deep`` flag is True, then the arguments of ``f`` will have terms_gcd applied to them. If a fraction is factored out of ``f`` and ``f`` is an Add, then an unevaluated Mul will be returned so that automatic simplification does not redistribute it. The hint ``clear``, when set to False, can be used to prevent such factoring when all coefficients are not fractions. Examples ======== >>> from sympy import terms_gcd, cos >>> from sympy.abc import x, y >>> terms_gcd(x**6*y**2 + x**3*y, x, y) x**3*y*(x**3*y + 1) The default action of polys routines is to expand the expression given to them. terms_gcd follows this behavior: >>> terms_gcd((3+3*x)*(x+x*y)) 3*x*(x*y + x + y + 1) If this is not desired then the hint ``expand`` can be set to False. In this case the expression will be treated as though it were comprised of one or more terms: >>> terms_gcd((3+3*x)*(x+x*y), expand=False) (3*x + 3)*(x*y + x) In order to traverse factors of a Mul or the arguments of other functions, the ``deep`` hint can be used: >>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True) 3*x*(x + 1)*(y + 1) >>> terms_gcd(cos(x + x*y), deep=True) cos(x*(y + 1)) Rationals are factored out by default: >>> terms_gcd(x + y/2) (2*x + y)/2 Only the y-term had a coefficient that was a fraction; if one does not want to factor out the 1/2 in cases like this, the flag ``clear`` can be set to False: >>> terms_gcd(x + y/2, clear=False) x + y/2 >>> terms_gcd(x*y/2 + y**2, clear=False) y*(x/2 + y) The ``clear`` flag is ignored if all coefficients are fractions: >>> terms_gcd(x/3 + y/2, clear=False) (2*x + 3*y)/6 See Also ======== sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms """ from sympy.core.relational import Equality orig = sympify(f) if isinstance(f, Equality): return Equality(*(terms_gcd(s, *gens, **args) for s in [f.lhs, f.rhs])) elif isinstance(f, Relational): raise TypeError("Inequalities can not be used with terms_gcd. Found: %s" %(f,)) if not isinstance(f, Expr) or f.is_Atom: return orig if args.get('deep', False): new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args]) args.pop('deep') args['expand'] = False return terms_gcd(new, *gens, **args) clear = args.pop('clear', True) options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: return exc.expr J, f = F.terms_gcd() if opt.domain.is_Ring: if opt.domain.is_Field: denom, f = f.clear_denoms(convert=True) coeff, f = f.primitive() if opt.domain.is_Field: coeff /= denom else: coeff = S.One term = Mul(*[x**j for x, j in zip(f.gens, J)]) if coeff == 1: coeff = S.One if term == 1: return orig if clear: return _keep_coeff(coeff, term*f.as_expr()) # base the clearing on the form of the original expression, not # the (perhaps) Mul that we have now coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul() return _keep_coeff(coeff, term*f, clear=False) @public def trunc(f, p, *gens, **args): """ Reduce ``f`` modulo a constant ``p``. Examples ======== >>> from sympy import trunc >>> from sympy.abc import x >>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3) -x**3 - x + 1 """ options.allowed_flags(args, ['auto', 'polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('trunc', 1, exc) result = F.trunc(sympify(p)) if not opt.polys: return result.as_expr() else: return result @public def monic(f, *gens, **args): """ Divide all coefficients of ``f`` by ``LC(f)``. 
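    Multiplying the result by ``LC(f)`` recovers ``f``:

    >>> from sympy import monic, LC, expand
    >>> from sympy.abc import x
    >>> f = 3*x**2 + 4*x + 2
    >>> expand(LC(f)*monic(f)) == f
    True
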
Examples ======== >>> from sympy import monic >>> from sympy.abc import x >>> monic(3*x**2 + 4*x + 2) x**2 + 4*x/3 + 2/3 """ options.allowed_flags(args, ['auto', 'polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('monic', 1, exc) result = F.monic(auto=opt.auto) if not opt.polys: return result.as_expr() else: return result @public def content(f, *gens, **args): """ Compute GCD of coefficients of ``f``. Examples ======== >>> from sympy import content >>> from sympy.abc import x >>> content(6*x**2 + 8*x + 12) 2 """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('content', 1, exc) return F.content() @public def primitive(f, *gens, **args): """ Compute content and the primitive form of ``f``. Examples ======== >>> from sympy.polys.polytools import primitive >>> from sympy.abc import x >>> primitive(6*x**2 + 8*x + 12) (2, 3*x**2 + 4*x + 6) >>> eq = (2 + 2*x)*x + 2 Expansion is performed by default: >>> primitive(eq) (2, x**2 + x + 1) Set ``expand`` to False to shut this off. Note that the extraction will not be recursive; use the as_content_primitive method for recursive, non-destructive Rational extraction. >>> primitive(eq, expand=False) (1, x*(2*x + 2) + 2) >>> eq.as_content_primitive() (2, x*(x + 1) + 1) """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('primitive', 1, exc) cont, result = F.primitive() if not opt.polys: return cont, result.as_expr() else: return cont, result @public def compose(f, g, *gens, **args): """ Compute functional composition ``f(g)``. Examples ======== >>> from sympy import compose >>> from sympy.abc import x >>> compose(x**2 + x, x - 1) x**2 - x """ options.allowed_flags(args, ['polys']) try: (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('compose', 2, exc) result = F.compose(G) if not opt.polys: return result.as_expr() else: return result @public def decompose(f, *gens, **args): """ Compute functional decomposition of ``f``. Examples ======== >>> from sympy import decompose >>> from sympy.abc import x >>> decompose(x**4 + 2*x**3 - x - 1) [x**2 - x - 1, x**2 + x] """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('decompose', 1, exc) result = F.decompose() if not opt.polys: return [r.as_expr() for r in result] else: return result @public def sturm(f, *gens, **args): """ Compute Sturm sequence of ``f``. Examples ======== >>> from sympy import sturm >>> from sympy.abc import x >>> sturm(x**3 - 2*x**2 + x - 3) [x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4] """ options.allowed_flags(args, ['auto', 'polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('sturm', 1, exc) result = F.sturm(auto=opt.auto) if not opt.polys: return [r.as_expr() for r in result] else: return result @public def gff_list(f, *gens, **args): """ Compute a list of greatest factorial factors of ``f``. Note that the input to ff() and rf() should be Poly instances to use the definitions here. 
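    Here ``ff(x, k)`` denotes the falling factorial
    ``x*(x - 1)*...*(x - k + 1)``, for instance:

    >>> from sympy import ff, expand
    >>> from sympy.abc import x
    >>> expand(ff(x, 3))
    x**3 - 3*x**2 + 2*x
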
Examples ======== >>> from sympy import gff_list, ff, Poly >>> from sympy.abc import x >>> f = Poly(x**5 + 2*x**4 - x**3 - 2*x**2, x) >>> gff_list(f) [(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)] >>> (ff(Poly(x), 1)*ff(Poly(x + 2), 4)) == f True >>> f = Poly(x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \ 1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x, x) >>> gff_list(f) [(Poly(x**3 + 7, x, domain='ZZ'), 2), (Poly(x**2 + 5*x, x, domain='ZZ'), 3)] >>> ff(Poly(x**3 + 7, x), 2)*ff(Poly(x**2 + 5*x, x), 3) == f True """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('gff_list', 1, exc) factors = F.gff_list() if not opt.polys: return [(g.as_expr(), k) for g, k in factors] else: return factors @public def gff(f, *gens, **args): """Compute greatest factorial factorization of ``f``. """ raise NotImplementedError('symbolic falling factorial') @public def sqf_norm(f, *gens, **args): """ Compute square-free norm of ``f``. Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``, where ``a`` is the algebraic extension of the ground domain. Examples ======== >>> from sympy import sqf_norm, sqrt >>> from sympy.abc import x >>> sqf_norm(x**2 + 1, extension=[sqrt(3)]) (1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16) """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('sqf_norm', 1, exc) s, g, r = F.sqf_norm() if not opt.polys: return Integer(s), g.as_expr(), r.as_expr() else: return Integer(s), g, r @public def sqf_part(f, *gens, **args): """ Compute square-free part of ``f``. Examples ======== >>> from sympy import sqf_part >>> from sympy.abc import x >>> sqf_part(x**3 - 3*x - 2) x**2 - x - 2 """ options.allowed_flags(args, ['polys']) try: F, opt = poly_from_expr(f, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('sqf_part', 1, exc) result = F.sqf_part() if not opt.polys: return result.as_expr() else: return result def _sorted_factors(factors, method): """Sort a list of ``(expr, exp)`` pairs. """ if method == 'sqf': def key(obj): poly, exp = obj rep = poly.rep.rep return (exp, len(rep), len(poly.gens), rep) else: def key(obj): poly, exp = obj rep = poly.rep.rep return (len(rep), len(poly.gens), exp, rep) return sorted(factors, key=key) def _factors_product(factors): """Multiply a list of ``(expr, exp)`` pairs. """ return Mul(*[f.as_expr()**k for f, k in factors]) def _symbolic_factor_list(expr, opt, method): """Helper function for :func:`_symbolic_factor`. 
""" coeff, factors = S.One, [] args = [i._eval_factor() if hasattr(i, '_eval_factor') else i for i in Mul.make_args(expr)] for arg in args: if arg.is_Number or (isinstance(arg, Expr) and pure_complex(arg)): coeff *= arg continue elif arg.is_Pow: base, exp = arg.args if base.is_Number and exp.is_Number: coeff *= arg continue if base.is_Number: factors.append((base, exp)) continue else: base, exp = arg, S.One try: poly, _ = _poly_from_expr(base, opt) except PolificationFailed as exc: factors.append((exc.expr, exp)) else: func = getattr(poly, method + '_list') _coeff, _factors = func() if _coeff is not S.One: if exp.is_Integer: coeff *= _coeff**exp elif _coeff.is_positive: factors.append((_coeff, exp)) else: _factors.append((_coeff, S.One)) if exp is S.One: factors.extend(_factors) elif exp.is_integer: factors.extend([(f, k*exp) for f, k in _factors]) else: other = [] for f, k in _factors: if f.as_expr().is_positive: factors.append((f, k*exp)) else: other.append((f, k)) factors.append((_factors_product(other), exp)) if method == 'sqf': factors = [(reduce(mul, (f for f, _ in factors if _ == k)), k) for k in {i for _, i in factors}] return coeff, factors def _symbolic_factor(expr, opt, method): """Helper function for :func:`_factor`. """ if isinstance(expr, Expr): if hasattr(expr,'_eval_factor'): return expr._eval_factor() coeff, factors = _symbolic_factor_list(together(expr, fraction=opt['fraction']), opt, method) return _keep_coeff(coeff, _factors_product(factors)) elif hasattr(expr, 'args'): return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args]) elif hasattr(expr, '__iter__'): return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr]) else: return expr def _generic_factor_list(expr, gens, args, method): """Helper function for :func:`sqf_list` and :func:`factor_list`. """ options.allowed_flags(args, ['frac', 'polys']) opt = options.build_options(gens, args) expr = sympify(expr) if isinstance(expr, (Expr, Poly)): if isinstance(expr, Poly): numer, denom = expr, 1 else: numer, denom = together(expr).as_numer_denom() cp, fp = _symbolic_factor_list(numer, opt, method) cq, fq = _symbolic_factor_list(denom, opt, method) if fq and not opt.frac: raise PolynomialError("a polynomial expected, got %s" % expr) _opt = opt.clone(dict(expand=True)) for factors in (fp, fq): for i, (f, k) in enumerate(factors): if not f.is_Poly: f, _ = _poly_from_expr(f, _opt) factors[i] = (f, k) fp = _sorted_factors(fp, method) fq = _sorted_factors(fq, method) if not opt.polys: fp = [(f.as_expr(), k) for f, k in fp] fq = [(f.as_expr(), k) for f, k in fq] coeff = cp/cq if not opt.frac: return coeff, fp else: return coeff, fp, fq else: raise PolynomialError("a polynomial expected, got %s" % expr) def _generic_factor(expr, gens, args, method): """Helper function for :func:`sqf` and :func:`factor`. """ fraction = args.pop('fraction', True) options.allowed_flags(args, []) opt = options.build_options(gens, args) opt['fraction'] = fraction return _symbolic_factor(sympify(expr), opt, method) def to_rational_coeffs(f): """ try to transform a polynomial to have rational coefficients try to find a transformation ``x = alpha*y`` ``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with rational coefficients, ``lc`` the leading coefficient. 
If this fails, try ``x = y + beta`` ``f(x) = g(y)`` Returns ``None`` if ``g`` not found; ``(lc, alpha, None, g)`` in case of rescaling ``(None, None, beta, g)`` in case of translation Notes ===== Currently it transforms only polynomials without roots larger than 2. Examples ======== >>> from sympy import sqrt, Poly, simplify >>> from sympy.polys.polytools import to_rational_coeffs >>> from sympy.abc import x >>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX') >>> lc, r, _, g = to_rational_coeffs(p) >>> lc, r (7 + 5*sqrt(2), 2 - 2*sqrt(2)) >>> g Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ') >>> r1 = simplify(1/r) >>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p True """ from sympy.simplify.simplify import simplify def _try_rescale(f, f1=None): """ try rescaling ``x -> alpha*x`` to convert f to a polynomial with rational coefficients. Returns ``alpha, f``; if the rescaling is successful, ``alpha`` is the rescaling factor, and ``f`` is the rescaled polynomial; else ``alpha`` is ``None``. """ from sympy.core.add import Add if not len(f.gens) == 1 or not (f.gens[0]).is_Atom: return None, f n = f.degree() lc = f.LC() f1 = f1 or f1.monic() coeffs = f1.all_coeffs()[1:] coeffs = [simplify(coeffx) for coeffx in coeffs] if coeffs[-2]: rescale1_x = simplify(coeffs[-2]/coeffs[-1]) coeffs1 = [] for i in range(len(coeffs)): coeffx = simplify(coeffs[i]*rescale1_x**(i + 1)) if not coeffx.is_rational: break coeffs1.append(coeffx) else: rescale_x = simplify(1/rescale1_x) x = f.gens[0] v = [x**n] for i in range(1, n + 1): v.append(coeffs1[i - 1]*x**(n - i)) f = Add(*v) f = Poly(f) return lc, rescale_x, f return None def _try_translate(f, f1=None): """ try translating ``x -> x + alpha`` to convert f to a polynomial with rational coefficients. Returns ``alpha, f``; if the translating is successful, ``alpha`` is the translating factor, and ``f`` is the shifted polynomial; else ``alpha`` is ``None``. 
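        For instance, for ``f = (x + sqrt(2))**2 = x**2 + 2*sqrt(2)*x + 2``
        the irrational part ``2*sqrt(2)`` of the next-to-leading coefficient
        gives ``alpha = -2*sqrt(2)/2 = -sqrt(2)``, and shifting by ``alpha``
        yields ``x**2``, which has rational coefficients.
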
""" from sympy.core.add import Add if not len(f.gens) == 1 or not (f.gens[0]).is_Atom: return None, f n = f.degree() f1 = f1 or f1.monic() coeffs = f1.all_coeffs()[1:] c = simplify(coeffs[0]) if c and not c.is_rational: func = Add if c.is_Add: args = c.args func = c.func else: args = [c] c1, c2 = sift(args, lambda z: z.is_rational, binary=True) alpha = -func(*c2)/n f2 = f1.shift(alpha) return alpha, f2 return None def _has_square_roots(p): """ Return True if ``f`` is a sum with square roots but no other root """ from sympy.core.exprtools import Factors coeffs = p.coeffs() has_sq = False for y in coeffs: for x in Add.make_args(y): f = Factors(x).factors r = [wx.q for b, wx in f.items() if b.is_number and wx.is_Rational and wx.q >= 2] if not r: continue if min(r) == 2: has_sq = True if max(r) > 2: return False return has_sq if f.get_domain().is_EX and _has_square_roots(f): f1 = f.monic() r = _try_rescale(f, f1) if r: return r[0], r[1], None, r[2] else: r = _try_translate(f, f1) if r: return None, None, r[0], r[1] return None def _torational_factor_list(p, x): """ helper function to factor polynomial using to_rational_coeffs Examples ======== >>> from sympy.polys.polytools import _torational_factor_list >>> from sympy.abc import x >>> from sympy import sqrt, expand, Mul >>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))})) >>> factors = _torational_factor_list(p, x); factors (-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)]) >>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p True >>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)})) >>> factors = _torational_factor_list(p, x); factors (1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)]) >>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p True """ from sympy.simplify.simplify import simplify p1 = Poly(p, x, domain='EX') n = p1.degree() res = to_rational_coeffs(p1) if not res: return None lc, r, t, g = res factors = factor_list(g.as_expr()) if lc: c = simplify(factors[0]*lc*r**n) r1 = simplify(1/r) a = [] for z in factors[1:][0]: a.append((simplify(z[0].subs({x: x*r1})), z[1])) else: c = factors[0] a = [] for z in factors[1:][0]: a.append((z[0].subs({x: x - t}), z[1])) return (c, a) @public def sqf_list(f, *gens, **args): """ Compute a list of square-free factors of ``f``. Examples ======== >>> from sympy import sqf_list >>> from sympy.abc import x >>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16) (2, [(x + 1, 2), (x + 2, 3)]) """ return _generic_factor_list(f, gens, args, method='sqf') @public def sqf(f, *gens, **args): """ Compute square-free factorization of ``f``. Examples ======== >>> from sympy import sqf >>> from sympy.abc import x >>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16) 2*(x + 1)**2*(x + 2)**3 """ return _generic_factor(f, gens, args, method='sqf') @public def factor_list(f, *gens, **args): """ Compute a list of irreducible factors of ``f``. Examples ======== >>> from sympy import factor_list >>> from sympy.abc import x, y >>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y) (2, [(x + y, 1), (x**2 + 1, 2)]) """ return _generic_factor_list(f, gens, args, method='factor') @public def factor(f, *gens, deep=False, **args): """ Compute the factorization of expression, ``f``, into irreducibles. (To factor an integer into primes, use ``factorint``.) There two modes implemented: symbolic and formal. If ``f`` is not an instance of :class:`Poly` and generators are not specified, then the former mode is used. 
Otherwise, the formal mode is used. In symbolic mode, :func:`factor` will traverse the expression tree and factor its components without any prior expansion, unless an instance of :class:`~.Add` is encountered (in this case formal factorization is used). This way :func:`factor` can handle large or symbolic exponents. By default, the factorization is computed over the rationals. To factor over other domain, e.g. an algebraic or finite field, use appropriate options: ``extension``, ``modulus`` or ``domain``. Examples ======== >>> from sympy import factor, sqrt, exp >>> from sympy.abc import x, y >>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y) 2*(x + y)*(x**2 + 1)**2 >>> factor(x**2 + 1) x**2 + 1 >>> factor(x**2 + 1, modulus=2) (x + 1)**2 >>> factor(x**2 + 1, gaussian=True) (x - I)*(x + I) >>> factor(x**2 - 2, extension=sqrt(2)) (x - sqrt(2))*(x + sqrt(2)) >>> factor((x**2 - 1)/(x**2 + 4*x + 4)) (x - 1)*(x + 1)/(x + 2)**2 >>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1)) (x + 2)**20000000*(x**2 + 1) By default, factor deals with an expression as a whole: >>> eq = 2**(x**2 + 2*x + 1) >>> factor(eq) 2**(x**2 + 2*x + 1) If the ``deep`` flag is True then subexpressions will be factored: >>> factor(eq, deep=True) 2**((x + 1)**2) If the ``fraction`` flag is False then rational expressions won't be combined. By default it is True. >>> factor(5*x + 3*exp(2 - 7*x), deep=True) (5*x*exp(7*x) + 3*exp(2))*exp(-7*x) >>> factor(5*x + 3*exp(2 - 7*x), deep=True, fraction=False) 5*x + 3*exp(2)*exp(-7*x) See Also ======== sympy.ntheory.factor_.factorint """ f = sympify(f) if deep: from sympy.simplify.simplify import bottom_up def _try_factor(expr): """ Factor, but avoid changing the expression when unable to. """ fac = factor(expr, *gens, **args) if fac.is_Mul or fac.is_Pow: return fac return expr f = bottom_up(f, _try_factor) # clean up any subexpressions that may have been expanded # while factoring out a larger expression partials = {} muladd = f.atoms(Mul, Add) for p in muladd: fac = factor(p, *gens, **args) if (fac.is_Mul or fac.is_Pow) and fac != p: partials[p] = fac return f.xreplace(partials) try: return _generic_factor(f, gens, args, method='factor') except PolynomialError as msg: if not f.is_commutative: from sympy.core.exprtools import factor_nc return factor_nc(f) else: raise PolynomialError(msg) @public def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False): """ Compute isolating intervals for roots of ``f``. 
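    A list of polynomials can be given as well, in which case intervals
    isolating the real roots of all of them are computed at once, each
    interval being tagged with the indices of the polynomials having a root
    in it. For instance, ``x**2 - 2`` and ``x**2 - 3`` have four real roots
    between them:

    >>> from sympy import intervals
    >>> from sympy.abc import x
    >>> len(intervals([x**2 - 2, x**2 - 3]))
    4
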
Examples ======== >>> from sympy import intervals >>> from sympy.abc import x >>> intervals(x**2 - 3) [((-2, -1), 1), ((1, 2), 1)] >>> intervals(x**2 - 3, eps=1e-2) [((-26/15, -19/11), 1), ((19/11, 26/15), 1)] """ if not hasattr(F, '__iter__'): try: F = Poly(F) except GeneratorsNeeded: return [] return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf) else: polys, opt = parallel_poly_from_expr(F, domain='QQ') if len(opt.gens) > 1: raise MultivariatePolynomialError for i, poly in enumerate(polys): polys[i] = poly.rep.rep if eps is not None: eps = opt.domain.convert(eps) if eps <= 0: raise ValueError("'eps' must be a positive rational") if inf is not None: inf = opt.domain.convert(inf) if sup is not None: sup = opt.domain.convert(sup) intervals = dup_isolate_real_roots_list(polys, opt.domain, eps=eps, inf=inf, sup=sup, strict=strict, fast=fast) result = [] for (s, t), indices in intervals: s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t) result.append(((s, t), indices)) return result @public def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False): """ Refine an isolating interval of a root to the given precision. Examples ======== >>> from sympy import refine_root >>> from sympy.abc import x >>> refine_root(x**2 - 3, 1, 2, eps=1e-2) (19/11, 26/15) """ try: F = Poly(f) if not isinstance(f, Poly) and not F.gen.is_Symbol: # root of sin(x) + 1 is -1 but when someone # passes an Expr instead of Poly they may not expect # that the generator will be sin(x), not x raise PolynomialError("generator must be a Symbol") except GeneratorsNeeded: raise PolynomialError( "can't refine a root of %s, not a polynomial" % f) return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf) @public def count_roots(f, inf=None, sup=None): """ Return the number of roots of ``f`` in ``[inf, sup]`` interval. If one of ``inf`` or ``sup`` is complex, it will return the number of roots in the complex rectangle with corners at ``inf`` and ``sup``. Examples ======== >>> from sympy import count_roots, I >>> from sympy.abc import x >>> count_roots(x**4 - 4, -3, 3) 2 >>> count_roots(x**4 - 4, 0, 1 + 3*I) 1 """ try: F = Poly(f, greedy=False) if not isinstance(f, Poly) and not F.gen.is_Symbol: # root of sin(x) + 1 is -1 but when someone # passes an Expr instead of Poly they may not expect # that the generator will be sin(x), not x raise PolynomialError("generator must be a Symbol") except GeneratorsNeeded: raise PolynomialError("can't count roots of %s, not a polynomial" % f) return F.count_roots(inf=inf, sup=sup) @public def real_roots(f, multiple=True): """ Return a list of real roots with multiplicities of ``f``. Examples ======== >>> from sympy import real_roots >>> from sympy.abc import x >>> real_roots(2*x**3 - 7*x**2 + 4*x + 4) [-1/2, 2, 2] """ try: F = Poly(f, greedy=False) if not isinstance(f, Poly) and not F.gen.is_Symbol: # root of sin(x) + 1 is -1 but when someone # passes an Expr instead of Poly they may not expect # that the generator will be sin(x), not x raise PolynomialError("generator must be a Symbol") except GeneratorsNeeded: raise PolynomialError( "can't compute real roots of %s, not a polynomial" % f) return F.real_roots(multiple=multiple) @public def nroots(f, n=15, maxsteps=50, cleanup=True): """ Compute numerical approximations of roots of ``f``. 
Examples ======== >>> from sympy import nroots >>> from sympy.abc import x >>> nroots(x**2 - 3, n=15) [-1.73205080756888, 1.73205080756888] >>> nroots(x**2 - 3, n=30) [-1.73205080756887729352744634151, 1.73205080756887729352744634151] """ try: F = Poly(f, greedy=False) if not isinstance(f, Poly) and not F.gen.is_Symbol: # root of sin(x) + 1 is -1 but when someone # passes an Expr instead of Poly they may not expect # that the generator will be sin(x), not x raise PolynomialError("generator must be a Symbol") except GeneratorsNeeded: raise PolynomialError( "can't compute numerical roots of %s, not a polynomial" % f) return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup) @public def ground_roots(f, *gens, **args): """ Compute roots of ``f`` by factorization in the ground domain. Examples ======== >>> from sympy import ground_roots >>> from sympy.abc import x >>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2) {0: 2, 1: 2} """ options.allowed_flags(args, []) try: F, opt = poly_from_expr(f, *gens, **args) if not isinstance(f, Poly) and not F.gen.is_Symbol: # root of sin(x) + 1 is -1 but when someone # passes an Expr instead of Poly they may not expect # that the generator will be sin(x), not x raise PolynomialError("generator must be a Symbol") except PolificationFailed as exc: raise ComputationFailed('ground_roots', 1, exc) return F.ground_roots() @public def nth_power_roots_poly(f, n, *gens, **args): """ Construct a polynomial with n-th powers of roots of ``f``. Examples ======== >>> from sympy import nth_power_roots_poly, factor, roots >>> from sympy.abc import x >>> f = x**4 - x**2 + 1 >>> g = factor(nth_power_roots_poly(f, 2)) >>> g (x**2 - x + 1)**2 >>> R_f = [ (r**2).expand() for r in roots(f) ] >>> R_g = roots(g).keys() >>> set(R_f) == set(R_g) True """ options.allowed_flags(args, []) try: F, opt = poly_from_expr(f, *gens, **args) if not isinstance(f, Poly) and not F.gen.is_Symbol: # root of sin(x) + 1 is -1 but when someone # passes an Expr instead of Poly they may not expect # that the generator will be sin(x), not x raise PolynomialError("generator must be a Symbol") except PolificationFailed as exc: raise ComputationFailed('nth_power_roots_poly', 1, exc) result = F.nth_power_roots_poly(n) if not opt.polys: return result.as_expr() else: return result @public def cancel(f, *gens, **args): """ Cancel common factors in a rational function ``f``. Examples ======== >>> from sympy import cancel, sqrt, Symbol, together >>> from sympy.abc import x >>> A = Symbol('A', commutative=False) >>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1)) (2*x + 2)/(x - 1) >>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A)) sqrt(6)/2 Note: due to automatic distribution of Rationals, a sum divided by an integer will appear as a sum. 
To recover a rational form use `together` on the result: >>> cancel(x/2 + 1) x/2 + 1 >>> together(_) (x + 2)/2 """ from sympy.core.exprtools import factor_terms from sympy.functions.elementary.piecewise import Piecewise from sympy.polys.rings import sring options.allowed_flags(args, ['polys']) f = sympify(f) opt = {} if 'polys' in args: opt['polys'] = args['polys'] if not isinstance(f, (tuple, Tuple)): if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr): return f f = factor_terms(f, radical=True) p, q = f.as_numer_denom() elif len(f) == 2: p, q = f if isinstance(p, Poly) and isinstance(q, Poly): opt['gens'] = p.gens opt['domain'] = p.domain opt['polys'] = opt.get('polys', True) p, q = p.as_expr(), q.as_expr() elif isinstance(f, Tuple): return factor_terms(f) else: raise ValueError('unexpected argument: %s' % f) try: if f.has(Piecewise): raise PolynomialError() R, (F, G) = sring((p, q), *gens, **args) if not R.ngens: if not isinstance(f, (tuple, Tuple)): return f.expand() else: return S.One, p, q except PolynomialError as msg: if f.is_commutative and not f.has(Piecewise): raise PolynomialError(msg) # Handling of noncommutative and/or piecewise expressions if f.is_Add or f.is_Mul: c, nc = sift(f.args, lambda x: x.is_commutative is True and not x.has(Piecewise), binary=True) nc = [cancel(i) for i in nc] return f.func(cancel(f.func(*c)), *nc) else: reps = [] pot = preorder_traversal(f) next(pot) for e in pot: # XXX: This should really skip anything that's not Expr. if isinstance(e, (tuple, Tuple, BooleanAtom)): continue try: reps.append((e, cancel(e))) pot.skip() # this was handled successfully except NotImplementedError: pass return f.xreplace(dict(reps)) c, (P, Q) = 1, F.cancel(G) if opt.get('polys', False) and not 'gens' in opt: opt['gens'] = R.symbols if not isinstance(f, (tuple, Tuple)): return c*(P.as_expr()/Q.as_expr()) else: P, Q = P.as_expr(), Q.as_expr() if not opt.get('polys', False): return c, P, Q else: return c, Poly(P, *gens, **opt), Poly(Q, *gens, **opt) @public def reduced(f, G, *gens, **args): """ Reduces a polynomial ``f`` modulo a set of polynomials ``G``. Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``, computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r`` such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r`` is a completely reduced polynomial with respect to ``G``. Examples ======== >>> from sympy import reduced >>> from sympy.abc import x, y >>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y]) ([2*x, 1], x**2 + y**2 + y) """ options.allowed_flags(args, ['polys', 'auto']) try: polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args) except PolificationFailed as exc: raise ComputationFailed('reduced', 0, exc) domain = opt.domain retract = False if opt.auto and domain.is_Ring and not domain.is_Field: opt = opt.clone(dict(domain=domain.get_field())) retract = True from sympy.polys.rings import xring _ring, _ = xring(opt.gens, opt.domain, opt.order) for i, poly in enumerate(polys): poly = poly.set_domain(opt.domain).rep.to_dict() polys[i] = _ring.from_dict(poly) Q, r = polys[0].div(polys[1:]) Q = [Poly._from_dict(dict(q), opt) for q in Q] r = Poly._from_dict(dict(r), opt) if retract: try: _Q, _r = [q.to_ring() for q in Q], r.to_ring() except CoercionFailed: pass else: Q, r = _Q, _r if not opt.polys: return [q.as_expr() for q in Q], r.as_expr() else: return Q, r @public def groebner(F, *gens, **args): """ Computes the reduced Groebner basis for a set of polynomials. 
Use the ``order`` argument to set the monomial ordering that will be used to compute the basis. Allowed orders are ``lex``, ``grlex`` and ``grevlex``. If no order is specified, it defaults to ``lex``. For more information on Groebner bases, see the references and the docstring of :func:`~.solve_poly_system`. Examples ======== Example taken from [1]. >>> from sympy import groebner >>> from sympy.abc import x, y >>> F = [x*y - 2*y, 2*y**2 - x**2] >>> groebner(F, x, y, order='lex') GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y, domain='ZZ', order='lex') >>> groebner(F, x, y, order='grlex') GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y, domain='ZZ', order='grlex') >>> groebner(F, x, y, order='grevlex') GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y, domain='ZZ', order='grevlex') By default, an improved implementation of the Buchberger algorithm is used. Optionally, an implementation of the F5B algorithm can be used. The algorithm can be set using the ``method`` flag or with the :func:`sympy.polys.polyconfig.setup` function. >>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)] >>> groebner(F, x, y, method='buchberger') GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex') >>> groebner(F, x, y, method='f5b') GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex') References ========== 1. [Buchberger01]_ 2. [Cox97]_ """ return GroebnerBasis(F, *gens, **args) @public def is_zero_dimensional(F, *gens, **args): """ Checks if the ideal generated by a Groebner basis is zero-dimensional. The algorithm checks if the set of monomials not divisible by the leading monomial of any element of ``F`` is bounded. References ========== David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and Algorithms, 3rd edition, p. 230 """ return GroebnerBasis(F, *gens, **args).is_zero_dimensional @public class GroebnerBasis(Basic): """Represents a reduced Groebner basis. """ def __new__(cls, F, *gens, **args): """Compute a reduced Groebner basis for a system of polynomials. 
""" options.allowed_flags(args, ['polys', 'method']) try: polys, opt = parallel_poly_from_expr(F, *gens, **args) except PolificationFailed as exc: raise ComputationFailed('groebner', len(F), exc) from sympy.polys.rings import PolyRing ring = PolyRing(opt.gens, opt.domain, opt.order) polys = [ring.from_dict(poly.rep.to_dict()) for poly in polys if poly] G = _groebner(polys, ring, method=opt.method) G = [Poly._from_dict(g, opt) for g in G] return cls._new(G, opt) @classmethod def _new(cls, basis, options): obj = Basic.__new__(cls) obj._basis = tuple(basis) obj._options = options return obj @property def args(self): basis = (p.as_expr() for p in self._basis) return (Tuple(*basis), Tuple(*self._options.gens)) @property def exprs(self): return [poly.as_expr() for poly in self._basis] @property def polys(self): return list(self._basis) @property def gens(self): return self._options.gens @property def domain(self): return self._options.domain @property def order(self): return self._options.order def __len__(self): return len(self._basis) def __iter__(self): if self._options.polys: return iter(self.polys) else: return iter(self.exprs) def __getitem__(self, item): if self._options.polys: basis = self.polys else: basis = self.exprs return basis[item] def __hash__(self): return hash((self._basis, tuple(self._options.items()))) def __eq__(self, other): if isinstance(other, self.__class__): return self._basis == other._basis and self._options == other._options elif iterable(other): return self.polys == list(other) or self.exprs == list(other) else: return False def __ne__(self, other): return not self == other @property def is_zero_dimensional(self): """ Checks if the ideal generated by a Groebner basis is zero-dimensional. The algorithm checks if the set of monomials not divisible by the leading monomial of any element of ``F`` is bounded. References ========== David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and Algorithms, 3rd edition, p. 230 """ def single_var(monomial): return sum(map(bool, monomial)) == 1 exponents = Monomial([0]*len(self.gens)) order = self._options.order for poly in self.polys: monomial = poly.LM(order=order) if single_var(monomial): exponents *= monomial # If any element of the exponents vector is zero, then there's # a variable for which there's no degree bound and the ideal # generated by this Groebner basis isn't zero-dimensional. return all(exponents) def fglm(self, order): """ Convert a Groebner basis from one ordering to another. The FGLM algorithm converts reduced Groebner bases of zero-dimensional ideals from one ordering to another. This method is often used when it is infeasible to compute a Groebner basis with respect to a particular ordering directly. Examples ======== >>> from sympy.abc import x, y >>> from sympy import groebner >>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1] >>> G = groebner(F, x, y, order='grlex') >>> list(G.fglm('lex')) [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7] >>> list(groebner(F, x, y, order='lex')) [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7] References ========== .. [1] J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). 
Efficient Computation of Zero-dimensional Groebner Bases by Change of Ordering """ opt = self._options src_order = opt.order dst_order = monomial_key(order) if src_order == dst_order: return self if not self.is_zero_dimensional: raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension") polys = list(self._basis) domain = opt.domain opt = opt.clone(dict( domain=domain.get_field(), order=dst_order, )) from sympy.polys.rings import xring _ring, _ = xring(opt.gens, opt.domain, src_order) for i, poly in enumerate(polys): poly = poly.set_domain(opt.domain).rep.to_dict() polys[i] = _ring.from_dict(poly) G = matrix_fglm(polys, _ring, dst_order) G = [Poly._from_dict(dict(g), opt) for g in G] if not domain.is_Field: G = [g.clear_denoms(convert=True)[1] for g in G] opt.domain = domain return self._new(G, opt) def reduce(self, expr, auto=True): """ Reduces a polynomial modulo a Groebner basis. Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``, computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r`` such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r`` is a completely reduced polynomial with respect to ``G``. Examples ======== >>> from sympy import groebner, expand >>> from sympy.abc import x, y >>> f = 2*x**4 - x**2 + y**3 + y**2 >>> G = groebner([x**3 - x, y**3 - y]) >>> G.reduce(f) ([2*x, 1], x**2 + y**2 + y) >>> Q, r = _ >>> expand(sum(q*g for q, g in zip(Q, G)) + r) 2*x**4 - x**2 + y**3 + y**2 >>> _ == f True """ poly = Poly._from_expr(expr, self._options) polys = [poly] + list(self._basis) opt = self._options domain = opt.domain retract = False if auto and domain.is_Ring and not domain.is_Field: opt = opt.clone(dict(domain=domain.get_field())) retract = True from sympy.polys.rings import xring _ring, _ = xring(opt.gens, opt.domain, opt.order) for i, poly in enumerate(polys): poly = poly.set_domain(opt.domain).rep.to_dict() polys[i] = _ring.from_dict(poly) Q, r = polys[0].div(polys[1:]) Q = [Poly._from_dict(dict(q), opt) for q in Q] r = Poly._from_dict(dict(r), opt) if retract: try: _Q, _r = [q.to_ring() for q in Q], r.to_ring() except CoercionFailed: pass else: Q, r = _Q, _r if not opt.polys: return [q.as_expr() for q in Q], r.as_expr() else: return Q, r def contains(self, poly): """ Check if ``poly`` belongs the ideal generated by ``self``. Examples ======== >>> from sympy import groebner >>> from sympy.abc import x, y >>> f = 2*x**3 + y**3 + 3*y >>> G = groebner([x**2 + y**2 - 1, x*y - 2]) >>> G.contains(f) True >>> G.contains(f + 1) False """ return self.reduce(poly)[1] == 0 @public def poly(expr, *gens, **args): """ Efficiently transform an expression into a polynomial. 
Examples ======== >>> from sympy import poly >>> from sympy.abc import x >>> poly(x*(x**2 + x - 1)**2) Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ') """ options.allowed_flags(args, []) def _poly(expr, opt): terms, poly_terms = [], [] for term in Add.make_args(expr): factors, poly_factors = [], [] for factor in Mul.make_args(term): if factor.is_Add: poly_factors.append(_poly(factor, opt)) elif factor.is_Pow and factor.base.is_Add and \ factor.exp.is_Integer and factor.exp >= 0: poly_factors.append( _poly(factor.base, opt).pow(factor.exp)) else: factors.append(factor) if not poly_factors: terms.append(term) else: product = poly_factors[0] for factor in poly_factors[1:]: product = product.mul(factor) if factors: factor = Mul(*factors) if factor.is_Number: product = product.mul(factor) else: product = product.mul(Poly._from_expr(factor, opt)) poly_terms.append(product) if not poly_terms: result = Poly._from_expr(expr, opt) else: result = poly_terms[0] for term in poly_terms[1:]: result = result.add(term) if terms: term = Add(*terms) if term.is_Number: result = result.add(term) else: result = result.add(Poly._from_expr(term, opt)) return result.reorder(*opt.get('gens', ()), **args) expr = sympify(expr) if expr.is_Poly: return Poly(expr, *gens, **args) if 'expand' not in args: args['expand'] = False opt = options.build_options(gens, args) return _poly(expr, opt)
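# Illustrative usage sketch (added for exposition; not part of the original
# module).  It exercises a few of the public entry points defined above --
# factor, cancel, intervals and groebner -- assuming a standard SymPy
# installation where these names are importable from the top-level package.
#
#     >>> from sympy import symbols, factor, cancel, intervals, groebner
#     >>> x, y = symbols('x y')
#     >>> factor(x**2 - 2*x + 1)
#     (x - 1)**2
#     >>> cancel((x**2 - 1)/(x - 1))
#     x + 1
#     >>> intervals(x**2 - 2)
#     [((-2, -1), 1), ((1, 2), 1)]
#     >>> list(groebner([x*y - 1, x**2 - y], x, y, order='lex'))
#     [x - y**2, y**3 - 1]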
"""Algorithms for computing symbolic roots of polynomials. """ import math from functools import reduce from sympy.core import S, I, pi from sympy.core.compatibility import ordered from sympy.core.exprtools import factor_terms from sympy.core.function import _mexpand from sympy.core.logic import fuzzy_not from sympy.core.mul import expand_2arg, Mul from sympy.core.numbers import Rational, igcd, comp from sympy.core.power import Pow from sympy.core.relational import Eq from sympy.core.symbol import Dummy, Symbol, symbols from sympy.core.sympify import sympify from sympy.functions import exp, sqrt, im, cos, acos, Piecewise from sympy.functions.elementary.miscellaneous import root from sympy.ntheory import divisors, isprime, nextprime from sympy.polys.domains import EX from sympy.polys.polyerrors import (PolynomialError, GeneratorsNeeded, DomainError) from sympy.polys.polyquinticconst import PolyQuintic from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant from sympy.polys.rationaltools import together from sympy.polys.specialpolys import cyclotomic_poly from sympy.simplify import simplify, powsimp from sympy.utilities import public def roots_linear(f): """Returns a list of roots of a linear polynomial.""" r = -f.nth(0)/f.nth(1) dom = f.get_domain() if not dom.is_Numerical: if dom.is_Composite: r = factor(r) else: r = simplify(r) return [r] def roots_quadratic(f): """Returns a list of roots of a quadratic polynomial. If the domain is ZZ then the roots will be sorted with negatives coming before positives. The ordering will be the same for any numerical coefficients as long as the assumptions tested are correct, otherwise the ordering will not be sorted (but will be canonical). """ a, b, c = f.all_coeffs() dom = f.get_domain() def _sqrt(d): # remove squares from square root since both will be represented # in the results; a similar thing is happening in roots() but # must be duplicated here because not all quadratics are binomials co = [] other = [] for di in Mul.make_args(d): if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0: co.append(Pow(di.base, di.exp//2)) else: other.append(di) if co: d = Mul(*other) co = Mul(*co) return co*sqrt(d) return sqrt(d) def _simplify(expr): if dom.is_Composite: return factor(expr) else: return simplify(expr) if c is S.Zero: r0, r1 = S.Zero, -b/a if not dom.is_Numerical: r1 = _simplify(r1) elif r1.is_negative: r0, r1 = r1, r0 elif b is S.Zero: r = -c/a if not dom.is_Numerical: r = _simplify(r) R = _sqrt(r) r0 = -R r1 = R else: d = b**2 - 4*a*c A = 2*a B = -b/A if not dom.is_Numerical: d = _simplify(d) B = _simplify(B) D = factor_terms(_sqrt(d)/A) r0 = B - D r1 = B + D if a.is_negative: r0, r1 = r1, r0 elif not dom.is_Numerical: r0, r1 = [expand_2arg(i) for i in (r0, r1)] return [r0, r1] def roots_cubic(f, trig=False): """Returns a list of roots of a cubic polynomial. References ========== [1] https://en.wikipedia.org/wiki/Cubic_function, General formula for roots, (accessed November 17, 2014). 
""" if trig: a, b, c, d = f.all_coeffs() p = (3*a*c - b**2)/3/a**2 q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3) D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2 if (D > 0) == True: rv = [] for k in range(3): rv.append(2*sqrt(-p/3)*cos(acos(q/p*sqrt(-3/p)*Rational(3, 2))/3 - k*pi*Rational(2, 3))) return [i - b/3/a for i in rv] _, a, b, c = f.monic().all_coeffs() if c is S.Zero: x1, x2 = roots([1, a, b], multiple=True) return [x1, S.Zero, x2] p = b - a**2/3 q = c - a*b/3 + 2*a**3/27 pon3 = p/3 aon3 = a/3 u1 = None if p is S.Zero: if q is S.Zero: return [-aon3]*3 if q.is_real: if q.is_positive: u1 = -root(q, 3) elif q.is_negative: u1 = root(-q, 3) elif q is S.Zero: y1, y2 = roots([1, 0, p], multiple=True) return [tmp - aon3 for tmp in [y1, S.Zero, y2]] elif q.is_real and q.is_negative: u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3) coeff = I*sqrt(3)/2 if u1 is None: u1 = S.One u2 = Rational(-1, 2) + coeff u3 = Rational(-1, 2) - coeff a, b, c, d = S(1), a, b, c D0 = b**2 - 3*a*c D1 = 2*b**3 - 9*a*b*c + 27*a**2*d C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3) return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]] u2 = u1*(Rational(-1, 2) + coeff) u3 = u1*(Rational(-1, 2) - coeff) if p is S.Zero: return [u1 - aon3, u2 - aon3, u3 - aon3] soln = [ -u1 + pon3/u1 - aon3, -u2 + pon3/u2 - aon3, -u3 + pon3/u3 - aon3 ] return soln def _roots_quartic_euler(p, q, r, a): """ Descartes-Euler solution of the quartic equation Parameters ========== p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r`` a: shift of the roots Notes ===== This is a helper function for ``roots_quartic``. Look for solutions of the form :: ``x1 = sqrt(R) - sqrt(A + B*sqrt(R))`` ``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))`` ``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))`` ``x4 = sqrt(R) + sqrt(A + B*sqrt(R))`` To satisfy the quartic equation one must have ``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R`` so that ``R`` must satisfy the Descartes-Euler resolvent equation ``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0`` If the resolvent does not have a rational solution, return None; in that case it is likely that the Ferrari method gives a simpler solution. Examples ======== >>> from sympy import S >>> from sympy.polys.polyroots import _roots_quartic_euler >>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125 >>> _roots_quartic_euler(p, q, r, S(0))[0] -sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5 """ # solve the resolvent equation x = Dummy('x') eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2 xsols = list(roots(Poly(eq, x), cubics=False).keys()) xsols = [sol for sol in xsols if sol.is_rational and sol.is_nonzero] if not xsols: return None R = max(xsols) c1 = sqrt(R) B = -q*c1/(4*R) A = -R - p/2 c2 = sqrt(A + B) c3 = sqrt(A - B) return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a] def roots_quartic(f): r""" Returns a list of roots of a quartic polynomial. There are many references for solving quartic expressions available [1-5]. This reviewer has found that many of them require one to select from among 2 or more possible sets of solutions and that some solutions work when one is searching for real roots but don't work when searching for complex roots (though this is not always stated clearly). The following routine has been tested and found to be correct for 0, 2 or 4 complex roots. The quasisymmetric case solution [6] looks for quartics that have the form `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`. 
Although no general solution that is always applicable for all coefficients is known to this reviewer, certain conditions are tested to determine the simplest 4 expressions that can be returned: 1) `f = c + a*(a**2/8 - b/2) == 0` 2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0` 3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then a) `p == 0` b) `p != 0` Examples ======== >>> from sympy import Poly >>> from sympy.polys.polyroots import roots_quartic >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20')) >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I >>> sorted(str(tmp.evalf(n=2)) for tmp in r) ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I'] References ========== 1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html 2. https://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method 3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html 4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf 5. http://www.albmath.org/files/Math_5713.pdf 6. http://www.statemaster.com/encyclopedia/Quartic-equation 7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf """ _, a, b, c, d = f.monic().all_coeffs() if not d: return [S.Zero] + roots([1, a, b, c], multiple=True) elif (c/a)**2 == d: x, m = f.gen, c/a g = Poly(x**2 + a*x + b - 2*m, x) z1, z2 = roots_quadratic(g) h1 = Poly(x**2 - z1*x + m, x) h2 = Poly(x**2 - z2*x + m, x) r1 = roots_quadratic(h1) r2 = roots_quadratic(h2) return r1 + r2 else: a2 = a**2 e = b - 3*a2/8 f = _mexpand(c + a*(a2/8 - b/2)) g = _mexpand(d - a*(a*(3*a2/256 - b/16) + c/4)) aon4 = a/4 if f is S.Zero: y1, y2 = [sqrt(tmp) for tmp in roots([1, e, g], multiple=True)] return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]] if g is S.Zero: y = [S.Zero] + roots([1, 0, e, f], multiple=True) return [tmp - aon4 for tmp in y] else: # Descartes-Euler method, see [7] sols = _roots_quartic_euler(e, f, g, aon4) if sols: return sols # Ferrari method, see [1, 2] a2 = a**2 e = b - 3*a2/8 f = c + a*(a2/8 - b/2) g = d - a*(a*(3*a2/256 - b/16) + c/4) p = -e**2/12 - g q = -e**3/108 + e*g/3 - f**2/8 TH = Rational(1, 3) def _ans(y): w = sqrt(e + 2*y) arg1 = 3*e + 2*y arg2 = 2*f/w ans = [] for s in [-1, 1]: root = sqrt(-(arg1 + s*arg2)) for t in [-1, 1]: ans.append((s*w - t*root)/2 - aon4) return ans # p == 0 case y1 = e*Rational(-5, 6) - q**TH if p.is_zero: return _ans(y1) # if p != 0 then u below is not 0 root = sqrt(q**2/4 + p**3/27) r = -q/2 + root # or -q/2 - root u = r**TH # primary root of solve(x**3 - r, x) y2 = e*Rational(-5, 6) + u - p/u/3 if fuzzy_not(p.is_zero): return _ans(y2) # sort it out once they know the values of the coefficients return [Piecewise((a1, Eq(p, 0)), (a2, True)) for a1, a2 in zip(_ans(y1), _ans(y2))] def roots_binomial(f): """Returns a list of roots of a binomial polynomial. If the domain is ZZ then the roots will be sorted with negatives coming before positives. The ordering will be the same for any numerical coefficients as long as the assumptions tested are correct, otherwise the ordering will not be sorted (but will be canonical). """ n = f.degree() a, b = f.nth(n), f.nth(0) base = -cancel(b/a) alpha = root(base, n) if alpha.is_number: alpha = alpha.expand(complex=True) # define some parameters that will allow us to order the roots. # If the domain is ZZ this is guaranteed to return roots sorted # with reals before non-real roots and non-real sorted according # to real part and imaginary part, e.g. 
-1, 1, -1 + I, 2 - I neg = base.is_negative even = n % 2 == 0 if neg: if even == True and (base + 1).is_positive: big = True else: big = False # get the indices in the right order so the computed # roots will be sorted when the domain is ZZ ks = [] imax = n//2 if even: ks.append(imax) imax -= 1 if not neg: ks.append(0) for i in range(imax, 0, -1): if neg: ks.extend([i, -i]) else: ks.extend([-i, i]) if neg: ks.append(0) if big: for i in range(0, len(ks), 2): pair = ks[i: i + 2] pair = list(reversed(pair)) # compute the roots roots, d = [], 2*I*pi/n for k in ks: zeta = exp(k*d).expand(complex=True) roots.append((alpha*zeta).expand(power_base=False)) return roots def _inv_totient_estimate(m): """ Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``. Examples ======== >>> from sympy.polys.polyroots import _inv_totient_estimate >>> _inv_totient_estimate(192) (192, 840) >>> _inv_totient_estimate(400) (400, 1750) """ primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ] a, b = 1, 1 for p in primes: a *= p b *= p - 1 L = m U = int(math.ceil(m*(float(a)/b))) P = p = 2 primes = [] while P <= U: p = nextprime(p) primes.append(p) P *= p P //= p b = 1 for p in primes[:-1]: b *= p - 1 U = int(math.ceil(m*(float(P)/b))) return L, U def roots_cyclotomic(f, factor=False): """Compute roots of cyclotomic polynomials. """ L, U = _inv_totient_estimate(f.degree()) for n in range(L, U + 1): g = cyclotomic_poly(n, f.gen, polys=True) if f.expr == g.expr: break else: # pragma: no cover raise RuntimeError("failed to find index of a cyclotomic polynomial") roots = [] if not factor: # get the indices in the right order so the computed # roots will be sorted h = n//2 ks = [i for i in range(1, n + 1) if igcd(i, n) == 1] ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1)) d = 2*I*pi/n for k in reversed(ks): roots.append(exp(k*d).expand(complex=True)) else: g = Poly(f, extension=root(-1, n)) for h, _ in ordered(g.factor_list()[1]): roots.append(-h.TC()) return roots def roots_quintic(f): """ Calculate exact roots of a solvable quintic """ result = [] coeff_5, coeff_4, p, q, r, s = f.all_coeffs() # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s if coeff_4: return result if coeff_5 != 1: l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5] if not all(coeff.is_Rational for coeff in l): return result f = Poly(f/coeff_5) quintic = PolyQuintic(f) # Eqn standardized. 
Algo for solving starts here if not f.is_irreducible: return result f20 = quintic.f20 # Check if f20 has linear factors over domain Z if f20.is_irreducible: return result # Now, we know that f is solvable for _factor in f20.factor_list()[1]: if _factor[0].is_linear: theta = _factor[0].root(0) break d = discriminant(f) delta = sqrt(d) # zeta = a fifth root of unity zeta1, zeta2, zeta3, zeta4 = quintic.zeta T = quintic.T(theta, d) tol = S(1e-10) alpha = T[1] + T[2]*delta alpha_bar = T[1] - T[2]*delta beta = T[3] + T[4]*delta beta_bar = T[3] - T[4]*delta disc = alpha**2 - 4*beta disc_bar = alpha_bar**2 - 4*beta_bar l0 = quintic.l0(theta) l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2)) l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2)) l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2)) l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2)) order = quintic.order(theta, d) test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) ) # Comparing floats if not comp(test, 0, tol): l2, l3 = l3, l2 # Now we have correct order of l's R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4 R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4 R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4 R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4 Res = [None, [None]*5, [None]*5, [None]*5, [None]*5] Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5] sol = Symbol('sol') # Simplifying improves performance a lot for exact expressions R1 = _quintic_simplify(R1) R2 = _quintic_simplify(R2) R3 = _quintic_simplify(R3) R4 = _quintic_simplify(R4) # Solve imported here. Causing problems if imported as 'solve' # and hence the changed name from sympy.solvers.solvers import solve as _solve a, b = symbols('a b', cls=Dummy) _sol = _solve( sol**5 - a - I*b, sol) for i in range(5): _sol[i] = factor(_sol[i]) R1 = R1.as_real_imag() R2 = R2.as_real_imag() R3 = R3.as_real_imag() R4 = R4.as_real_imag() for i, currentroot in enumerate(_sol): Res[1][i] = _quintic_simplify(currentroot.subs({ a: R1[0], b: R1[1] })) Res[2][i] = _quintic_simplify(currentroot.subs({ a: R2[0], b: R2[1] })) Res[3][i] = _quintic_simplify(currentroot.subs({ a: R3[0], b: R3[1] })) Res[4][i] = _quintic_simplify(currentroot.subs({ a: R4[0], b: R4[1] })) for i in range(1, 5): for j in range(5): Res_n[i][j] = Res[i][j].n() Res[i][j] = _quintic_simplify(Res[i][j]) r1 = Res[1][0] r1_n = Res_n[1][0] for i in range(5): if comp(im(r1_n*Res_n[4][i]), 0, tol): r4 = Res[4][i] break # Now we have various Res values. Each will be a list of five # values. We have to pick one r value from those five for each Res u, v = quintic.uv(theta, d) testplus = (u + v*delta*sqrt(5)).n() testminus = (u - v*delta*sqrt(5)).n() # Evaluated numbers suffixed with _n # We will use evaluated numbers for calculation. Much faster. 
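    # (Descriptive comment added for exposition.)  The nested loops below try
    # every candidate pair (Res[2][i], Res[3][j]); a pair is accepted as
    # (r2, r3) once both numerical identities built from u, v and delta
    # (testplus / testminus) hold to within ``tol``, which selects the r2, r3
    # consistent with the already chosen r1 and r4.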
r4_n = r4.n() r2 = r3 = None for i in range(5): r2temp_n = Res_n[2][i] for j in range(5): # Again storing away the exact number and using # evaluated numbers in computations r3temp_n = Res_n[3][j] if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)): r2 = Res[2][i] r3 = Res[3][j] break if r2: break else: return [] # fall back to normal solve # Now, we have r's so we can get roots x1 = (r1 + r2 + r3 + r4)/5 x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5 x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5 x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5 x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5 result = [x1, x2, x3, x4, x5] # Now check if solutions are distinct saw = set() for r in result: r = r.n(2) if r in saw: # Roots were identical. Abort, return [] # and fall back to usual solve return [] saw.add(r) return result def _quintic_simplify(expr): expr = powsimp(expr) expr = cancel(expr) return together(expr) def _integer_basis(poly): """Compute coefficient basis for a polynomial over integers. Returns the integer ``div`` such that substituting ``x = div*y`` ``p(x) = m*q(y)`` where the coefficients of ``q`` are smaller than those of ``p``. For example ``x**5 + 512*x + 1024 = 0`` with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0`` Returns the integer ``div`` or ``None`` if there is no possible scaling. Examples ======== >>> from sympy.polys import Poly >>> from sympy.abc import x >>> from sympy.polys.polyroots import _integer_basis >>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ') >>> _integer_basis(p) 4 """ monoms, coeffs = list(zip(*poly.terms())) monoms, = list(zip(*monoms)) coeffs = list(map(abs, coeffs)) if coeffs[0] < coeffs[-1]: coeffs = list(reversed(coeffs)) n = monoms[0] monoms = [n - i for i in reversed(monoms)] else: return None monoms = monoms[:-1] coeffs = coeffs[:-1] divs = reversed(divisors(gcd_list(coeffs))[1:]) try: div = next(divs) except StopIteration: return None while True: for monom, coeff in zip(monoms, coeffs): if coeff % div**monom != 0: try: div = next(divs) except StopIteration: return None else: break else: return div def preprocess_roots(poly): """Try to get rid of symbolic coefficients from ``poly``. """ coeff = S.One poly_func = poly.func try: _, poly = poly.clear_denoms(convert=True) except DomainError: return coeff, poly poly = poly.primitive()[1] poly = poly.retract() # TODO: This is fragile. Figure out how to make this independent of construct_domain(). 
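    # (Descriptive comment added for exposition.)  When the coefficient domain
    # is itself a polynomial ring and every coefficient is a single term, the
    # auxiliary generators are injected below; any generator whose exponents
    # vary uniformly with those of the main variable is then evaluated at 1
    # and absorbed into ``coeff`` as a power of that generator.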
if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()): poly = poly.inject() strips = list(zip(*poly.monoms())) gens = list(poly.gens[1:]) base, strips = strips[0], strips[1:] for gen, strip in zip(list(gens), strips): reverse = False if strip[0] < strip[-1]: strip = reversed(strip) reverse = True ratio = None for a, b in zip(base, strip): if not a and not b: continue elif not a or not b: break elif b % a != 0: break else: _ratio = b // a if ratio is None: ratio = _ratio elif ratio != _ratio: break else: if reverse: ratio = -ratio poly = poly.eval(gen, 1) coeff *= gen**(-ratio) gens.remove(gen) if gens: poly = poly.eject(*gens) if poly.is_univariate and poly.get_domain().is_ZZ: basis = _integer_basis(poly) if basis is not None: n = poly.degree() def func(k, coeff): return coeff//basis**(n - k[0]) poly = poly.termwise(func) coeff *= basis if not isinstance(poly, poly_func): poly = poly_func(poly) return coeff, poly @public def roots(f, *gens, auto=True, cubics=True, trig=False, quartics=True, quintics=False, multiple=False, filter=None, predicate=None, **flags): """ Computes symbolic roots of a univariate polynomial. Given a univariate polynomial f with symbolic coefficients (or a list of the polynomial's coefficients), returns a dictionary with its roots and their multiplicities. Only roots expressible via radicals will be returned. To get a complete set of roots use RootOf class or numerical methods instead. By default cubic and quartic formulas are used in the algorithm. To disable them because of unreadable output set ``cubics=False`` or ``quartics=False`` respectively. If cubic roots are real but are expressed in terms of complex numbers (casus irreducibilis [1]) the ``trig`` flag can be set to True to have the solutions returned in terms of cosine and inverse cosine functions. To get roots from a specific domain set the ``filter`` flag with one of the following specifiers: Z, Q, R, I, C. By default all roots are returned (this is equivalent to setting ``filter='C'``). By default a dictionary is returned giving a compact result in case of multiple roots. However to get a list containing all those roots set the ``multiple`` flag to True; the list will have identical roots appearing next to each other in the result. (For a given Poly, the all_roots method will give the roots in sorted numerical order.) Examples ======== >>> from sympy import Poly, roots >>> from sympy.abc import x, y >>> roots(x**2 - 1, x) {-1: 1, 1: 1} >>> p = Poly(x**2-1, x) >>> roots(p) {-1: 1, 1: 1} >>> p = Poly(x**2-y, x, y) >>> roots(Poly(p, x)) {-sqrt(y): 1, sqrt(y): 1} >>> roots(x**2 - y, x) {-sqrt(y): 1, sqrt(y): 1} >>> roots([1, 0, -1]) {-1: 1, 1: 1} References ========== .. 
[1] https://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method """ from sympy.polys.polytools import to_rational_coeffs flags = dict(flags) if isinstance(f, list): if gens: raise ValueError('redundant generators given') x = Dummy('x') poly, i = {}, len(f) - 1 for coeff in f: poly[i], i = sympify(coeff), i - 1 f = Poly(poly, x, field=True) else: try: F = Poly(f, *gens, **flags) if not isinstance(f, Poly) and not F.gen.is_Symbol: raise PolynomialError("generator must be a Symbol") else: f = F if f.length == 2 and f.degree() != 1: # check for foo**n factors in the constant n = f.degree() npow_bases = [] others = [] expr = f.as_expr() con = expr.as_independent(*gens)[0] for p in Mul.make_args(con): if p.is_Pow and not p.exp % n: npow_bases.append(p.base**(p.exp/n)) else: others.append(p) if npow_bases: b = Mul(*npow_bases) B = Dummy() d = roots(Poly(expr - con + B**n*Mul(*others), *gens, **flags), *gens, **flags) rv = {} for k, v in d.items(): rv[k.subs(B, b)] = v return rv except GeneratorsNeeded: if multiple: return [] else: return {} if f.is_multivariate: raise PolynomialError('multivariate polynomials are not supported') def _update_dict(result, currentroot, k): if currentroot in result: result[currentroot] += k else: result[currentroot] = k def _try_decompose(f): """Find roots using functional decomposition. """ factors, roots = f.decompose(), [] for currentroot in _try_heuristics(factors[0]): roots.append(currentroot) for currentfactor in factors[1:]: previous, roots = list(roots), [] for currentroot in previous: g = currentfactor - Poly(currentroot, f.gen) for currentroot in _try_heuristics(g): roots.append(currentroot) return roots def _try_heuristics(f): """Find roots using formulas and some tricks. """ if f.is_ground: return [] if f.is_monomial: return [S.Zero]*f.degree() if f.length() == 2: if f.degree() == 1: return list(map(cancel, roots_linear(f))) else: return roots_binomial(f) result = [] for i in [-1, 1]: if not f.eval(i): f = f.quo(Poly(f.gen - i, f.gen)) result.append(i) break n = f.degree() if n == 1: result += list(map(cancel, roots_linear(f))) elif n == 2: result += list(map(cancel, roots_quadratic(f))) elif f.is_cyclotomic: result += roots_cyclotomic(f) elif n == 3 and cubics: result += roots_cubic(f, trig=trig) elif n == 4 and quartics: result += roots_quartic(f) elif n == 5 and quintics: result += roots_quintic(f) return result # Convert the generators to symbols dumgens = symbols('x:%d' % len(f.gens), cls=Dummy) f = f.per(f.rep, dumgens) (k,), f = f.terms_gcd() if not k: zeros = {} else: zeros = {S.Zero: k} coeff, f = preprocess_roots(f) if auto and f.get_domain().is_Ring: f = f.to_field() # Use EX instead of ZZ_I or QQ_I if f.get_domain().is_QQ_I: f = f.per(f.rep.convert(EX)) rescale_x = None translate_x = None result = {} if not f.is_ground: dom = f.get_domain() if not dom.is_Exact and dom.is_Numerical: for r in f.nroots(): _update_dict(result, r, 1) elif f.degree() == 1: result[roots_linear(f)[0]] = 1 elif f.length() == 2: roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial for r in roots_fun(f): _update_dict(result, r, 1) else: _, factors = Poly(f.as_expr()).factor_list() if len(factors) == 1 and f.degree() == 2: for r in roots_quadratic(f): _update_dict(result, r, 1) else: if len(factors) == 1 and factors[0][1] == 1: if f.get_domain().is_EX: res = to_rational_coeffs(f) if res: if res[0] is None: translate_x, f = res[2:] else: rescale_x, f = res[1], res[-1] result = roots(f) if not result: for currentroot in 
_try_decompose(f): _update_dict(result, currentroot, 1) else: for r in _try_heuristics(f): _update_dict(result, r, 1) else: for currentroot in _try_decompose(f): _update_dict(result, currentroot, 1) else: for currentfactor, k in factors: for r in _try_heuristics(Poly(currentfactor, f.gen, field=True)): _update_dict(result, r, k) if coeff is not S.One: _result, result, = result, {} for currentroot, k in _result.items(): result[coeff*currentroot] = k if filter not in [None, 'C']: handlers = { 'Z': lambda r: r.is_Integer, 'Q': lambda r: r.is_Rational, 'R': lambda r: all(a.is_real for a in r.as_numer_denom()), 'I': lambda r: r.is_imaginary, } try: query = handlers[filter] except KeyError: raise ValueError("Invalid filter: %s" % filter) for zero in dict(result).keys(): if not query(zero): del result[zero] if predicate is not None: for zero in dict(result).keys(): if not predicate(zero): del result[zero] if rescale_x: result1 = {} for k, v in result.items(): result1[k*rescale_x] = v result = result1 if translate_x: result1 = {} for k, v in result.items(): result1[k + translate_x] = v result = result1 # adding zero roots after non-trivial roots have been translated result.update(zeros) if not multiple: return result else: zeros = [] for zero in ordered(result): zeros.extend([zero]*result[zero]) return zeros def root_factors(f, *gens, filter=None, **args): """ Returns all factors of a univariate polynomial. Examples ======== >>> from sympy.abc import x, y >>> from sympy.polys.polyroots import root_factors >>> root_factors(x**2 - y, x) [x - sqrt(y), x + sqrt(y)] """ args = dict(args) F = Poly(f, *gens, **args) if not F.is_Poly: return [f] if F.is_multivariate: raise ValueError('multivariate polynomials are not supported') x = F.gens[0] zeros = roots(F, filter=filter) if not zeros: factors = [F] else: factors, N = [], 0 for r, n in ordered(zeros.items()): factors, N = factors + [Poly(x - r, x)]*n, N + n if N < F.degree(): G = reduce(lambda p, q: p*q, factors) factors.append(F.quo(G)) if not isinstance(f, Poly): factors = [ f.as_expr() for f in factors ] return factors
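# Illustrative usage sketch (added for exposition; not part of the original
# module).  It shows how the main flags of roots() interact, assuming a
# standard SymPy installation.
#
#     >>> from sympy import roots
#     >>> from sympy.abc import x
#     >>> roots(x**3 - 6*x**2 + 11*x - 6, x, multiple=True)
#     [1, 2, 3]
#     >>> roots((x - 1)**2*(x**2 + 1), x, filter='R')
#     {1: 2}
#     >>> roots((x - 1)**2*(x**2 + 1), x, multiple=True, filter='I')
#     [-I, I]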
from operator import mul from sympy.core.symbol import Dummy from sympy.core.sympify import _sympify from sympy.matrices.common import (NonInvertibleMatrixError, NonSquareMatrixError, ShapeError) from sympy.polys import Poly from sympy.polys.agca.extensions import FiniteExtension from sympy.polys.constructor import construct_domain from sympy.polys.factortools import dup_factor_list from sympy.polys.polyroots import roots from sympy.polys.rootoftools import CRootOf class DDMError(Exception): """Base class for errors raised by DDM""" pass class DDMBadInputError(DDMError): """list of lists is inconsistent with shape""" pass class DDMDomainError(DDMError): """domains do not match""" pass class DDMShapeError(DDMError): """shapes are inconsistent""" pass class DDM(list): """Dense matrix based on polys domain elements This is a list subclass and is a wrapper for a list of lists that supports basic matrix arithmetic +, -, *, **. """ def __init__(self, rowslist, shape, domain): super().__init__(rowslist) self.shape = self.rows, self.cols = m, n = shape self.domain = domain if not (len(self) == m and all(len(row) == n for row in self)): raise DDMBadInputError("Inconsistent row-list/shape") def __str__(self): cls = type(self).__name__ rows = list.__str__(self) return '%s(%s, %s, %s)' % (cls, rows, self.shape, self.domain) def __eq__(self, other): if not isinstance(other, DDM): return False return (super().__eq__(other) and self.domain == other.domain) def __ne__(self, other): return not self.__eq__(other) @classmethod def zeros(cls, shape, domain): z = domain.zero m, n = shape rowslist = ([z] * n for _ in range(m)) return DDM(rowslist, shape, domain) @classmethod def eye(cls, size, domain): one = domain.one ddm = cls.zeros((size, size), domain) for i in range(size): ddm[i][i] = one return ddm def copy(self): copyrows = (row[:] for row in self) return DDM(copyrows, self.shape, self.domain) def __add__(a, b): if not isinstance(b, DDM): return NotImplemented return a.add(b) def __sub__(a, b): if not isinstance(b, DDM): return NotImplemented return a.sub(b) def __neg__(a): return a.neg() def __mul__(a, b): if b in a.domain: return a.mul(b) else: return NotImplemented def __matmul__(a, b): if isinstance(b, DDM): return a.matmul(b) else: return NotImplemented @classmethod def _check(cls, a, op, b, ashape, bshape): if a.domain != b.domain: msg = "Domain mismatch: %s %s %s" % (a.domain, op, b.domain) raise DDMDomainError(msg) if ashape != bshape: msg = "Shape mismatch: %s %s %s" % (a.shape, op, b.shape) raise DDMShapeError(msg) def add(a, b): """a + b""" a._check(a, '+', b, a.shape, b.shape) c = a.copy() ddm_iadd(c, b) return c def sub(a, b): """a - b""" a._check(a, '-', b, a.shape, b.shape) c = a.copy() ddm_isub(c, b) return c def neg(a): """-a""" b = a.copy() ddm_ineg(b) return b def mul(a, b): c = a.copy() ddm_imul(c, b) return c def matmul(a, b): """a @ b (matrix product)""" m, o = a.shape o2, n = b.shape a._check(a, '*', b, o, o2) c = a.zeros((m, n), a.domain) ddm_imatmul(c, a, b) return c def rref(a): """Reduced-row echelon form of a and list of pivots""" b = a.copy() pivots = ddm_irref(b) return b, pivots def nullspace(a): rref, pivots = a.rref() rows, cols = a.shape domain = a.domain basis = [] for i in range(cols): if i in pivots: continue vec = [domain.one if i == j else domain.zero for j in range(cols)] for ii, jj in enumerate(pivots): vec[jj] -= rref[ii][i] basis.append(vec) return DDM(basis, (len(basis), cols), domain) def det(a): """Determinant of a""" m, n = a.shape if m != n: raise 
DDMShapeError("Determinant of non-square matrix") b = a.copy() K = b.domain deta = ddm_idet(b, K) return deta def inv(a): """Inverse of a""" m, n = a.shape if m != n: raise DDMShapeError("Determinant of non-square matrix") ainv = a.copy() K = a.domain ddm_iinv(ainv, a, K) return ainv def lu(a): """L, U decomposition of a""" m, n = a.shape K = a.domain U = a.copy() L = a.eye(m, K) swaps = ddm_ilu_split(L, U, K) return L, U, swaps def lu_solve(a, b): """x where a*x = b""" m, n = a.shape m2, o = b.shape a._check(a, 'lu_solve', b, m, m2) L, U, swaps = a.lu() x = a.zeros((n, o), a.domain) ddm_ilu_solve(x, L, U, swaps, b) return x def charpoly(a): """Coefficients of characteristic polynomial of a""" K = a.domain m, n = a.shape if m != n: raise DDMShapeError("Charpoly of non-square matrix") vec = ddm_berk(a, K) coeffs = [vec[i][0] for i in range(n+1)] return coeffs def ddm_iadd(a, b): """a += b""" for ai, bi in zip(a, b): for j, bij in enumerate(bi): ai[j] += bij def ddm_isub(a, b): """a -= b""" for ai, bi in zip(a, b): for j, bij in enumerate(bi): ai[j] -= bij def ddm_ineg(a): """a <-- -a""" for ai in a: for j, aij in enumerate(ai): ai[j] = -aij def ddm_imul(a, b): for ai in a: for j, aij in enumerate(ai): ai[j] = b * aij def ddm_imatmul(a, b, c): """a += b @ c""" cT = list(zip(*c)) for bi, ai in zip(b, a): for j, cTj in enumerate(cT): ai[j] = sum(map(mul, bi, cTj), ai[j]) def ddm_irref(a): """a <-- rref(a)""" # a is (m x n) m = len(a) if not m: return [] n = len(a[0]) i = 0 pivots = [] for j in range(n): # pivot aij = a[i][j] # zero-pivot if not aij: for ip in range(i+1, m): aij = a[ip][j] # row-swap if aij: a[i], a[ip] = a[ip], a[i] break else: # next column continue # normalise row ai = a[i] aijinv = aij**-1 for l in range(j, n): ai[l] *= aijinv # ai[j] = one # eliminate above and below to the right for k, ak in enumerate(a): if k == i or not ak[j]: continue akj = ak[j] ak[j] -= akj # ak[j] = zero for l in range(j+1, n): ak[l] -= akj * ai[l] # next row pivots.append(j) i += 1 # no more rows? 
if i >= m: break return pivots def ddm_idet(a, K): """a <-- echelon(a); return det""" # Fraction-free Gaussian elimination # https://www.math.usm.edu/perry/Research/Thesis_DRL.pdf # a is (m x n) m = len(a) if not m: return K.one n = len(a[0]) is_field = K.is_Field # uf keeps track of the effect of row swaps and multiplies uf = K.one for j in range(n-1): # if zero on the diagonal need to swap if not a[j][j]: for l in range(j+1, n): if a[l][j]: a[j], a[l] = a[l], a[j] uf = -uf break else: # unable to swap: det = 0 return K.zero for i in range(j+1, n): if a[i][j]: if not is_field: d = K.gcd(a[j][j], a[i][j]) b = a[j][j] // d c = a[i][j] // d else: b = a[j][j] c = a[i][j] # account for multiplying row i by b uf = b * uf for k in range(j+1, n): a[i][k] = b*a[i][k] - c*a[j][k] # triangular det is product of diagonal prod = K.one for i in range(n): prod = prod * a[i][i] # incorporate swaps and multiplies if not is_field: D = prod // uf else: D = prod / uf return D def ddm_iinv(ainv, a, K): if not K.is_Field: raise ValueError('Not a field') # a is (m x n) m = len(a) if not m: return n = len(a[0]) if m != n: raise NonSquareMatrixError eye = [[K.one if i==j else K.zero for j in range(n)] for i in range(n)] Aaug = [row + eyerow for row, eyerow in zip(a, eye)] pivots = ddm_irref(Aaug) if pivots != list(range(n)): raise NonInvertibleMatrixError('Matrix det == 0; not invertible.') ainv[:] = [row[n:] for row in Aaug] def ddm_ilu_split(L, U, K): """L, U <-- LU(U)""" m = len(U) if not m: return [] n = len(U[0]) swaps = ddm_ilu(U) zeros = [K.zero] * min(m, n) for i in range(1, m): j = min(i, n) L[i][:j] = U[i][:j] U[i][:j] = zeros[:j] return swaps def ddm_ilu(a): """a <-- LU(a)""" m = len(a) if not m: return [] n = len(a[0]) swaps = [] for i in range(min(m, n)): if not a[i][i]: for ip in range(i+1, m): if a[ip][i]: swaps.append((i, ip)) a[i], a[ip] = a[ip], a[i] break else: # M = Matrix([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 1, 2]]) continue for j in range(i+1, m): l_ji = a[j][i] / a[i][i] a[j][i] = l_ji for k in range(i+1, n): a[j][k] -= l_ji * a[i][k] return swaps def ddm_ilu_solve(x, L, U, swaps, b): """x <-- solve(L*U*x = swaps(b))""" m = len(U) if not m: return n = len(U[0]) m2 = len(b) if not m2: raise DDMShapeError("Shape mismtch") o = len(b[0]) if m != m2: raise DDMShapeError("Shape mismtch") if m < n: raise NotImplementedError("Underdetermined") if swaps: b = [row[:] for row in b] for i1, i2 in swaps: b[i1], b[i2] = b[i2], b[i1] # solve Ly = b y = [[None] * o for _ in range(m)] for k in range(o): for i in range(m): rhs = b[i][k] for j in range(i): rhs -= L[i][j] * y[j][k] y[i][k] = rhs if m > n: for i in range(n, m): for j in range(o): if y[i][j]: raise NonInvertibleMatrixError # Solve Ux = y for k in range(o): for i in reversed(range(n)): if not U[i][i]: raise NonInvertibleMatrixError rhs = y[i][k] for j in range(i+1, n): rhs -= U[i][j] * x[j][k] x[i][k] = rhs / U[i][i] def ddm_berk(M, K): m = len(M) if not m: return [[K.one]] n = len(M[0]) if m != n: raise DDMShapeError("Not square") if n == 1: return [[K.one], [-M[0][0]]] a = M[0][0] R = [M[0][1:]] C = [[row[0]] for row in M[1:]] A = [row[1:] for row in M[1:]] q = ddm_berk(A, K) T = [[K.zero] * n for _ in range(n+1)] for i in range(n): T[i][i] = K.one T[i+1][i] = -a for i in range(2, n+1): if i == 2: AnC = C else: C = AnC AnC = [[K.zero] for row in C] ddm_imatmul(AnC, A, C) RAnC = [[K.zero]] ddm_imatmul(RAnC, R, AnC) for j in range(0, n+1-i): T[i+j][j] = -RAnC[0][0] qout = [[K.zero] for _ in range(n+1)] ddm_imatmul(qout, T, q) return 
qout class DomainMatrix: def __init__(self, rows, shape, domain): self.rep = DDM(rows, shape, domain) self.shape = shape self.domain = domain @classmethod def from_ddm(cls, ddm): return cls(ddm, ddm.shape, ddm.domain) @classmethod def from_list_sympy(cls, nrows, ncols, rows, **kwargs): assert len(rows) == nrows assert all(len(row) == ncols for row in rows) items_sympy = [_sympify(item) for row in rows for item in row] domain, items_domain = cls.get_domain(items_sympy, **kwargs) domain_rows = [[items_domain[ncols*r + c] for c in range(ncols)] for r in range(nrows)] return DomainMatrix(domain_rows, (nrows, ncols), domain) @classmethod def from_Matrix(cls, M, **kwargs): return cls.from_list_sympy(*M.shape, M.tolist(), **kwargs) @classmethod def get_domain(cls, items_sympy, **kwargs): K, items_K = construct_domain(items_sympy, **kwargs) return K, items_K def convert_to(self, K): Kold = self.domain if K == Kold: return self.from_ddm(self.rep.copy()) new_rows = [[K.convert_from(e, Kold) for e in row] for row in self.rep] return DomainMatrix(new_rows, self.shape, K) def to_field(self): K = self.domain.get_field() return self.convert_to(K) def unify(self, other): K1 = self.domain K2 = other.domain if K1 == K2: return self, other K = K1.unify(K2) if K1 != K: self = self.convert_to(K) if K2 != K: other = other.convert_to(K) return self, other def to_Matrix(self): from sympy.matrices.dense import MutableDenseMatrix rows_sympy = [[self.domain.to_sympy(e) for e in row] for row in self.rep] return MutableDenseMatrix(rows_sympy) def __repr__(self): rows_str = ['[%s]' % (', '.join(map(str, row))) for row in self.rep] rowstr = '[%s]' % ', '.join(rows_str) return 'DomainMatrix(%s, %r, %r)' % (rowstr, self.shape, self.domain) def __add__(A, B): if not isinstance(B, DomainMatrix): return NotImplemented return A.add(B) def __sub__(A, B): if not isinstance(B, DomainMatrix): return NotImplemented return A.sub(B) def __neg__(A): return A.neg() def __mul__(A, B): """A * B""" if isinstance(B, DomainMatrix): return A.matmul(B) elif B in A.domain: return A.from_ddm(A.rep * B) else: return NotImplemented def __rmul__(A, B): if B in A.domain: return A.from_ddm(A.rep * B) else: return NotImplemented def __pow__(A, n): """A ** n""" if not isinstance(n, int): return NotImplemented return A.pow(n) def add(A, B): if A.shape != B.shape: raise ShapeError("shape") if A.domain != B.domain: raise ValueError("domain") return A.from_ddm(A.rep.add(B.rep)) def sub(A, B): if A.shape != B.shape: raise ShapeError("shape") if A.domain != B.domain: raise ValueError("domain") return A.from_ddm(A.rep.sub(B.rep)) def neg(A): return A.from_ddm(A.rep.neg()) def mul(A, b): return A.from_ddm(A.rep.mul(b)) def matmul(A, B): return A.from_ddm(A.rep.matmul(B.rep)) def pow(A, n): if n < 0: raise NotImplementedError('Negative powers') elif n == 0: m, n = A.shape rows = [[A.domain.zero] * m for _ in range(m)] for i in range(m): rows[i][i] = A.domain.one return type(A)(rows, A.shape, A.domain) elif n == 1: return A elif n % 2 == 1: return A * A**(n - 1) else: sqrtAn = A ** (n // 2) return sqrtAn * sqrtAn def rref(self): if not self.domain.is_Field: raise ValueError('Not a field') rref_ddm, pivots = self.rep.rref() return self.from_ddm(rref_ddm), tuple(pivots) def nullspace(self): return self.from_ddm(self.rep.nullspace()) def inv(self): if not self.domain.is_Field: raise ValueError('Not a field') m, n = self.shape if m != n: raise NonSquareMatrixError inv = self.rep.inv() return self.from_ddm(inv) def det(self): m, n = self.shape if m != n: raise 
NonSquareMatrixError return self.rep.det() def lu(self): if not self.domain.is_Field: raise ValueError('Not a field') L, U, swaps = self.rep.lu() return self.from_ddm(L), self.from_ddm(U), swaps def lu_solve(self, rhs): if self.shape[0] != rhs.shape[0]: raise ShapeError("Shape") if not self.domain.is_Field: raise ValueError('Not a field') sol = self.rep.lu_solve(rhs.rep) return self.from_ddm(sol) def charpoly(self): m, n = self.shape if m != n: raise NonSquareMatrixError("not square") return self.rep.charpoly() @classmethod def eye(cls, n, domain): return cls.from_ddm(DDM.eye(n, domain)) def __eq__(A, B): """A == B""" if not isinstance(B, DomainMatrix): return NotImplemented return A.rep == B.rep def dom_eigenvects(A, l=Dummy('lambda')): charpoly = A.charpoly() rows, cols = A.shape domain = A.domain _, factors = dup_factor_list(charpoly, domain) rational_eigenvects = [] algebraic_eigenvects = [] for base, exp in factors: if len(base) == 2: field = domain eigenval = -base[1] / base[0] EE_items = [ [eigenval if i == j else field.zero for j in range(cols)] for i in range(rows)] EE = DomainMatrix(EE_items, (rows, cols), field) basis = (A - EE).nullspace() rational_eigenvects.append((field, eigenval, exp, basis)) else: minpoly = Poly.from_list(base, l, domain=domain) field = FiniteExtension(minpoly) eigenval = field(l) AA_items = [ [Poly.from_list([item], l, domain=domain).rep for item in row] for row in A.rep] AA_items = [[field(item) for item in row] for row in AA_items] AA = DomainMatrix(AA_items, (rows, cols), field) EE_items = [ [eigenval if i == j else field.zero for j in range(cols)] for i in range(rows)] EE = DomainMatrix(EE_items, (rows, cols), field) basis = (AA - EE).nullspace() algebraic_eigenvects.append((field, minpoly, exp, basis)) return rational_eigenvects, algebraic_eigenvects def dom_eigenvects_to_sympy( rational_eigenvects, algebraic_eigenvects, Matrix, **kwargs ): result = [] for field, eigenvalue, multiplicity, eigenvects in rational_eigenvects: eigenvects = eigenvects.rep eigenvalue = field.to_sympy(eigenvalue) new_eigenvects = [ Matrix([field.to_sympy(x) for x in vect]) for vect in eigenvects] result.append((eigenvalue, multiplicity, new_eigenvects)) for field, minpoly, multiplicity, eigenvects in algebraic_eigenvects: eigenvects = eigenvects.rep l = minpoly.gens[0] eigenvects = [[field.to_sympy(x) for x in vect] for vect in eigenvects] degree = minpoly.degree() minpoly = minpoly.as_expr() eigenvals = roots(minpoly, l, **kwargs) if len(eigenvals) != degree: eigenvals = [CRootOf(minpoly, l, idx) for idx in range(degree)] for eigenvalue in eigenvals: new_eigenvects = [ Matrix([x.subs(l, eigenvalue) for x in vect]) for vect in eigenvects] result.append((eigenvalue, multiplicity, new_eigenvects)) return result
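# Illustrative usage sketch (added for exposition; not part of the original
# module).  It shows a typical round trip through DomainMatrix as defined
# above: from_Matrix picks the domain automatically (ZZ for integer entries),
# and field-only operations such as rref() require converting with to_field()
# first.  The import path of DomainMatrix is omitted here because it depends
# on the SymPy version; raw domain elements are converted back to SymPy
# objects before printing since their repr depends on the ground types.
#
#     >>> from sympy import Matrix
#     >>> dM = DomainMatrix.from_Matrix(Matrix([[1, 2], [3, 4]]))
#     >>> dM.domain
#     ZZ
#     >>> dM.to_field().rref()[1]     # pivot columns: the matrix has full rank
#     (0, 1)
#     >>> dM.domain.to_sympy(dM.det())
#     -2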
dbdc5ece91bb2c420040754960314876b097ed5808d2fd1f0076b917d42afc1f
"""Sparse polynomial rings. """ from typing import Any, Dict from operator import add, mul, lt, le, gt, ge from functools import reduce from types import GeneratorType from sympy.core.compatibility import is_sequence from sympy.core.expr import Expr from sympy.core.numbers import igcd, oo from sympy.core.symbol import Symbol, symbols as _symbols from sympy.core.sympify import CantSympify, sympify from sympy.ntheory.multinomial import multinomial_coefficients from sympy.polys.compatibility import IPolys from sympy.polys.constructor import construct_domain from sympy.polys.densebasic import dmp_to_dict, dmp_from_dict from sympy.polys.domains.domainelement import DomainElement from sympy.polys.domains.polynomialring import PolynomialRing from sympy.polys.heuristicgcd import heugcd from sympy.polys.monomials import MonomialOps from sympy.polys.orderings import lex from sympy.polys.polyerrors import ( CoercionFailed, GeneratorsError, ExactQuotientFailed, MultivariatePolynomialError) from sympy.polys.polyoptions import (Domain as DomainOpt, Order as OrderOpt, build_options) from sympy.polys.polyutils import (expr_from_dict, _dict_reorder, _parallel_dict_from_expr) from sympy.printing.defaults import DefaultPrinting from sympy.utilities import public from sympy.utilities.magic import pollute @public def ring(symbols, domain, order=lex): """Construct a polynomial ring returning ``(ring, x_1, ..., x_n)``. Parameters ========== symbols : str Symbol/Expr or sequence of str, Symbol/Expr (non-empty) domain : :class:`~.Domain` or coercible order : :class:`~.MonomialOrder` or coercible, optional, defaults to ``lex`` Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> from sympy.polys.orderings import lex >>> R, x, y, z = ring("x,y,z", ZZ, lex) >>> R Polynomial ring in x, y, z over ZZ with lex order >>> x + y + z x + y + z >>> type(_) <class 'sympy.polys.rings.PolyElement'> """ _ring = PolyRing(symbols, domain, order) return (_ring,) + _ring.gens @public def xring(symbols, domain, order=lex): """Construct a polynomial ring returning ``(ring, (x_1, ..., x_n))``. Parameters ========== symbols : str Symbol/Expr or sequence of str, Symbol/Expr (non-empty) domain : :class:`~.Domain` or coercible order : :class:`~.MonomialOrder` or coercible, optional, defaults to ``lex`` Examples ======== >>> from sympy.polys.rings import xring >>> from sympy.polys.domains import ZZ >>> from sympy.polys.orderings import lex >>> R, (x, y, z) = xring("x,y,z", ZZ, lex) >>> R Polynomial ring in x, y, z over ZZ with lex order >>> x + y + z x + y + z >>> type(_) <class 'sympy.polys.rings.PolyElement'> """ _ring = PolyRing(symbols, domain, order) return (_ring, _ring.gens) @public def vring(symbols, domain, order=lex): """Construct a polynomial ring and inject ``x_1, ..., x_n`` into the global namespace. 
Parameters ========== symbols : str Symbol/Expr or sequence of str, Symbol/Expr (non-empty) domain : :class:`~.Domain` or coercible order : :class:`~.MonomialOrder` or coercible, optional, defaults to ``lex`` Examples ======== >>> from sympy.polys.rings import vring >>> from sympy.polys.domains import ZZ >>> from sympy.polys.orderings import lex >>> vring("x,y,z", ZZ, lex) Polynomial ring in x, y, z over ZZ with lex order >>> x + y + z # noqa: x + y + z >>> type(_) <class 'sympy.polys.rings.PolyElement'> """ _ring = PolyRing(symbols, domain, order) pollute([ sym.name for sym in _ring.symbols ], _ring.gens) return _ring @public def sring(exprs, *symbols, **options): """Construct a ring deriving generators and domain from options and input expressions. Parameters ========== exprs : :class:`~.Expr` or sequence of :class:`~.Expr` (sympifiable) symbols : sequence of :class:`~.Symbol`/:class:`~.Expr` options : keyword arguments understood by :class:`~.Options` Examples ======== >>> from sympy.core import symbols >>> from sympy.polys.rings import sring >>> x, y, z = symbols("x,y,z") >>> R, f = sring(x + 2*y + 3*z) >>> R Polynomial ring in x, y, z over ZZ with lex order >>> f x + 2*y + 3*z >>> type(_) <class 'sympy.polys.rings.PolyElement'> """ single = False if not is_sequence(exprs): exprs, single = [exprs], True exprs = list(map(sympify, exprs)) opt = build_options(symbols, options) # TODO: rewrite this so that it doesn't use expand() (see poly()). reps, opt = _parallel_dict_from_expr(exprs, opt) if opt.domain is None: coeffs = sum([ list(rep.values()) for rep in reps ], []) opt.domain, coeffs_dom = construct_domain(coeffs, opt=opt) coeff_map = dict(zip(coeffs, coeffs_dom)) reps = [{m: coeff_map[c] for m, c in rep.items()} for rep in reps] _ring = PolyRing(opt.gens, opt.domain, opt.order) polys = list(map(_ring.from_dict, reps)) if single: return (_ring, polys[0]) else: return (_ring, polys) def _parse_symbols(symbols): if isinstance(symbols, str): return _symbols(symbols, seq=True) if symbols else () elif isinstance(symbols, Expr): return (symbols,) elif is_sequence(symbols): if all(isinstance(s, str) for s in symbols): return _symbols(symbols) elif all(isinstance(s, Expr) for s in symbols): return symbols raise GeneratorsError("expected a string, Symbol or expression or a non-empty sequence of strings, Symbols or expressions") _ring_cache = {} # type: Dict[Any, Any] class PolyRing(DefaultPrinting, IPolys): """Multivariate distributed polynomial ring. 
""" def __new__(cls, symbols, domain, order=lex): symbols = tuple(_parse_symbols(symbols)) ngens = len(symbols) domain = DomainOpt.preprocess(domain) order = OrderOpt.preprocess(order) _hash_tuple = (cls.__name__, symbols, ngens, domain, order) obj = _ring_cache.get(_hash_tuple) if obj is None: if domain.is_Composite and set(symbols) & set(domain.symbols): raise GeneratorsError("polynomial ring and it's ground domain share generators") obj = object.__new__(cls) obj._hash_tuple = _hash_tuple obj._hash = hash(_hash_tuple) obj.dtype = type("PolyElement", (PolyElement,), {"ring": obj}) obj.symbols = symbols obj.ngens = ngens obj.domain = domain obj.order = order obj.zero_monom = (0,)*ngens obj.gens = obj._gens() obj._gens_set = set(obj.gens) obj._one = [(obj.zero_monom, domain.one)] if ngens: # These expect monomials in at least one variable codegen = MonomialOps(ngens) obj.monomial_mul = codegen.mul() obj.monomial_pow = codegen.pow() obj.monomial_mulpow = codegen.mulpow() obj.monomial_ldiv = codegen.ldiv() obj.monomial_div = codegen.div() obj.monomial_lcm = codegen.lcm() obj.monomial_gcd = codegen.gcd() else: monunit = lambda a, b: () obj.monomial_mul = monunit obj.monomial_pow = monunit obj.monomial_mulpow = lambda a, b, c: () obj.monomial_ldiv = monunit obj.monomial_div = monunit obj.monomial_lcm = monunit obj.monomial_gcd = monunit if order is lex: obj.leading_expv = lambda f: max(f) else: obj.leading_expv = lambda f: max(f, key=order) for symbol, generator in zip(obj.symbols, obj.gens): if isinstance(symbol, Symbol): name = symbol.name if not hasattr(obj, name): setattr(obj, name, generator) _ring_cache[_hash_tuple] = obj return obj def _gens(self): """Return a list of polynomial generators. """ one = self.domain.one _gens = [] for i in range(self.ngens): expv = self.monomial_basis(i) poly = self.zero poly[expv] = one _gens.append(poly) return tuple(_gens) def __getnewargs__(self): return (self.symbols, self.domain, self.order) def __getstate__(self): state = self.__dict__.copy() del state["leading_expv"] for key, value in state.items(): if key.startswith("monomial_"): del state[key] return state def __hash__(self): return self._hash def __eq__(self, other): return isinstance(other, PolyRing) and \ (self.symbols, self.domain, self.ngens, self.order) == \ (other.symbols, other.domain, other.ngens, other.order) def __ne__(self, other): return not self == other def clone(self, symbols=None, domain=None, order=None): return self.__class__(symbols or self.symbols, domain or self.domain, order or self.order) def monomial_basis(self, i): """Return the ith-basis element. 
""" basis = [0]*self.ngens basis[i] = 1 return tuple(basis) @property def zero(self): return self.dtype() @property def one(self): return self.dtype(self._one) def domain_new(self, element, orig_domain=None): return self.domain.convert(element, orig_domain) def ground_new(self, coeff): return self.term_new(self.zero_monom, coeff) def term_new(self, monom, coeff): coeff = self.domain_new(coeff) poly = self.zero if coeff: poly[monom] = coeff return poly def ring_new(self, element): if isinstance(element, PolyElement): if self == element.ring: return element elif isinstance(self.domain, PolynomialRing) and self.domain.ring == element.ring: return self.ground_new(element) else: raise NotImplementedError("conversion") elif isinstance(element, str): raise NotImplementedError("parsing") elif isinstance(element, dict): return self.from_dict(element) elif isinstance(element, list): try: return self.from_terms(element) except ValueError: return self.from_list(element) elif isinstance(element, Expr): return self.from_expr(element) else: return self.ground_new(element) __call__ = ring_new def from_dict(self, element): domain_new = self.domain_new poly = self.zero for monom, coeff in element.items(): coeff = domain_new(coeff) if coeff: poly[monom] = coeff return poly def from_terms(self, element): return self.from_dict(dict(element)) def from_list(self, element): return self.from_dict(dmp_to_dict(element, self.ngens-1, self.domain)) def _rebuild_expr(self, expr, mapping): domain = self.domain def _rebuild(expr): generator = mapping.get(expr) if generator is not None: return generator elif expr.is_Add: return reduce(add, list(map(_rebuild, expr.args))) elif expr.is_Mul: return reduce(mul, list(map(_rebuild, expr.args))) elif expr.is_Pow and expr.exp.is_Integer and expr.exp >= 0: return _rebuild(expr.base)**int(expr.exp) else: return domain.convert(expr) return _rebuild(sympify(expr)) def from_expr(self, expr): mapping = dict(list(zip(self.symbols, self.gens))) try: poly = self._rebuild_expr(expr, mapping) except CoercionFailed: raise ValueError("expected an expression convertible to a polynomial in %s, got %s" % (self, expr)) else: return self.ring_new(poly) def index(self, gen): """Compute index of ``gen`` in ``self.gens``. """ if gen is None: if self.ngens: i = 0 else: i = -1 # indicate impossible choice elif isinstance(gen, int): i = gen if 0 <= i and i < self.ngens: pass elif -self.ngens <= i and i <= -1: i = -i - 1 else: raise ValueError("invalid generator index: %s" % gen) elif isinstance(gen, self.dtype): try: i = self.gens.index(gen) except ValueError: raise ValueError("invalid generator: %s" % gen) elif isinstance(gen, str): try: i = self.symbols.index(gen) except ValueError: raise ValueError("invalid generator: %s" % gen) else: raise ValueError("expected a polynomial generator, an integer, a string or None, got %s" % gen) return i def drop(self, *gens): """Remove specified generators from this ring. """ indices = set(map(self.index, gens)) symbols = [ s for i, s in enumerate(self.symbols) if i not in indices ] if not symbols: return self.domain else: return self.clone(symbols=symbols) def __getitem__(self, key): symbols = self.symbols[key] if not symbols: return self.domain else: return self.clone(symbols=symbols) def to_ground(self): # TODO: should AlgebraicField be a Composite domain? 
if self.domain.is_Composite or hasattr(self.domain, 'domain'): return self.clone(domain=self.domain.domain) else: raise ValueError("%s is not a composite domain" % self.domain) def to_domain(self): return PolynomialRing(self) def to_field(self): from sympy.polys.fields import FracField return FracField(self.symbols, self.domain, self.order) @property def is_univariate(self): return len(self.gens) == 1 @property def is_multivariate(self): return len(self.gens) > 1 def add(self, *objs): """ Add a sequence of polynomials or containers of polynomials. Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> R, x = ring("x", ZZ) >>> R.add([ x**2 + 2*i + 3 for i in range(4) ]) 4*x**2 + 24 >>> _.factor_list() (4, [(x**2 + 6, 1)]) """ p = self.zero for obj in objs: if is_sequence(obj, include=GeneratorType): p += self.add(*obj) else: p += obj return p def mul(self, *objs): """ Multiply a sequence of polynomials or containers of polynomials. Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> R, x = ring("x", ZZ) >>> R.mul([ x**2 + 2*i + 3 for i in range(4) ]) x**8 + 24*x**6 + 206*x**4 + 744*x**2 + 945 >>> _.factor_list() (1, [(x**2 + 3, 1), (x**2 + 5, 1), (x**2 + 7, 1), (x**2 + 9, 1)]) """ p = self.one for obj in objs: if is_sequence(obj, include=GeneratorType): p *= self.mul(*obj) else: p *= obj return p def drop_to_ground(self, *gens): r""" Remove specified generators from the ring and inject them into its domain. """ indices = set(map(self.index, gens)) symbols = [s for i, s in enumerate(self.symbols) if i not in indices] gens = [gen for i, gen in enumerate(self.gens) if i not in indices] if not symbols: return self else: return self.clone(symbols=symbols, domain=self.drop(*gens)) def compose(self, other): """Add the generators of ``other`` to ``self``""" if self != other: syms = set(self.symbols).union(set(other.symbols)) return self.clone(symbols=list(syms)) else: return self def add_gens(self, symbols): """Add the elements of ``symbols`` as generators to ``self``""" syms = set(self.symbols).union(set(symbols)) return self.clone(symbols=list(syms)) class PolyElement(DomainElement, DefaultPrinting, CantSympify, dict): """Element of multivariate distributed polynomial ring. """ def new(self, init): return self.__class__(init) def parent(self): return self.ring.to_domain() def __getnewargs__(self): return (self.ring, list(self.iterterms())) _hash = None def __hash__(self): # XXX: This computes a hash of a dictionary, but currently we don't # protect dictionary from being changed so any use site modifications # will make hashing go wrong. Use this feature with caution until we # figure out how to make a safe API without compromising speed of this # low-level class. _hash = self._hash if _hash is None: self._hash = _hash = hash((self.ring, frozenset(self.items()))) return _hash def copy(self): """Return a copy of polynomial self. Polynomials are mutable; if one is interested in preserving a polynomial, and one plans to use inplace operations, one can copy the polynomial. This method makes a shallow copy. 
Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> R, x, y = ring('x, y', ZZ) >>> p = (x + y)**2 >>> p1 = p.copy() >>> p2 = p >>> p[R.zero_monom] = 3 >>> p x**2 + 2*x*y + y**2 + 3 >>> p1 x**2 + 2*x*y + y**2 >>> p2 x**2 + 2*x*y + y**2 + 3 """ return self.new(self) def set_ring(self, new_ring): if self.ring == new_ring: return self elif self.ring.symbols != new_ring.symbols: terms = list(zip(*_dict_reorder(self, self.ring.symbols, new_ring.symbols))) return new_ring.from_terms(terms) else: return new_ring.from_dict(self) def as_expr(self, *symbols): if symbols and len(symbols) != self.ring.ngens: raise ValueError("not enough symbols, expected %s got %s" % (self.ring.ngens, len(symbols))) else: symbols = self.ring.symbols return expr_from_dict(self.as_expr_dict(), *symbols) def as_expr_dict(self): to_sympy = self.ring.domain.to_sympy return {monom: to_sympy(coeff) for monom, coeff in self.iterterms()} def clear_denoms(self): domain = self.ring.domain if not domain.is_Field or not domain.has_assoc_Ring: return domain.one, self ground_ring = domain.get_ring() common = ground_ring.one lcm = ground_ring.lcm denom = domain.denom for coeff in self.values(): common = lcm(common, denom(coeff)) poly = self.new([ (k, v*common) for k, v in self.items() ]) return common, poly def strip_zero(self): """Eliminate monomials with zero coefficient. """ for k, v in list(self.items()): if not v: del self[k] def __eq__(p1, p2): """Equality test for polynomials. Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', ZZ) >>> p1 = (x + y)**2 + (x - y)**2 >>> p1 == 4*x*y False >>> p1 == 2*(x**2 + y**2) True """ if not p2: return not p1 elif isinstance(p2, PolyElement) and p2.ring == p1.ring: return dict.__eq__(p1, p2) elif len(p1) > 1: return False else: return p1.get(p1.ring.zero_monom) == p2 def __ne__(p1, p2): return not p1 == p2 def almosteq(p1, p2, tolerance=None): """Approximate equality test for polynomials. 
""" ring = p1.ring if isinstance(p2, ring.dtype): if set(p1.keys()) != set(p2.keys()): return False almosteq = ring.domain.almosteq for k in p1.keys(): if not almosteq(p1[k], p2[k], tolerance): return False return True elif len(p1) > 1: return False else: try: p2 = ring.domain.convert(p2) except CoercionFailed: return False else: return ring.domain.almosteq(p1.const(), p2, tolerance) def sort_key(self): return (len(self), self.terms()) def _cmp(p1, p2, op): if isinstance(p2, p1.ring.dtype): return op(p1.sort_key(), p2.sort_key()) else: return NotImplemented def __lt__(p1, p2): return p1._cmp(p2, lt) def __le__(p1, p2): return p1._cmp(p2, le) def __gt__(p1, p2): return p1._cmp(p2, gt) def __ge__(p1, p2): return p1._cmp(p2, ge) def _drop(self, gen): ring = self.ring i = ring.index(gen) if ring.ngens == 1: return i, ring.domain else: symbols = list(ring.symbols) del symbols[i] return i, ring.clone(symbols=symbols) def drop(self, gen): i, ring = self._drop(gen) if self.ring.ngens == 1: if self.is_ground: return self.coeff(1) else: raise ValueError("can't drop %s" % gen) else: poly = ring.zero for k, v in self.items(): if k[i] == 0: K = list(k) del K[i] poly[tuple(K)] = v else: raise ValueError("can't drop %s" % gen) return poly def _drop_to_ground(self, gen): ring = self.ring i = ring.index(gen) symbols = list(ring.symbols) del symbols[i] return i, ring.clone(symbols=symbols, domain=ring[i]) def drop_to_ground(self, gen): if self.ring.ngens == 1: raise ValueError("can't drop only generator to ground") i, ring = self._drop_to_ground(gen) poly = ring.zero gen = ring.domain.gens[0] for monom, coeff in self.iterterms(): mon = monom[:i] + monom[i+1:] if not mon in poly: poly[mon] = (gen**monom[i]).mul_ground(coeff) else: poly[mon] += (gen**monom[i]).mul_ground(coeff) return poly def to_dense(self): return dmp_from_dict(self, self.ring.ngens-1, self.ring.domain) def to_dict(self): return dict(self) def str(self, printer, precedence, exp_pattern, mul_symbol): if not self: return printer._print(self.ring.domain.zero) prec_mul = precedence["Mul"] prec_atom = precedence["Atom"] ring = self.ring symbols = ring.symbols ngens = ring.ngens zm = ring.zero_monom sexpvs = [] for expv, coeff in self.terms(): negative = ring.domain.is_negative(coeff) sign = " - " if negative else " + " sexpvs.append(sign) if expv == zm: scoeff = printer._print(coeff) if negative and scoeff.startswith("-"): scoeff = scoeff[1:] else: if negative: coeff = -coeff if coeff != self.ring.one: scoeff = printer.parenthesize(coeff, prec_mul, strict=True) else: scoeff = '' sexpv = [] for i in range(ngens): exp = expv[i] if not exp: continue symbol = printer.parenthesize(symbols[i], prec_atom, strict=True) if exp != 1: if exp != int(exp) or exp < 0: sexp = printer.parenthesize(exp, prec_atom, strict=False) else: sexp = exp sexpv.append(exp_pattern % (symbol, sexp)) else: sexpv.append('%s' % symbol) if scoeff: sexpv = [scoeff] + sexpv sexpvs.append(mul_symbol.join(sexpv)) if sexpvs[0] in [" + ", " - "]: head = sexpvs.pop(0) if head == " - ": sexpvs.insert(0, "-") return "".join(sexpvs) @property def is_generator(self): return self in self.ring._gens_set @property def is_ground(self): return not self or (len(self) == 1 and self.ring.zero_monom in self) @property def is_monomial(self): return not self or (len(self) == 1 and self.LC == 1) @property def is_term(self): return len(self) <= 1 @property def is_negative(self): return self.ring.domain.is_negative(self.LC) @property def is_positive(self): return self.ring.domain.is_positive(self.LC) 
@property def is_nonnegative(self): return self.ring.domain.is_nonnegative(self.LC) @property def is_nonpositive(self): return self.ring.domain.is_nonpositive(self.LC) @property def is_zero(f): return not f @property def is_one(f): return f == f.ring.one @property def is_monic(f): return f.ring.domain.is_one(f.LC) @property def is_primitive(f): return f.ring.domain.is_one(f.content()) @property def is_linear(f): return all(sum(monom) <= 1 for monom in f.itermonoms()) @property def is_quadratic(f): return all(sum(monom) <= 2 for monom in f.itermonoms()) @property def is_squarefree(f): if not f.ring.ngens: return True return f.ring.dmp_sqf_p(f) @property def is_irreducible(f): if not f.ring.ngens: return True return f.ring.dmp_irreducible_p(f) @property def is_cyclotomic(f): if f.ring.is_univariate: return f.ring.dup_cyclotomic_p(f) else: raise MultivariatePolynomialError("cyclotomic polynomial") def __neg__(self): return self.new([ (monom, -coeff) for monom, coeff in self.iterterms() ]) def __pos__(self): return self def __add__(p1, p2): """Add two polynomials. Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', ZZ) >>> (x + y)**2 + (x - y)**2 2*x**2 + 2*y**2 """ if not p2: return p1.copy() ring = p1.ring if isinstance(p2, ring.dtype): p = p1.copy() get = p.get zero = ring.domain.zero for k, v in p2.items(): v = get(k, zero) + v if v: p[k] = v else: del p[k] return p elif isinstance(p2, PolyElement): if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring: pass elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring: return p2.__radd__(p1) else: return NotImplemented try: cp2 = ring.domain_new(p2) except CoercionFailed: return NotImplemented else: p = p1.copy() if not cp2: return p zm = ring.zero_monom if zm not in p1.keys(): p[zm] = cp2 else: if p2 == -p[zm]: del p[zm] else: p[zm] += cp2 return p def __radd__(p1, n): p = p1.copy() if not n: return p ring = p1.ring try: n = ring.domain_new(n) except CoercionFailed: return NotImplemented else: zm = ring.zero_monom if zm not in p1.keys(): p[zm] = n else: if n == -p[zm]: del p[zm] else: p[zm] += n return p def __sub__(p1, p2): """Subtract polynomial p2 from p1. Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', ZZ) >>> p1 = x + y**2 >>> p2 = x*y + y**2 >>> p1 - p2 -x*y + x """ if not p2: return p1.copy() ring = p1.ring if isinstance(p2, ring.dtype): p = p1.copy() get = p.get zero = ring.domain.zero for k, v in p2.items(): v = get(k, zero) - v if v: p[k] = v else: del p[k] return p elif isinstance(p2, PolyElement): if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring: pass elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring: return p2.__rsub__(p1) else: return NotImplemented try: p2 = ring.domain_new(p2) except CoercionFailed: return NotImplemented else: p = p1.copy() zm = ring.zero_monom if zm not in p1.keys(): p[zm] = -p2 else: if p2 == p[zm]: del p[zm] else: p[zm] -= p2 return p def __rsub__(p1, n): """n - p1 with n convertible to the coefficient domain. 
Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', ZZ) >>> p = x + y >>> 4 - p -x - y + 4 """ ring = p1.ring try: n = ring.domain_new(n) except CoercionFailed: return NotImplemented else: p = ring.zero for expv in p1: p[expv] = -p1[expv] p += n return p def __mul__(p1, p2): """Multiply two polynomials. Examples ======== >>> from sympy.polys.domains import QQ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', QQ) >>> p1 = x + y >>> p2 = x - y >>> p1*p2 x**2 - y**2 """ ring = p1.ring p = ring.zero if not p1 or not p2: return p elif isinstance(p2, ring.dtype): get = p.get zero = ring.domain.zero monomial_mul = ring.monomial_mul p2it = list(p2.items()) for exp1, v1 in p1.items(): for exp2, v2 in p2it: exp = monomial_mul(exp1, exp2) p[exp] = get(exp, zero) + v1*v2 p.strip_zero() return p elif isinstance(p2, PolyElement): if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring: pass elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring: return p2.__rmul__(p1) else: return NotImplemented try: p2 = ring.domain_new(p2) except CoercionFailed: return NotImplemented else: for exp1, v1 in p1.items(): v = v1*p2 if v: p[exp1] = v return p def __rmul__(p1, p2): """p2 * p1 with p2 in the coefficient domain of p1. Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', ZZ) >>> p = x + y >>> 4 * p 4*x + 4*y """ p = p1.ring.zero if not p2: return p try: p2 = p.ring.domain_new(p2) except CoercionFailed: return NotImplemented else: for exp1, v1 in p1.items(): v = p2*v1 if v: p[exp1] = v return p def __pow__(self, n): """raise polynomial to power `n` Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.rings import ring >>> _, x, y = ring('x, y', ZZ) >>> p = x + y**2 >>> p**3 x**3 + 3*x**2*y**2 + 3*x*y**4 + y**6 """ ring = self.ring if not n: if self: return ring.one else: raise ValueError("0**0") elif len(self) == 1: monom, coeff = list(self.items())[0] p = ring.zero if coeff == 1: p[ring.monomial_pow(monom, n)] = coeff else: p[ring.monomial_pow(monom, n)] = coeff**n return p # For ring series, we need negative and rational exponent support only # with monomials. 
n = int(n) if n < 0: raise ValueError("Negative exponent") elif n == 1: return self.copy() elif n == 2: return self.square() elif n == 3: return self*self.square() elif len(self) <= 5: # TODO: use an actual density measure return self._pow_multinomial(n) else: return self._pow_generic(n) def _pow_generic(self, n): p = self.ring.one c = self while True: if n & 1: p = p*c n -= 1 if not n: break c = c.square() n = n // 2 return p def _pow_multinomial(self, n): multinomials = list(multinomial_coefficients(len(self), n).items()) monomial_mulpow = self.ring.monomial_mulpow zero_monom = self.ring.zero_monom terms = list(self.iterterms()) zero = self.ring.domain.zero poly = self.ring.zero for multinomial, multinomial_coeff in multinomials: product_monom = zero_monom product_coeff = multinomial_coeff for exp, (monom, coeff) in zip(multinomial, terms): if exp: product_monom = monomial_mulpow(product_monom, monom, exp) product_coeff *= coeff**exp monom = tuple(product_monom) coeff = product_coeff coeff = poly.get(monom, zero) + coeff if coeff: poly[monom] = coeff else: del poly[monom] return poly def square(self): """square of a polynomial Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring('x, y', ZZ) >>> p = x + y**2 >>> p.square() x**2 + 2*x*y**2 + y**4 """ ring = self.ring p = ring.zero get = p.get keys = list(self.keys()) zero = ring.domain.zero monomial_mul = ring.monomial_mul for i in range(len(keys)): k1 = keys[i] pk = self[k1] for j in range(i): k2 = keys[j] exp = monomial_mul(k1, k2) p[exp] = get(exp, zero) + pk*self[k2] p = p.imul_num(2) get = p.get for k, v in self.items(): k2 = monomial_mul(k, k) p[k2] = get(k2, zero) + v**2 p.strip_zero() return p def __divmod__(p1, p2): ring = p1.ring if not p2: raise ZeroDivisionError("polynomial division") elif isinstance(p2, ring.dtype): return p1.div(p2) elif isinstance(p2, PolyElement): if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring: pass elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring: return p2.__rdivmod__(p1) else: return NotImplemented try: p2 = ring.domain_new(p2) except CoercionFailed: return NotImplemented else: return (p1.quo_ground(p2), p1.rem_ground(p2)) def __rdivmod__(p1, p2): return NotImplemented def __mod__(p1, p2): ring = p1.ring if not p2: raise ZeroDivisionError("polynomial division") elif isinstance(p2, ring.dtype): return p1.rem(p2) elif isinstance(p2, PolyElement): if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring: pass elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring: return p2.__rmod__(p1) else: return NotImplemented try: p2 = ring.domain_new(p2) except CoercionFailed: return NotImplemented else: return p1.rem_ground(p2) def __rmod__(p1, p2): return NotImplemented def __truediv__(p1, p2): ring = p1.ring if not p2: raise ZeroDivisionError("polynomial division") elif isinstance(p2, ring.dtype): if p2.is_monomial: return p1*(p2**(-1)) else: return p1.quo(p2) elif isinstance(p2, PolyElement): if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring: pass elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring: return p2.__rtruediv__(p1) else: return NotImplemented try: p2 = ring.domain_new(p2) except CoercionFailed: return NotImplemented else: return p1.quo_ground(p2) def __rtruediv__(p1, p2): return NotImplemented __floordiv__ = __truediv__ __rfloordiv__ = __rtruediv__ # TODO: use // (__floordiv__) for exquo()? 
def _term_div(self): zm = self.ring.zero_monom domain = self.ring.domain domain_quo = domain.quo monomial_div = self.ring.monomial_div if domain.is_Field: def term_div(a_lm_a_lc, b_lm_b_lc): a_lm, a_lc = a_lm_a_lc b_lm, b_lc = b_lm_b_lc if b_lm == zm: # apparently this is a very common case monom = a_lm else: monom = monomial_div(a_lm, b_lm) if monom is not None: return monom, domain_quo(a_lc, b_lc) else: return None else: def term_div(a_lm_a_lc, b_lm_b_lc): a_lm, a_lc = a_lm_a_lc b_lm, b_lc = b_lm_b_lc if b_lm == zm: # apparently this is a very common case monom = a_lm else: monom = monomial_div(a_lm, b_lm) if not (monom is None or a_lc % b_lc): return monom, domain_quo(a_lc, b_lc) else: return None return term_div def div(self, fv): """Division algorithm, see [CLO] p64. fv array of polynomials return qv, r such that self = sum(fv[i]*qv[i]) + r All polynomials are required not to be Laurent polynomials. Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring('x, y', ZZ) >>> f = x**3 >>> f0 = x - y**2 >>> f1 = x - y >>> qv, r = f.div((f0, f1)) >>> qv[0] x**2 + x*y**2 + y**4 >>> qv[1] 0 >>> r y**6 """ ring = self.ring ret_single = False if isinstance(fv, PolyElement): ret_single = True fv = [fv] if any(not f for f in fv): raise ZeroDivisionError("polynomial division") if not self: if ret_single: return ring.zero, ring.zero else: return [], ring.zero for f in fv: if f.ring != ring: raise ValueError('self and f must have the same ring') s = len(fv) qv = [ring.zero for i in range(s)] p = self.copy() r = ring.zero term_div = self._term_div() expvs = [fx.leading_expv() for fx in fv] while p: i = 0 divoccurred = 0 while i < s and divoccurred == 0: expv = p.leading_expv() term = term_div((expv, p[expv]), (expvs[i], fv[i][expvs[i]])) if term is not None: expv1, c = term qv[i] = qv[i]._iadd_monom((expv1, c)) p = p._iadd_poly_monom(fv[i], (expv1, -c)) divoccurred = 1 else: i += 1 if not divoccurred: expv = p.leading_expv() r = r._iadd_monom((expv, p[expv])) del p[expv] if expv == ring.zero_monom: r += p if ret_single: if not qv: return ring.zero, r else: return qv[0], r else: return qv, r def rem(self, G): f = self if isinstance(G, PolyElement): G = [G] if any(not g for g in G): raise ZeroDivisionError("polynomial division") ring = f.ring domain = ring.domain zero = domain.zero monomial_mul = ring.monomial_mul r = ring.zero term_div = f._term_div() ltf = f.LT f = f.copy() get = f.get while f: for g in G: tq = term_div(ltf, g.LT) if tq is not None: m, c = tq for mg, cg in g.iterterms(): m1 = monomial_mul(mg, m) c1 = get(m1, zero) - c*cg if not c1: del f[m1] else: f[m1] = c1 ltm = f.leading_expv() if ltm is not None: ltf = ltm, f[ltm] break else: ltm, ltc = ltf if ltm in r: r[ltm] += ltc else: r[ltm] = ltc del f[ltm] ltm = f.leading_expv() if ltm is not None: ltf = ltm, f[ltm] return r def quo(f, G): return f.div(G)[0] def exquo(f, G): q, r = f.div(G) if not r: return q else: raise ExactQuotientFailed(f, G) def _iadd_monom(self, mc): """add to self the monomial coeff*x0**i0*x1**i1*... unless self is a generator -- then just return the sum of the two. mc is a tuple, (monom, coeff), where monomial is (i0, i1, ...) 
Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring('x, y', ZZ) >>> p = x**4 + 2*y >>> m = (1, 2) >>> p1 = p._iadd_monom((m, 5)) >>> p1 x**4 + 5*x*y**2 + 2*y >>> p1 is p True >>> p = x >>> p1 = p._iadd_monom((m, 5)) >>> p1 5*x*y**2 + x >>> p1 is p False """ if self in self.ring._gens_set: cpself = self.copy() else: cpself = self expv, coeff = mc c = cpself.get(expv) if c is None: cpself[expv] = coeff else: c += coeff if c: cpself[expv] = c else: del cpself[expv] return cpself def _iadd_poly_monom(self, p2, mc): """add to self the product of (p)*(coeff*x0**i0*x1**i1*...) unless self is a generator -- then just return the sum of the two. mc is a tuple, (monom, coeff), where monomial is (i0, i1, ...) Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y, z = ring('x, y, z', ZZ) >>> p1 = x**4 + 2*y >>> p2 = y + z >>> m = (1, 2, 3) >>> p1 = p1._iadd_poly_monom(p2, (m, 3)) >>> p1 x**4 + 3*x*y**3*z**3 + 3*x*y**2*z**4 + 2*y """ p1 = self if p1 in p1.ring._gens_set: p1 = p1.copy() (m, c) = mc get = p1.get zero = p1.ring.domain.zero monomial_mul = p1.ring.monomial_mul for k, v in p2.items(): ka = monomial_mul(k, m) coeff = get(ka, zero) + v*c if coeff: p1[ka] = coeff else: del p1[ka] return p1 def degree(f, x=None): """ The leading degree in ``x`` or the main variable. Note that the degree of 0 is negative infinity (the SymPy object -oo). """ i = f.ring.index(x) if not f: return -oo elif i < 0: return 0 else: return max([ monom[i] for monom in f.itermonoms() ]) def degrees(f): """ A tuple containing leading degrees in all variables. Note that the degree of 0 is negative infinity (the SymPy object -oo) """ if not f: return (-oo,)*f.ring.ngens else: return tuple(map(max, list(zip(*f.itermonoms())))) def tail_degree(f, x=None): """ The tail degree in ``x`` or the main variable. Note that the degree of 0 is negative infinity (the SymPy object -oo) """ i = f.ring.index(x) if not f: return -oo elif i < 0: return 0 else: return min([ monom[i] for monom in f.itermonoms() ]) def tail_degrees(f): """ A tuple containing tail degrees in all variables. Note that the degree of 0 is negative infinity (the SymPy object -oo) """ if not f: return (-oo,)*f.ring.ngens else: return tuple(map(min, list(zip(*f.itermonoms())))) def leading_expv(self): """Leading monomial tuple according to the monomial ordering. Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y, z = ring('x, y, z', ZZ) >>> p = x**4 + x**3*y + x**2*z**2 + z**7 >>> p.leading_expv() (4, 0, 0) """ if self: return self.ring.leading_expv(self) else: return None def _get_coeff(self, expv): return self.get(expv, self.ring.domain.zero) def coeff(self, element): """ Returns the coefficient that stands next to the given monomial. Parameters ========== element : PolyElement (with ``is_monomial = True``) or 1 Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y, z = ring("x,y,z", ZZ) >>> f = 3*x**2*y - x*y*z + 7*z**3 + 23 >>> f.coeff(x**2*y) 3 >>> f.coeff(x*y) 0 >>> f.coeff(1) 23 """ if element == 1: return self._get_coeff(self.ring.zero_monom) elif isinstance(element, self.ring.dtype): terms = list(element.iterterms()) if len(terms) == 1: monom, coeff = terms[0] if coeff == self.ring.domain.one: return self._get_coeff(monom) raise ValueError("expected a monomial, got %s" % element) def const(self): """Returns the constant coeffcient. 
""" return self._get_coeff(self.ring.zero_monom) @property def LC(self): return self._get_coeff(self.leading_expv()) @property def LM(self): expv = self.leading_expv() if expv is None: return self.ring.zero_monom else: return expv def leading_monom(self): """ Leading monomial as a polynomial element. Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring('x, y', ZZ) >>> (3*x*y + y**2).leading_monom() x*y """ p = self.ring.zero expv = self.leading_expv() if expv: p[expv] = self.ring.domain.one return p @property def LT(self): expv = self.leading_expv() if expv is None: return (self.ring.zero_monom, self.ring.domain.zero) else: return (expv, self._get_coeff(expv)) def leading_term(self): """Leading term as a polynomial element. Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring('x, y', ZZ) >>> (3*x*y + y**2).leading_term() 3*x*y """ p = self.ring.zero expv = self.leading_expv() if expv is not None: p[expv] = self[expv] return p def _sorted(self, seq, order): if order is None: order = self.ring.order else: order = OrderOpt.preprocess(order) if order is lex: return sorted(seq, key=lambda monom: monom[0], reverse=True) else: return sorted(seq, key=lambda monom: order(monom[0]), reverse=True) def coeffs(self, order=None): """Ordered list of polynomial coefficients. Parameters ========== order : :class:`~.MonomialOrder` or coercible, optional Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> from sympy.polys.orderings import lex, grlex >>> _, x, y = ring("x, y", ZZ, lex) >>> f = x*y**7 + 2*x**2*y**3 >>> f.coeffs() [2, 1] >>> f.coeffs(grlex) [1, 2] """ return [ coeff for _, coeff in self.terms(order) ] def monoms(self, order=None): """Ordered list of polynomial monomials. Parameters ========== order : :class:`~.MonomialOrder` or coercible, optional Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> from sympy.polys.orderings import lex, grlex >>> _, x, y = ring("x, y", ZZ, lex) >>> f = x*y**7 + 2*x**2*y**3 >>> f.monoms() [(2, 3), (1, 7)] >>> f.monoms(grlex) [(1, 7), (2, 3)] """ return [ monom for monom, _ in self.terms(order) ] def terms(self, order=None): """Ordered list of polynomial terms. Parameters ========== order : :class:`~.MonomialOrder` or coercible, optional Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> from sympy.polys.orderings import lex, grlex >>> _, x, y = ring("x, y", ZZ, lex) >>> f = x*y**7 + 2*x**2*y**3 >>> f.terms() [((2, 3), 2), ((1, 7), 1)] >>> f.terms(grlex) [((1, 7), 1), ((2, 3), 2)] """ return self._sorted(list(self.items()), order) def itercoeffs(self): """Iterator over coefficients of a polynomial. """ return iter(self.values()) def itermonoms(self): """Iterator over monomials of a polynomial. """ return iter(self.keys()) def iterterms(self): """Iterator over terms of a polynomial. """ return iter(self.items()) def listcoeffs(self): """Unordered list of polynomial coefficients. """ return list(self.values()) def listmonoms(self): """Unordered list of polynomial monomials. """ return list(self.keys()) def listterms(self): """Unordered list of polynomial terms. 
""" return list(self.items()) def imul_num(p, c): """multiply inplace the polynomial p by an element in the coefficient ring, provided p is not one of the generators; else multiply not inplace Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring('x, y', ZZ) >>> p = x + y**2 >>> p1 = p.imul_num(3) >>> p1 3*x + 3*y**2 >>> p1 is p True >>> p = x >>> p1 = p.imul_num(3) >>> p1 3*x >>> p1 is p False """ if p in p.ring._gens_set: return p*c if not c: p.clear() return for exp in p: p[exp] *= c return p def content(f): """Returns GCD of polynomial's coefficients. """ domain = f.ring.domain cont = domain.zero gcd = domain.gcd for coeff in f.itercoeffs(): cont = gcd(cont, coeff) return cont def primitive(f): """Returns content and a primitive polynomial. """ cont = f.content() return cont, f.quo_ground(cont) def monic(f): """Divides all coefficients by the leading coefficient. """ if not f: return f else: return f.quo_ground(f.LC) def mul_ground(f, x): if not x: return f.ring.zero terms = [ (monom, coeff*x) for monom, coeff in f.iterterms() ] return f.new(terms) def mul_monom(f, monom): monomial_mul = f.ring.monomial_mul terms = [ (monomial_mul(f_monom, monom), f_coeff) for f_monom, f_coeff in f.items() ] return f.new(terms) def mul_term(f, term): monom, coeff = term if not f or not coeff: return f.ring.zero elif monom == f.ring.zero_monom: return f.mul_ground(coeff) monomial_mul = f.ring.monomial_mul terms = [ (monomial_mul(f_monom, monom), f_coeff*coeff) for f_monom, f_coeff in f.items() ] return f.new(terms) def quo_ground(f, x): domain = f.ring.domain if not x: raise ZeroDivisionError('polynomial division') if not f or x == domain.one: return f if domain.is_Field: quo = domain.quo terms = [ (monom, quo(coeff, x)) for monom, coeff in f.iterterms() ] else: terms = [ (monom, coeff // x) for monom, coeff in f.iterterms() if not (coeff % x) ] return f.new(terms) def quo_term(f, term): monom, coeff = term if not coeff: raise ZeroDivisionError("polynomial division") elif not f: return f.ring.zero elif monom == f.ring.zero_monom: return f.quo_ground(coeff) term_div = f._term_div() terms = [ term_div(t, term) for t in f.iterterms() ] return f.new([ t for t in terms if t is not None ]) def trunc_ground(f, p): if f.ring.domain.is_ZZ: terms = [] for monom, coeff in f.iterterms(): coeff = coeff % p if coeff > p // 2: coeff = coeff - p terms.append((monom, coeff)) else: terms = [ (monom, coeff % p) for monom, coeff in f.iterterms() ] poly = f.new(terms) poly.strip_zero() return poly rem_ground = trunc_ground def extract_ground(self, g): f = self fc = f.content() gc = g.content() gcd = f.ring.domain.gcd(fc, gc) f = f.quo_ground(gcd) g = g.quo_ground(gcd) return gcd, f, g def _norm(f, norm_func): if not f: return f.ring.domain.zero else: ground_abs = f.ring.domain.abs return norm_func([ ground_abs(coeff) for coeff in f.itercoeffs() ]) def max_norm(f): return f._norm(max) def l1_norm(f): return f._norm(sum) def deflate(f, *G): ring = f.ring polys = [f] + list(G) J = [0]*ring.ngens for p in polys: for monom in p.itermonoms(): for i, m in enumerate(monom): J[i] = igcd(J[i], m) for i, b in enumerate(J): if not b: J[i] = 1 J = tuple(J) if all(b == 1 for b in J): return J, polys H = [] for p in polys: h = ring.zero for I, coeff in p.iterterms(): N = [ i // j for i, j in zip(I, J) ] h[tuple(N)] = coeff H.append(h) return J, H def inflate(f, J): poly = f.ring.zero for I, coeff in f.iterterms(): N = [ i*j for i, j in zip(I, J) ] poly[tuple(N)] = coeff return 
poly def lcm(self, g): f = self domain = f.ring.domain if not domain.is_Field: fc, f = f.primitive() gc, g = g.primitive() c = domain.lcm(fc, gc) h = (f*g).quo(f.gcd(g)) if not domain.is_Field: return h.mul_ground(c) else: return h.monic() def gcd(f, g): return f.cofactors(g)[0] def cofactors(f, g): if not f and not g: zero = f.ring.zero return zero, zero, zero elif not f: h, cff, cfg = f._gcd_zero(g) return h, cff, cfg elif not g: h, cfg, cff = g._gcd_zero(f) return h, cff, cfg elif len(f) == 1: h, cff, cfg = f._gcd_monom(g) return h, cff, cfg elif len(g) == 1: h, cfg, cff = g._gcd_monom(f) return h, cff, cfg J, (f, g) = f.deflate(g) h, cff, cfg = f._gcd(g) return (h.inflate(J), cff.inflate(J), cfg.inflate(J)) def _gcd_zero(f, g): one, zero = f.ring.one, f.ring.zero if g.is_nonnegative: return g, zero, one else: return -g, zero, -one def _gcd_monom(f, g): ring = f.ring ground_gcd = ring.domain.gcd ground_quo = ring.domain.quo monomial_gcd = ring.monomial_gcd monomial_ldiv = ring.monomial_ldiv mf, cf = list(f.iterterms())[0] _mgcd, _cgcd = mf, cf for mg, cg in g.iterterms(): _mgcd = monomial_gcd(_mgcd, mg) _cgcd = ground_gcd(_cgcd, cg) h = f.new([(_mgcd, _cgcd)]) cff = f.new([(monomial_ldiv(mf, _mgcd), ground_quo(cf, _cgcd))]) cfg = f.new([(monomial_ldiv(mg, _mgcd), ground_quo(cg, _cgcd)) for mg, cg in g.iterterms()]) return h, cff, cfg def _gcd(f, g): ring = f.ring if ring.domain.is_QQ: return f._gcd_QQ(g) elif ring.domain.is_ZZ: return f._gcd_ZZ(g) else: # TODO: don't use dense representation (port PRS algorithms) return ring.dmp_inner_gcd(f, g) def _gcd_ZZ(f, g): return heugcd(f, g) def _gcd_QQ(self, g): f = self ring = f.ring new_ring = ring.clone(domain=ring.domain.get_ring()) cf, f = f.clear_denoms() cg, g = g.clear_denoms() f = f.set_ring(new_ring) g = g.set_ring(new_ring) h, cff, cfg = f._gcd_ZZ(g) h = h.set_ring(ring) c, h = h.LC, h.monic() cff = cff.set_ring(ring).mul_ground(ring.domain.quo(c, cf)) cfg = cfg.set_ring(ring).mul_ground(ring.domain.quo(c, cg)) return h, cff, cfg def cancel(self, g): """ Cancel common factors in a rational function ``f/g``. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> (2*x**2 - 2).cancel(x**2 - 2*x + 1) (2*x + 2, x - 1) """ f = self ring = f.ring if not f: return f, ring.one domain = ring.domain if not (domain.is_Field and domain.has_assoc_Ring): _, p, q = f.cofactors(g) if q.is_negative: p, q = -p, -q else: new_ring = ring.clone(domain=domain.get_ring()) cq, f = f.clear_denoms() cp, g = g.clear_denoms() f = f.set_ring(new_ring) g = g.set_ring(new_ring) _, p, q = f.cofactors(g) _, cp, cq = new_ring.domain.cofactors(cp, cq) p = p.set_ring(ring) q = q.set_ring(ring) p_neg = p.is_negative q_neg = q.is_negative if p_neg and q_neg: p, q = -p, -q elif p_neg: cp, p = -cp, -p elif q_neg: cp, q = -cp, -q p = p.mul_ground(cp) q = q.mul_ground(cq) return p, q def diff(f, x): """Computes partial derivative in ``x``. 
Examples ======== >>> from sympy.polys.rings import ring >>> from sympy.polys.domains import ZZ >>> _, x, y = ring("x,y", ZZ) >>> p = x + x**2*y**3 >>> p.diff(x) 2*x*y**3 + 1 """ ring = f.ring i = ring.index(x) m = ring.monomial_basis(i) g = ring.zero for expv, coeff in f.iterterms(): if expv[i]: e = ring.monomial_ldiv(expv, m) g[e] = ring.domain_new(coeff*expv[i]) return g def __call__(f, *values): if 0 < len(values) <= f.ring.ngens: return f.evaluate(list(zip(f.ring.gens, values))) else: raise ValueError("expected at least 1 and at most %s values, got %s" % (f.ring.ngens, len(values))) def evaluate(self, x, a=None): f = self if isinstance(x, list) and a is None: (X, a), x = x[0], x[1:] f = f.evaluate(X, a) if not x: return f else: x = [ (Y.drop(X), a) for (Y, a) in x ] return f.evaluate(x) ring = f.ring i = ring.index(x) a = ring.domain.convert(a) if ring.ngens == 1: result = ring.domain.zero for (n,), coeff in f.iterterms(): result += coeff*a**n return result else: poly = ring.drop(x).zero for monom, coeff in f.iterterms(): n, monom = monom[i], monom[:i] + monom[i+1:] coeff = coeff*a**n if monom in poly: coeff = coeff + poly[monom] if coeff: poly[monom] = coeff else: del poly[monom] else: if coeff: poly[monom] = coeff return poly def subs(self, x, a=None): f = self if isinstance(x, list) and a is None: for X, a in x: f = f.subs(X, a) return f ring = f.ring i = ring.index(x) a = ring.domain.convert(a) if ring.ngens == 1: result = ring.domain.zero for (n,), coeff in f.iterterms(): result += coeff*a**n return ring.ground_new(result) else: poly = ring.zero for monom, coeff in f.iterterms(): n, monom = monom[i], monom[:i] + (0,) + monom[i+1:] coeff = coeff*a**n if monom in poly: coeff = coeff + poly[monom] if coeff: poly[monom] = coeff else: del poly[monom] else: if coeff: poly[monom] = coeff return poly def compose(f, x, a=None): ring = f.ring poly = ring.zero gens_map = dict(list(zip(ring.gens, list(range(ring.ngens))))) if a is not None: replacements = [(x, a)] else: if isinstance(x, list): replacements = list(x) elif isinstance(x, dict): replacements = sorted(list(x.items()), key=lambda k: gens_map[k[0]]) else: raise ValueError("expected a generator, value pair a sequence of such pairs") for k, (x, g) in enumerate(replacements): replacements[k] = (gens_map[x], ring.ring_new(g)) for monom, coeff in f.iterterms(): monom = list(monom) subpoly = ring.one for i, g in replacements: n, monom[i] = monom[i], 0 if n: subpoly *= g**n subpoly = subpoly.mul_term((tuple(monom), coeff)) poly += subpoly return poly # TODO: following methods should point to polynomial # representation independent algorithm implementations. 
    def pdiv(f, g):
        return f.ring.dmp_pdiv(f, g)

    def prem(f, g):
        return f.ring.dmp_prem(f, g)

    def pquo(f, g):
        return f.ring.dmp_quo(f, g)

    def pexquo(f, g):
        return f.ring.dmp_exquo(f, g)

    def half_gcdex(f, g):
        return f.ring.dmp_half_gcdex(f, g)

    def gcdex(f, g):
        return f.ring.dmp_gcdex(f, g)

    def subresultants(f, g):
        return f.ring.dmp_subresultants(f, g)

    def resultant(f, g):
        return f.ring.dmp_resultant(f, g)

    def discriminant(f):
        return f.ring.dmp_discriminant(f)

    def decompose(f):
        if f.ring.is_univariate:
            return f.ring.dup_decompose(f)
        else:
            raise MultivariatePolynomialError("polynomial decomposition")

    def shift(f, a):
        if f.ring.is_univariate:
            return f.ring.dup_shift(f, a)
        else:
            raise MultivariatePolynomialError("polynomial shift")

    def sturm(f):
        if f.ring.is_univariate:
            return f.ring.dup_sturm(f)
        else:
            raise MultivariatePolynomialError("sturm sequence")

    def gff_list(f):
        return f.ring.dmp_gff_list(f)

    def sqf_norm(f):
        return f.ring.dmp_sqf_norm(f)

    def sqf_part(f):
        return f.ring.dmp_sqf_part(f)

    def sqf_list(f, all=False):
        return f.ring.dmp_sqf_list(f, all=all)

    def factor_list(f):
        return f.ring.dmp_factor_list(f)
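# A short end-to-end sketch of the sparse ring API defined above, assuming only
# the public ring() constructor from this module and the QQ domain; division,
# gcd and substitution all go through the PolyElement methods shown earlier.
from sympy.polys.rings import ring
from sympy.polys.domains import QQ

R, x, y = ring("x, y", QQ)

f = (x + y)**3
q, r = divmod(f, x + y)        # PolyElement.div() under the hood
print(q)                       # x**2 + 2*x*y + y**2
print(r)                       # 0

print(f.gcd(x**2 - y**2))      # x + y  (monic gcd over QQ, see _gcd_QQ)
print(f.subs(x, 2))            # y**3 + 6*y**2 + 12*y + 8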
34f8c08c33533a5b08e3605cf0cf1d9d0c892722b02fef436dd0015d7b12b4c0
"""Options manager for :class:`~.Poly` and public API functions. """ __all__ = ["Options"] from typing import Dict, Type from typing import List, Optional from sympy.core import Basic, sympify from sympy.polys.polyerrors import GeneratorsError, OptionError, FlagError from sympy.utilities import numbered_symbols, topological_sort, public from sympy.utilities.iterables import has_dups from sympy.core.compatibility import is_sequence import sympy.polys import re class Option: """Base class for all kinds of options. """ option = None # type: Optional[str] is_Flag = False requires = [] # type: List[str] excludes = [] # type: List[str] after = [] # type: List[str] before = [] # type: List[str] @classmethod def default(cls): return None @classmethod def preprocess(cls, option): return None @classmethod def postprocess(cls, options): pass class Flag(Option): """Base class for all kinds of flags. """ is_Flag = True class BooleanOption(Option): """An option that must have a boolean value or equivalent assigned. """ @classmethod def preprocess(cls, value): if value in [True, False]: return bool(value) else: raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value)) class OptionType(type): """Base type for all options that does registers options. """ def __init__(cls, *args, **kwargs): @property def getter(self): try: return self[cls.option] except KeyError: return cls.default() setattr(Options, cls.option, getter) Options.__options__[cls.option] = cls @public class Options(dict): """ Options manager for polynomial manipulation module. Examples ======== >>> from sympy.polys.polyoptions import Options >>> from sympy.polys.polyoptions import build_options >>> from sympy.abc import x, y, z >>> Options((x, y, z), {'domain': 'ZZ'}) {'auto': False, 'domain': ZZ, 'gens': (x, y, z)} >>> build_options((x, y, z), {'domain': 'ZZ'}) {'auto': False, 'domain': ZZ, 'gens': (x, y, z)} **Options** * Expand --- boolean option * Gens --- option * Wrt --- option * Sort --- option * Order --- option * Field --- boolean option * Greedy --- boolean option * Domain --- option * Split --- boolean option * Gaussian --- boolean option * Extension --- option * Modulus --- option * Symmetric --- boolean option * Strict --- boolean option **Flags** * Auto --- boolean flag * Frac --- boolean flag * Formal --- boolean flag * Polys --- boolean flag * Include --- boolean flag * All --- boolean flag * Gen --- flag * Series --- boolean flag """ __order__ = None __options__ = {} # type: Dict[str, Type[Option]] def __init__(self, gens, args, flags=None, strict=False): dict.__init__(self) if gens and args.get('gens', ()): raise OptionError( "both '*gens' and keyword argument 'gens' supplied") elif gens: args = dict(args) args['gens'] = gens defaults = args.pop('defaults', {}) def preprocess_options(args): for option, value in args.items(): try: cls = self.__options__[option] except KeyError: raise OptionError("'%s' is not a valid option" % option) if issubclass(cls, Flag): if flags is None or option not in flags: if strict: raise OptionError("'%s' flag is not allowed in this context" % option) if value is not None: self[option] = cls.preprocess(value) preprocess_options(args) for key, value in dict(defaults).items(): if key in self: del defaults[key] else: for option in self.keys(): cls = self.__options__[option] if key in cls.excludes: del defaults[key] break preprocess_options(defaults) for option in self.keys(): cls = self.__options__[option] for require_option in cls.requires: if self.get(require_option) 
is None: raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option)) for exclude_option in cls.excludes: if self.get(exclude_option) is not None: raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option)) for option in self.__order__: self.__options__[option].postprocess(self) @classmethod def _init_dependencies_order(cls): """Resolve the order of options' processing. """ if cls.__order__ is None: vertices, edges = [], set() for name, option in cls.__options__.items(): vertices.append(name) for _name in option.after: edges.add((_name, name)) for _name in option.before: edges.add((name, _name)) try: cls.__order__ = topological_sort((vertices, list(edges))) except ValueError: raise RuntimeError( "cycle detected in sympy.polys options framework") def clone(self, updates={}): """Clone ``self`` and update specified options. """ obj = dict.__new__(self.__class__) for option, value in self.items(): obj[option] = value for option, value in updates.items(): obj[option] = value return obj def __setattr__(self, attr, value): if attr in self.__options__: self[attr] = value else: super().__setattr__(attr, value) @property def args(self): args = {} for option, value in self.items(): if value is not None and option != 'gens': cls = self.__options__[option] if not issubclass(cls, Flag): args[option] = value return args @property def options(self): options = {} for option, cls in self.__options__.items(): if not issubclass(cls, Flag): options[option] = getattr(self, option) return options @property def flags(self): flags = {} for option, cls in self.__options__.items(): if issubclass(cls, Flag): flags[option] = getattr(self, option) return flags class Expand(BooleanOption, metaclass=OptionType): """``expand`` option to polynomial manipulation functions. """ option = 'expand' requires = [] # type: List[str] excludes = [] # type: List[str] @classmethod def default(cls): return True class Gens(Option, metaclass=OptionType): """``gens`` option to polynomial manipulation functions. """ option = 'gens' requires = [] # type: List[str] excludes = [] # type: List[str] @classmethod def default(cls): return () @classmethod def preprocess(cls, gens): if isinstance(gens, Basic): gens = (gens,) elif len(gens) == 1 and is_sequence(gens[0]): gens = gens[0] if gens == (None,): gens = () elif has_dups(gens): raise GeneratorsError("duplicated generators: %s" % str(gens)) elif any(gen.is_commutative is False for gen in gens): raise GeneratorsError("non-commutative generators: %s" % str(gens)) return tuple(gens) class Wrt(Option, metaclass=OptionType): """``wrt`` option to polynomial manipulation functions. """ option = 'wrt' requires = [] # type: List[str] excludes = [] # type: List[str] _re_split = re.compile(r"\s*,\s*|\s+") @classmethod def preprocess(cls, wrt): if isinstance(wrt, Basic): return [str(wrt)] elif isinstance(wrt, str): wrt = wrt.strip() if wrt.endswith(','): raise OptionError('Bad input: missing parameter.') if not wrt: return [] return [ gen for gen in cls._re_split.split(wrt) ] elif hasattr(wrt, '__getitem__'): return list(map(str, wrt)) else: raise OptionError("invalid argument for 'wrt' option") class Sort(Option, metaclass=OptionType): """``sort`` option to polynomial manipulation functions. 
""" option = 'sort' requires = [] # type: List[str] excludes = [] # type: List[str] @classmethod def default(cls): return [] @classmethod def preprocess(cls, sort): if isinstance(sort, str): return [ gen.strip() for gen in sort.split('>') ] elif hasattr(sort, '__getitem__'): return list(map(str, sort)) else: raise OptionError("invalid argument for 'sort' option") class Order(Option, metaclass=OptionType): """``order`` option to polynomial manipulation functions. """ option = 'order' requires = [] # type: List[str] excludes = [] # type: List[str] @classmethod def default(cls): return sympy.polys.orderings.lex @classmethod def preprocess(cls, order): return sympy.polys.orderings.monomial_key(order) class Field(BooleanOption, metaclass=OptionType): """``field`` option to polynomial manipulation functions. """ option = 'field' requires = [] # type: List[str] excludes = ['domain', 'split', 'gaussian'] class Greedy(BooleanOption, metaclass=OptionType): """``greedy`` option to polynomial manipulation functions. """ option = 'greedy' requires = [] # type: List[str] excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric'] class Composite(BooleanOption, metaclass=OptionType): """``composite`` option to polynomial manipulation functions. """ option = 'composite' @classmethod def default(cls): return None requires = [] # type: List[str] excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric'] class Domain(Option, metaclass=OptionType): """``domain`` option to polynomial manipulation functions. """ option = 'domain' requires = [] # type: List[str] excludes = ['field', 'greedy', 'split', 'gaussian', 'extension'] after = ['gens'] _re_realfield = re.compile(r"^(R|RR)(_(\d+))?$") _re_complexfield = re.compile(r"^(C|CC)(_(\d+))?$") _re_finitefield = re.compile(r"^(FF|GF)\((\d+)\)$") _re_polynomial = re.compile(r"^(Z|ZZ|Q|QQ|ZZ_I|QQ_I|R|RR|C|CC)\[(.+)\]$") _re_fraction = re.compile(r"^(Z|ZZ|Q|QQ)\((.+)\)$") _re_algebraic = re.compile(r"^(Q|QQ)\<(.+)\>$") @classmethod def preprocess(cls, domain): if isinstance(domain, sympy.polys.domains.Domain): return domain elif hasattr(domain, 'to_domain'): return domain.to_domain() elif isinstance(domain, str): if domain in ['Z', 'ZZ']: return sympy.polys.domains.ZZ if domain in ['Q', 'QQ']: return sympy.polys.domains.QQ if domain == 'ZZ_I': return sympy.polys.domains.ZZ_I if domain == 'QQ_I': return sympy.polys.domains.QQ_I if domain == 'EX': return sympy.polys.domains.EX r = cls._re_realfield.match(domain) if r is not None: _, _, prec = r.groups() if prec is None: return sympy.polys.domains.RR else: return sympy.polys.domains.RealField(int(prec)) r = cls._re_complexfield.match(domain) if r is not None: _, _, prec = r.groups() if prec is None: return sympy.polys.domains.CC else: return sympy.polys.domains.ComplexField(int(prec)) r = cls._re_finitefield.match(domain) if r is not None: return sympy.polys.domains.FF(int(r.groups()[1])) r = cls._re_polynomial.match(domain) if r is not None: ground, gens = r.groups() gens = list(map(sympify, gens.split(','))) if ground in ['Z', 'ZZ']: return sympy.polys.domains.ZZ.poly_ring(*gens) elif ground in ['Q', 'QQ']: return sympy.polys.domains.QQ.poly_ring(*gens) elif ground in ['R', 'RR']: return sympy.polys.domains.RR.poly_ring(*gens) elif ground == 'ZZ_I': return sympy.polys.domains.ZZ_I.poly_ring(*gens) elif ground == 'QQ_I': return sympy.polys.domains.QQ_I.poly_ring(*gens) else: return sympy.polys.domains.CC.poly_ring(*gens) r = cls._re_fraction.match(domain) if r is not None: 
ground, gens = r.groups() gens = list(map(sympify, gens.split(','))) if ground in ['Z', 'ZZ']: return sympy.polys.domains.ZZ.frac_field(*gens) else: return sympy.polys.domains.QQ.frac_field(*gens) r = cls._re_algebraic.match(domain) if r is not None: gens = list(map(sympify, r.groups()[1].split(','))) return sympy.polys.domains.QQ.algebraic_field(*gens) raise OptionError('expected a valid domain specification, got %s' % domain) @classmethod def postprocess(cls, options): if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \ (set(options['domain'].symbols) & set(options['gens'])): raise GeneratorsError( "ground domain and generators interfere together") elif ('gens' not in options or not options['gens']) and \ 'domain' in options and options['domain'] == sympy.polys.domains.EX: raise GeneratorsError("you have to provide generators because EX domain was requested") class Split(BooleanOption, metaclass=OptionType): """``split`` option to polynomial manipulation functions. """ option = 'split' requires = [] # type: List[str] excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus', 'symmetric'] @classmethod def postprocess(cls, options): if 'split' in options: raise NotImplementedError("'split' option is not implemented yet") class Gaussian(BooleanOption, metaclass=OptionType): """``gaussian`` option to polynomial manipulation functions. """ option = 'gaussian' requires = [] # type: List[str] excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus', 'symmetric'] @classmethod def postprocess(cls, options): if 'gaussian' in options and options['gaussian'] is True: options['domain'] = sympy.polys.domains.QQ_I Extension.postprocess(options) class Extension(Option, metaclass=OptionType): """``extension`` option to polynomial manipulation functions. """ option = 'extension' requires = [] # type: List[str] excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus', 'symmetric'] @classmethod def preprocess(cls, extension): if extension == 1: return bool(extension) elif extension == 0: raise OptionError("'False' is an invalid argument for 'extension'") else: if not hasattr(extension, '__iter__'): extension = {extension} else: if not extension: extension = None else: extension = set(extension) return extension @classmethod def postprocess(cls, options): if 'extension' in options and options['extension'] is not True: options['domain'] = sympy.polys.domains.QQ.algebraic_field( *options['extension']) class Modulus(Option, metaclass=OptionType): """``modulus`` option to polynomial manipulation functions. """ option = 'modulus' requires = [] # type: List[str] excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension'] @classmethod def preprocess(cls, modulus): modulus = sympify(modulus) if modulus.is_Integer and modulus > 0: return int(modulus) else: raise OptionError( "'modulus' must a positive integer, got %s" % modulus) @classmethod def postprocess(cls, options): if 'modulus' in options: modulus = options['modulus'] symmetric = options.get('symmetric', True) options['domain'] = sympy.polys.domains.FF(modulus, symmetric) class Symmetric(BooleanOption, metaclass=OptionType): """``symmetric`` option to polynomial manipulation functions. """ option = 'symmetric' requires = ['modulus'] excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension'] class Strict(BooleanOption, metaclass=OptionType): """``strict`` option to polynomial manipulation functions. 
""" option = 'strict' @classmethod def default(cls): return True class Auto(BooleanOption, Flag, metaclass=OptionType): """``auto`` flag to polynomial manipulation functions. """ option = 'auto' after = ['field', 'domain', 'extension', 'gaussian'] @classmethod def default(cls): return True @classmethod def postprocess(cls, options): if ('domain' in options or 'field' in options) and 'auto' not in options: options['auto'] = False class Frac(BooleanOption, Flag, metaclass=OptionType): """``auto`` option to polynomial manipulation functions. """ option = 'frac' @classmethod def default(cls): return False class Formal(BooleanOption, Flag, metaclass=OptionType): """``formal`` flag to polynomial manipulation functions. """ option = 'formal' @classmethod def default(cls): return False class Polys(BooleanOption, Flag, metaclass=OptionType): """``polys`` flag to polynomial manipulation functions. """ option = 'polys' class Include(BooleanOption, Flag, metaclass=OptionType): """``include`` flag to polynomial manipulation functions. """ option = 'include' @classmethod def default(cls): return False class All(BooleanOption, Flag, metaclass=OptionType): """``all`` flag to polynomial manipulation functions. """ option = 'all' @classmethod def default(cls): return False class Gen(Flag, metaclass=OptionType): """``gen`` flag to polynomial manipulation functions. """ option = 'gen' @classmethod def default(cls): return 0 @classmethod def preprocess(cls, gen): if isinstance(gen, (Basic, int)): return gen else: raise OptionError("invalid argument for 'gen' option") class Series(BooleanOption, Flag, metaclass=OptionType): """``series`` flag to polynomial manipulation functions. """ option = 'series' @classmethod def default(cls): return False class Symbols(Flag, metaclass=OptionType): """``symbols`` flag to polynomial manipulation functions. """ option = 'symbols' @classmethod def default(cls): return numbered_symbols('s', start=1) @classmethod def preprocess(cls, symbols): if hasattr(symbols, '__iter__'): return iter(symbols) else: raise OptionError("expected an iterator or iterable container, got %s" % symbols) class Method(Flag, metaclass=OptionType): """``method`` flag to polynomial manipulation functions. """ option = 'method' @classmethod def preprocess(cls, method): if isinstance(method, str): return method.lower() else: raise OptionError("expected a string, got %s" % method) def build_options(gens, args=None): """Construct options from keyword arguments or ... options. """ if args is None: gens, args = (), gens if len(args) != 1 or 'opt' not in args or gens: return Options(gens, args) else: return args['opt'] def allowed_flags(args, flags): """ Allow specified flags to be used in the given context. Examples ======== >>> from sympy.polys.polyoptions import allowed_flags >>> from sympy.polys.domains import ZZ >>> allowed_flags({'domain': ZZ}, []) >>> allowed_flags({'domain': ZZ, 'frac': True}, []) Traceback (most recent call last): ... FlagError: 'frac' flag is not allowed in this context >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac']) """ flags = set(flags) for arg in args.keys(): try: if Options.__options__[arg].is_Flag and not arg in flags: raise FlagError( "'%s' flag is not allowed in this context" % arg) except KeyError: raise OptionError("'%s' is not a valid option" % arg) def set_defaults(options, **defaults): """Update options with default values. """ if 'defaults' not in options: options = dict(options) options['defaults'] = defaults return options Options._init_dependencies_order()
7fc1877a09fc56276180ac90bcfe82e62b642812603a479ec39c53f07fbe97d4
"""Tools and arithmetics for monomials of distributed polynomials. """ from itertools import combinations_with_replacement, product from textwrap import dedent from sympy.core import Mul, S, Tuple, sympify from sympy.core.compatibility import iterable from sympy.polys.polyerrors import ExactQuotientFailed from sympy.polys.polyutils import PicklableWithSlots, dict_from_expr from sympy.utilities import public from sympy.core.compatibility import is_sequence @public def itermonomials(variables, max_degrees, min_degrees=None): r""" `max_degrees` and `min_degrees` are either both integers or both lists. Unless otherwise specified, `min_degrees` is either 0 or [0,...,0]. A generator of all monomials `monom` is returned, such that either min_degree <= total_degree(monom) <= max_degree, or min_degrees[i] <= degree_list(monom)[i] <= max_degrees[i], for all i. Case I:: `max_degrees` and `min_degrees` are both integers. =========================================================== Given a set of variables `V` and a min_degree `N` and a max_degree `M` generate a set of monomials of degree less than or equal to `N` and greater than or equal to `M`. The total number of monomials in commutative variables is huge and is given by the following formula if `M = 0`: .. math:: \frac{(\#V + N)!}{\#V! N!} For example if we would like to generate a dense polynomial of a total degree `N = 50` and `M = 0`, which is the worst case, in 5 variables, assuming that exponents and all of coefficients are 32-bit long and stored in an array we would need almost 80 GiB of memory! Fortunately most polynomials, that we will encounter, are sparse. Examples ======== Consider monomials in commutative variables `x` and `y` and non-commutative variables `a` and `b`:: >>> from sympy import symbols >>> from sympy.polys.monomials import itermonomials >>> from sympy.polys.orderings import monomial_key >>> from sympy.abc import x, y >>> sorted(itermonomials([x, y], 2), key=monomial_key('grlex', [y, x])) [1, x, y, x**2, x*y, y**2] >>> sorted(itermonomials([x, y], 3), key=monomial_key('grlex', [y, x])) [1, x, y, x**2, x*y, y**2, x**3, x**2*y, x*y**2, y**3] >>> a, b = symbols('a, b', commutative=False) >>> set(itermonomials([a, b, x], 2)) {1, a, a**2, b, b**2, x, x**2, a*b, b*a, x*a, x*b} >>> sorted(itermonomials([x, y], 2, 1), key=monomial_key('grlex', [y, x])) [x, y, x**2, x*y, y**2] Case II:: `max_degrees` and `min_degrees` are both lists. ========================================================= If max_degrees = [d_1, ..., d_n] and min_degrees = [e_1, ..., e_n], the number of monomials generated is: (d_1 - e_1 + 1) * ... 
* (d_n - e_n + 1) Example ======= Let us generate all monomials `monom` in variables `x`, and `y` such that [1, 2][i] <= degree_list(monom)[i] <= [2, 4][i], i = 0, 1 :: >>> from sympy import symbols >>> from sympy.polys.monomials import itermonomials >>> from sympy.polys.orderings import monomial_key >>> from sympy.abc import x, y >>> sorted(itermonomials([x, y], [2, 4], [1, 2]), reverse=True, key=monomial_key('lex', [x, y])) [x**2*y**4, x**2*y**3, x**2*y**2, x*y**4, x*y**3, x*y**2] """ n = len(variables) if is_sequence(max_degrees): if len(max_degrees) != n: raise ValueError('Argument sizes do not match') if min_degrees is None: min_degrees = [0]*n elif not is_sequence(min_degrees): raise ValueError('min_degrees is not a list') else: if len(min_degrees) != n: raise ValueError('Argument sizes do not match') if any(i < 0 for i in min_degrees): raise ValueError("min_degrees can't contain negative numbers") total_degree = False else: max_degree = max_degrees if max_degree < 0: raise ValueError("max_degrees can't be negative") if min_degrees is None: min_degree = 0 else: if min_degrees < 0: raise ValueError("min_degrees can't be negative") min_degree = min_degrees total_degree = True if total_degree: if min_degree > max_degree: return if not variables or max_degree == 0: yield S.One return # Force to list in case of passed tuple or other incompatible collection variables = list(variables) + [S.One] if all(variable.is_commutative for variable in variables): monomials_list_comm = [] for item in combinations_with_replacement(variables, max_degree): powers = dict() for variable in variables: powers[variable] = 0 for variable in item: if variable != 1: powers[variable] += 1 if max(powers.values()) >= min_degree: monomials_list_comm.append(Mul(*item)) yield from set(monomials_list_comm) else: monomials_list_non_comm = [] for item in product(variables, repeat=max_degree): powers = dict() for variable in variables: powers[variable] = 0 for variable in item: if variable != 1: powers[variable] += 1 if max(powers.values()) >= min_degree: monomials_list_non_comm.append(Mul(*item)) yield from set(monomials_list_non_comm) else: if any(min_degrees[i] > max_degrees[i] for i in range(n)): raise ValueError('min_degrees[i] must be <= max_degrees[i] for all i') power_lists = [] for var, min_d, max_d in zip(variables, min_degrees, max_degrees): power_lists.append([var**i for i in range(min_d, max_d + 1)]) for powers in product(*power_lists): yield Mul(*powers) def monomial_count(V, N): r""" Computes the number of monomials. The number of monomials is given by the following formula: .. math:: \frac{(\#V + N)!}{\#V! N!} where `N` is a total degree and `V` is a set of variables. Examples ======== >>> from sympy.polys.monomials import itermonomials, monomial_count >>> from sympy.polys.orderings import monomial_key >>> from sympy.abc import x, y >>> monomial_count(2, 2) 6 >>> M = list(itermonomials([x, y], 2)) >>> sorted(M, key=monomial_key('grlex', [y, x])) [1, x, y, x**2, x*y, y**2] >>> len(M) 6 """ from sympy import factorial return factorial(V + N) / factorial(V) / factorial(N) def monomial_mul(A, B): """ Multiplication of tuples representing monomials. Examples ======== Lets multiply `x**3*y**4*z` with `x*y**2`:: >>> from sympy.polys.monomials import monomial_mul >>> monomial_mul((3, 4, 1), (1, 2, 0)) (4, 6, 1) which gives `x**4*y**5*z`. """ return tuple([ a + b for a, b in zip(A, B) ]) def monomial_div(A, B): """ Division of tuples representing monomials. 
Examples ======== Lets divide `x**3*y**4*z` by `x*y**2`:: >>> from sympy.polys.monomials import monomial_div >>> monomial_div((3, 4, 1), (1, 2, 0)) (2, 2, 1) which gives `x**2*y**2*z`. However:: >>> monomial_div((3, 4, 1), (1, 2, 2)) is None True `x*y**2*z**2` does not divide `x**3*y**4*z`. """ C = monomial_ldiv(A, B) if all(c >= 0 for c in C): return tuple(C) else: return None def monomial_ldiv(A, B): """ Division of tuples representing monomials. Examples ======== Lets divide `x**3*y**4*z` by `x*y**2`:: >>> from sympy.polys.monomials import monomial_ldiv >>> monomial_ldiv((3, 4, 1), (1, 2, 0)) (2, 2, 1) which gives `x**2*y**2*z`. >>> monomial_ldiv((3, 4, 1), (1, 2, 2)) (2, 2, -1) which gives `x**2*y**2*z**-1`. """ return tuple([ a - b for a, b in zip(A, B) ]) def monomial_pow(A, n): """Return the n-th pow of the monomial. """ return tuple([ a*n for a in A ]) def monomial_gcd(A, B): """ Greatest common divisor of tuples representing monomials. Examples ======== Lets compute GCD of `x*y**4*z` and `x**3*y**2`:: >>> from sympy.polys.monomials import monomial_gcd >>> monomial_gcd((1, 4, 1), (3, 2, 0)) (1, 2, 0) which gives `x*y**2`. """ return tuple([ min(a, b) for a, b in zip(A, B) ]) def monomial_lcm(A, B): """ Least common multiple of tuples representing monomials. Examples ======== Lets compute LCM of `x*y**4*z` and `x**3*y**2`:: >>> from sympy.polys.monomials import monomial_lcm >>> monomial_lcm((1, 4, 1), (3, 2, 0)) (3, 4, 1) which gives `x**3*y**4*z`. """ return tuple([ max(a, b) for a, b in zip(A, B) ]) def monomial_divides(A, B): """ Does there exist a monomial X such that XA == B? Examples ======== >>> from sympy.polys.monomials import monomial_divides >>> monomial_divides((1, 2), (3, 4)) True >>> monomial_divides((1, 2), (0, 2)) False """ return all(a <= b for a, b in zip(A, B)) def monomial_max(*monoms): """ Returns maximal degree for each variable in a set of monomials. Examples ======== Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`. We wish to find out what is the maximal degree for each of `x`, `y` and `z` variables:: >>> from sympy.polys.monomials import monomial_max >>> monomial_max((3,4,5), (0,5,1), (6,3,9)) (6, 5, 9) """ M = list(monoms[0]) for N in monoms[1:]: for i, n in enumerate(N): M[i] = max(M[i], n) return tuple(M) def monomial_min(*monoms): """ Returns minimal degree for each variable in a set of monomials. Examples ======== Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`. We wish to find out what is the minimal degree for each of `x`, `y` and `z` variables:: >>> from sympy.polys.monomials import monomial_min >>> monomial_min((3,4,5), (0,5,1), (6,3,9)) (0, 3, 1) """ M = list(monoms[0]) for N in monoms[1:]: for i, n in enumerate(N): M[i] = min(M[i], n) return tuple(M) def monomial_deg(M): """ Returns the total degree of a monomial. Examples ======== The total degree of `xy^2` is 3: >>> from sympy.polys.monomials import monomial_deg >>> monomial_deg((1, 2)) 3 """ return sum(M) def term_div(a, b, domain): """Division of two terms in over a ring/field. """ a_lm, a_lc = a b_lm, b_lc = b monom = monomial_div(a_lm, b_lm) if domain.is_Field: if monom is not None: return monom, domain.quo(a_lc, b_lc) else: return None else: if not (monom is None or a_lc % b_lc): return monom, domain.quo(a_lc, b_lc) else: return None class MonomialOps: """Code generator of fast monomial arithmetic functions. 
""" def __init__(self, ngens): self.ngens = ngens def _build(self, code, name): ns = {} exec(code, ns) return ns[name] def _vars(self, name): return [ "%s%s" % (name, i) for i in range(self.ngens) ] def mul(self): name = "monomial_mul" template = dedent("""\ def %(name)s(A, B): (%(A)s,) = A (%(B)s,) = B return (%(AB)s,) """) A = self._vars("a") B = self._vars("b") AB = [ "%s + %s" % (a, b) for a, b in zip(A, B) ] code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB)) return self._build(code, name) def pow(self): name = "monomial_pow" template = dedent("""\ def %(name)s(A, k): (%(A)s,) = A return (%(Ak)s,) """) A = self._vars("a") Ak = [ "%s*k" % a for a in A ] code = template % dict(name=name, A=", ".join(A), Ak=", ".join(Ak)) return self._build(code, name) def mulpow(self): name = "monomial_mulpow" template = dedent("""\ def %(name)s(A, B, k): (%(A)s,) = A (%(B)s,) = B return (%(ABk)s,) """) A = self._vars("a") B = self._vars("b") ABk = [ "%s + %s*k" % (a, b) for a, b in zip(A, B) ] code = template % dict(name=name, A=", ".join(A), B=", ".join(B), ABk=", ".join(ABk)) return self._build(code, name) def ldiv(self): name = "monomial_ldiv" template = dedent("""\ def %(name)s(A, B): (%(A)s,) = A (%(B)s,) = B return (%(AB)s,) """) A = self._vars("a") B = self._vars("b") AB = [ "%s - %s" % (a, b) for a, b in zip(A, B) ] code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB)) return self._build(code, name) def div(self): name = "monomial_div" template = dedent("""\ def %(name)s(A, B): (%(A)s,) = A (%(B)s,) = B %(RAB)s return (%(R)s,) """) A = self._vars("a") B = self._vars("b") RAB = [ "r%(i)s = a%(i)s - b%(i)s\n if r%(i)s < 0: return None" % dict(i=i) for i in range(self.ngens) ] R = self._vars("r") code = template % dict(name=name, A=", ".join(A), B=", ".join(B), RAB="\n ".join(RAB), R=", ".join(R)) return self._build(code, name) def lcm(self): name = "monomial_lcm" template = dedent("""\ def %(name)s(A, B): (%(A)s,) = A (%(B)s,) = B return (%(AB)s,) """) A = self._vars("a") B = self._vars("b") AB = [ "%s if %s >= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ] code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB)) return self._build(code, name) def gcd(self): name = "monomial_gcd" template = dedent("""\ def %(name)s(A, B): (%(A)s,) = A (%(B)s,) = B return (%(AB)s,) """) A = self._vars("a") B = self._vars("b") AB = [ "%s if %s <= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ] code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB)) return self._build(code, name) @public class Monomial(PicklableWithSlots): """Class representing a monomial, i.e. a product of powers. 
""" __slots__ = ('exponents', 'gens') def __init__(self, monom, gens=None): if not iterable(monom): rep, gens = dict_from_expr(sympify(monom), gens=gens) if len(rep) == 1 and list(rep.values())[0] == 1: monom = list(rep.keys())[0] else: raise ValueError("Expected a monomial got {}".format(monom)) self.exponents = tuple(map(int, monom)) self.gens = gens def rebuild(self, exponents, gens=None): return self.__class__(exponents, gens or self.gens) def __len__(self): return len(self.exponents) def __iter__(self): return iter(self.exponents) def __getitem__(self, item): return self.exponents[item] def __hash__(self): return hash((self.__class__.__name__, self.exponents, self.gens)) def __str__(self): if self.gens: return "*".join([ "%s**%s" % (gen, exp) for gen, exp in zip(self.gens, self.exponents) ]) else: return "%s(%s)" % (self.__class__.__name__, self.exponents) def as_expr(self, *gens): """Convert a monomial instance to a SymPy expression. """ gens = gens or self.gens if not gens: raise ValueError( "can't convert %s to an expression without generators" % self) return Mul(*[ gen**exp for gen, exp in zip(gens, self.exponents) ]) def __eq__(self, other): if isinstance(other, Monomial): exponents = other.exponents elif isinstance(other, (tuple, Tuple)): exponents = other else: return False return self.exponents == exponents def __ne__(self, other): return not self == other def __mul__(self, other): if isinstance(other, Monomial): exponents = other.exponents elif isinstance(other, (tuple, Tuple)): exponents = other else: raise NotImplementedError return self.rebuild(monomial_mul(self.exponents, exponents)) def __truediv__(self, other): if isinstance(other, Monomial): exponents = other.exponents elif isinstance(other, (tuple, Tuple)): exponents = other else: raise NotImplementedError result = monomial_div(self.exponents, exponents) if result is not None: return self.rebuild(result) else: raise ExactQuotientFailed(self, Monomial(other)) __floordiv__ = __truediv__ def __pow__(self, other): n = int(other) if not n: return self.rebuild([0]*len(self)) elif n > 0: exponents = self.exponents for i in range(1, n): exponents = monomial_mul(exponents, self.exponents) return self.rebuild(exponents) else: raise ValueError("a non-negative integer expected, got %s" % other) def gcd(self, other): """Greatest common divisor of monomials. """ if isinstance(other, Monomial): exponents = other.exponents elif isinstance(other, (tuple, Tuple)): exponents = other else: raise TypeError( "an instance of Monomial class expected, got %s" % other) return self.rebuild(monomial_gcd(self.exponents, exponents)) def lcm(self, other): """Least common multiple of monomials. """ if isinstance(other, Monomial): exponents = other.exponents elif isinstance(other, (tuple, Tuple)): exponents = other else: raise TypeError( "an instance of Monomial class expected, got %s" % other) return self.rebuild(monomial_lcm(self.exponents, exponents))
3f9984c24e53d0829ad80ce60ec1ec94891e3159a6bc15896462908411f1e223
"""Sparse rational function fields. """ from typing import Any, Dict from functools import reduce from operator import add, mul, lt, le, gt, ge from sympy.core.compatibility import is_sequence from sympy.core.expr import Expr from sympy.core.mod import Mod from sympy.core.numbers import Exp1 from sympy.core.singleton import S from sympy.core.symbol import Symbol from sympy.core.sympify import CantSympify, sympify from sympy.functions.elementary.exponential import ExpBase from sympy.polys.domains.domainelement import DomainElement from sympy.polys.domains.fractionfield import FractionField from sympy.polys.domains.polynomialring import PolynomialRing from sympy.polys.constructor import construct_domain from sympy.polys.orderings import lex from sympy.polys.polyerrors import CoercionFailed from sympy.polys.polyoptions import build_options from sympy.polys.polyutils import _parallel_dict_from_expr from sympy.polys.rings import PolyElement from sympy.printing.defaults import DefaultPrinting from sympy.utilities import public from sympy.utilities.magic import pollute @public def field(symbols, domain, order=lex): """Construct new rational function field returning (field, x1, ..., xn). """ _field = FracField(symbols, domain, order) return (_field,) + _field.gens @public def xfield(symbols, domain, order=lex): """Construct new rational function field returning (field, (x1, ..., xn)). """ _field = FracField(symbols, domain, order) return (_field, _field.gens) @public def vfield(symbols, domain, order=lex): """Construct new rational function field and inject generators into global namespace. """ _field = FracField(symbols, domain, order) pollute([ sym.name for sym in _field.symbols ], _field.gens) return _field @public def sfield(exprs, *symbols, **options): """Construct a field deriving generators and domain from options and input expressions. Parameters ========== exprs : :class:`Expr` or sequence of :class:`Expr` (sympifiable) symbols : sequence of :class:`Symbol`/:class:`Expr` options : keyword arguments understood by :class:`Options` Examples ======== >>> from sympy.core import symbols >>> from sympy.functions import exp, log >>> from sympy.polys.fields import sfield >>> x = symbols("x") >>> K, f = sfield((x*log(x) + 4*x**2)*exp(1/x + log(x)/3)/x**2) >>> K Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order >>> f (4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5) """ single = False if not is_sequence(exprs): exprs, single = [exprs], True exprs = list(map(sympify, exprs)) opt = build_options(symbols, options) numdens = [] for expr in exprs: numdens.extend(expr.as_numer_denom()) reps, opt = _parallel_dict_from_expr(numdens, opt) if opt.domain is None: # NOTE: this is inefficient because construct_domain() automatically # performs conversion to the target domain. It shouldn't do this. coeffs = sum([list(rep.values()) for rep in reps], []) opt.domain, _ = construct_domain(coeffs, opt=opt) _field = FracField(opt.gens, opt.domain, opt.order) fracs = [] for i in range(0, len(reps), 2): fracs.append(_field(tuple(reps[i:i+2]))) if single: return (_field, fracs[0]) else: return (_field, fracs) _field_cache = {} # type: Dict[Any, Any] class FracField(DefaultPrinting): """Multivariate distributed rational function field. 
""" def __new__(cls, symbols, domain, order=lex): from sympy.polys.rings import PolyRing ring = PolyRing(symbols, domain, order) symbols = ring.symbols ngens = ring.ngens domain = ring.domain order = ring.order _hash_tuple = (cls.__name__, symbols, ngens, domain, order) obj = _field_cache.get(_hash_tuple) if obj is None: obj = object.__new__(cls) obj._hash_tuple = _hash_tuple obj._hash = hash(_hash_tuple) obj.ring = ring obj.dtype = type("FracElement", (FracElement,), {"field": obj}) obj.symbols = symbols obj.ngens = ngens obj.domain = domain obj.order = order obj.zero = obj.dtype(ring.zero) obj.one = obj.dtype(ring.one) obj.gens = obj._gens() for symbol, generator in zip(obj.symbols, obj.gens): if isinstance(symbol, Symbol): name = symbol.name if not hasattr(obj, name): setattr(obj, name, generator) _field_cache[_hash_tuple] = obj return obj def _gens(self): """Return a list of polynomial generators. """ return tuple([ self.dtype(gen) for gen in self.ring.gens ]) def __getnewargs__(self): return (self.symbols, self.domain, self.order) def __hash__(self): return self._hash def index(self, gen): if isinstance(gen, self.dtype): return self.ring.index(gen.to_poly()) else: raise ValueError("expected a %s, got %s instead" % (self.dtype,gen)) def __eq__(self, other): return isinstance(other, FracField) and \ (self.symbols, self.ngens, self.domain, self.order) == \ (other.symbols, other.ngens, other.domain, other.order) def __ne__(self, other): return not self == other def raw_new(self, numer, denom=None): return self.dtype(numer, denom) def new(self, numer, denom=None): if denom is None: denom = self.ring.one numer, denom = numer.cancel(denom) return self.raw_new(numer, denom) def domain_new(self, element): return self.domain.convert(element) def ground_new(self, element): try: return self.new(self.ring.ground_new(element)) except CoercionFailed: domain = self.domain if not domain.is_Field and domain.has_assoc_Field: ring = self.ring ground_field = domain.get_field() element = ground_field.convert(element) numer = ring.ground_new(ground_field.numer(element)) denom = ring.ground_new(ground_field.denom(element)) return self.raw_new(numer, denom) else: raise def field_new(self, element): if isinstance(element, FracElement): if self == element.field: return element if isinstance(self.domain, FractionField) and \ self.domain.field == element.field: return self.ground_new(element) elif isinstance(self.domain, PolynomialRing) and \ self.domain.ring.to_field() == element.field: return self.ground_new(element) else: raise NotImplementedError("conversion") elif isinstance(element, PolyElement): denom, numer = element.clear_denoms() if isinstance(self.domain, PolynomialRing) and \ numer.ring == self.domain.ring: numer = self.ring.ground_new(numer) elif isinstance(self.domain, FractionField) and \ numer.ring == self.domain.field.to_ring(): numer = self.ring.ground_new(numer) else: numer = numer.set_ring(self.ring) denom = self.ring.ground_new(denom) return self.raw_new(numer, denom) elif isinstance(element, tuple) and len(element) == 2: numer, denom = list(map(self.ring.ring_new, element)) return self.new(numer, denom) elif isinstance(element, str): raise NotImplementedError("parsing") elif isinstance(element, Expr): return self.from_expr(element) else: return self.ground_new(element) __call__ = field_new def _rebuild_expr(self, expr, mapping): domain = self.domain powers = tuple((gen, gen.as_base_exp()) for gen in mapping.keys() if gen.is_Pow or isinstance(gen, ExpBase)) def _rebuild(expr): generator = 
mapping.get(expr) if generator is not None: return generator elif expr.is_Add: return reduce(add, list(map(_rebuild, expr.args))) elif expr.is_Mul: return reduce(mul, list(map(_rebuild, expr.args))) elif expr.is_Pow or isinstance(expr, (ExpBase, Exp1)): b, e = expr.as_base_exp() # look for bg**eg whose integer power may be b**e for gen, (bg, eg) in powers: if bg == b and Mod(e, eg) == 0: return mapping.get(gen)**int(e/eg) if e.is_Integer and e is not S.One: return _rebuild(b)**int(e) try: return domain.convert(expr) except CoercionFailed: if not domain.is_Field and domain.has_assoc_Field: return domain.get_field().convert(expr) else: raise return _rebuild(sympify(expr)) def from_expr(self, expr): mapping = dict(list(zip(self.symbols, self.gens))) try: frac = self._rebuild_expr(expr, mapping) except CoercionFailed: raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr)) else: return self.field_new(frac) def to_domain(self): return FractionField(self) def to_ring(self): from sympy.polys.rings import PolyRing return PolyRing(self.symbols, self.domain, self.order) class FracElement(DomainElement, DefaultPrinting, CantSympify): """Element of multivariate distributed rational function field. """ def __init__(self, numer, denom=None): if denom is None: denom = self.field.ring.one elif not denom: raise ZeroDivisionError("zero denominator") self.numer = numer self.denom = denom def raw_new(f, numer, denom): return f.__class__(numer, denom) def new(f, numer, denom): return f.raw_new(*numer.cancel(denom)) def to_poly(f): if f.denom != 1: raise ValueError("f.denom should be 1") return f.numer def parent(self): return self.field.to_domain() def __getnewargs__(self): return (self.field, self.numer, self.denom) _hash = None def __hash__(self): _hash = self._hash if _hash is None: self._hash = _hash = hash((self.field, self.numer, self.denom)) return _hash def copy(self): return self.raw_new(self.numer.copy(), self.denom.copy()) def set_field(self, new_field): if self.field == new_field: return self else: new_ring = new_field.ring numer = self.numer.set_ring(new_ring) denom = self.denom.set_ring(new_ring) return new_field.new(numer, denom) def as_expr(self, *symbols): return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols) def __eq__(f, g): if isinstance(g, FracElement) and f.field == g.field: return f.numer == g.numer and f.denom == g.denom else: return f.numer == g and f.denom == f.field.ring.one def __ne__(f, g): return not f == g def __bool__(f): return bool(f.numer) def sort_key(self): return (self.denom.sort_key(), self.numer.sort_key()) def _cmp(f1, f2, op): if isinstance(f2, f1.field.dtype): return op(f1.sort_key(), f2.sort_key()) else: return NotImplemented def __lt__(f1, f2): return f1._cmp(f2, lt) def __le__(f1, f2): return f1._cmp(f2, le) def __gt__(f1, f2): return f1._cmp(f2, gt) def __ge__(f1, f2): return f1._cmp(f2, ge) def __pos__(f): """Negate all coefficients in ``f``. """ return f.raw_new(f.numer, f.denom) def __neg__(f): """Negate all coefficients in ``f``. 
""" return f.raw_new(-f.numer, f.denom) def _extract_ground(self, element): domain = self.field.domain try: element = domain.convert(element) except CoercionFailed: if not domain.is_Field and domain.has_assoc_Field: ground_field = domain.get_field() try: element = ground_field.convert(element) except CoercionFailed: pass else: return -1, ground_field.numer(element), ground_field.denom(element) return 0, None, None else: return 1, element, None def __add__(f, g): """Add rational functions ``f`` and ``g``. """ field = f.field if not g: return f elif not f: return g elif isinstance(g, field.dtype): if f.denom == g.denom: return f.new(f.numer + g.numer, f.denom) else: return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom) elif isinstance(g, field.ring.dtype): return f.new(f.numer + f.denom*g, f.denom) else: if isinstance(g, FracElement): if isinstance(field.domain, FractionField) and field.domain.field == g.field: pass elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: return g.__radd__(f) else: return NotImplemented elif isinstance(g, PolyElement): if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: pass else: return g.__radd__(f) return f.__radd__(g) def __radd__(f, c): if isinstance(c, f.field.ring.dtype): return f.new(f.numer + f.denom*c, f.denom) op, g_numer, g_denom = f._extract_ground(c) if op == 1: return f.new(f.numer + f.denom*g_numer, f.denom) elif not op: return NotImplemented else: return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom) def __sub__(f, g): """Subtract rational functions ``f`` and ``g``. """ field = f.field if not g: return f elif not f: return -g elif isinstance(g, field.dtype): if f.denom == g.denom: return f.new(f.numer - g.numer, f.denom) else: return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom) elif isinstance(g, field.ring.dtype): return f.new(f.numer - f.denom*g, f.denom) else: if isinstance(g, FracElement): if isinstance(field.domain, FractionField) and field.domain.field == g.field: pass elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: return g.__rsub__(f) else: return NotImplemented elif isinstance(g, PolyElement): if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: pass else: return g.__rsub__(f) op, g_numer, g_denom = f._extract_ground(g) if op == 1: return f.new(f.numer - f.denom*g_numer, f.denom) elif not op: return NotImplemented else: return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom) def __rsub__(f, c): if isinstance(c, f.field.ring.dtype): return f.new(-f.numer + f.denom*c, f.denom) op, g_numer, g_denom = f._extract_ground(c) if op == 1: return f.new(-f.numer + f.denom*g_numer, f.denom) elif not op: return NotImplemented else: return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom) def __mul__(f, g): """Multiply rational functions ``f`` and ``g``. 
""" field = f.field if not f or not g: return field.zero elif isinstance(g, field.dtype): return f.new(f.numer*g.numer, f.denom*g.denom) elif isinstance(g, field.ring.dtype): return f.new(f.numer*g, f.denom) else: if isinstance(g, FracElement): if isinstance(field.domain, FractionField) and field.domain.field == g.field: pass elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: return g.__rmul__(f) else: return NotImplemented elif isinstance(g, PolyElement): if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: pass else: return g.__rmul__(f) return f.__rmul__(g) def __rmul__(f, c): if isinstance(c, f.field.ring.dtype): return f.new(f.numer*c, f.denom) op, g_numer, g_denom = f._extract_ground(c) if op == 1: return f.new(f.numer*g_numer, f.denom) elif not op: return NotImplemented else: return f.new(f.numer*g_numer, f.denom*g_denom) def __truediv__(f, g): """Computes quotient of fractions ``f`` and ``g``. """ field = f.field if not g: raise ZeroDivisionError elif isinstance(g, field.dtype): return f.new(f.numer*g.denom, f.denom*g.numer) elif isinstance(g, field.ring.dtype): return f.new(f.numer, f.denom*g) else: if isinstance(g, FracElement): if isinstance(field.domain, FractionField) and field.domain.field == g.field: pass elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: return g.__rtruediv__(f) else: return NotImplemented elif isinstance(g, PolyElement): if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: pass else: return g.__rtruediv__(f) op, g_numer, g_denom = f._extract_ground(g) if op == 1: return f.new(f.numer, f.denom*g_numer) elif not op: return NotImplemented else: return f.new(f.numer*g_denom, f.denom*g_numer) def __rtruediv__(f, c): if not f: raise ZeroDivisionError elif isinstance(c, f.field.ring.dtype): return f.new(f.denom*c, f.numer) op, g_numer, g_denom = f._extract_ground(c) if op == 1: return f.new(f.denom*g_numer, f.numer) elif not op: return NotImplemented else: return f.new(f.denom*g_numer, f.numer*g_denom) def __pow__(f, n): """Raise ``f`` to a non-negative power ``n``. """ if n >= 0: return f.raw_new(f.numer**n, f.denom**n) elif not f: raise ZeroDivisionError else: return f.raw_new(f.denom**-n, f.numer**-n) def diff(f, x): """Computes partial derivative in ``x``. Examples ======== >>> from sympy.polys.fields import field >>> from sympy.polys.domains import ZZ >>> _, x, y, z = field("x,y,z", ZZ) >>> ((x**2 + y)/(z + 1)).diff(x) 2*x/(z + 1) """ x = x.to_poly() return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2) def __call__(f, *values): if 0 < len(values) <= f.field.ngens: return f.evaluate(list(zip(f.field.gens, values))) else: raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values))) def evaluate(f, x, a=None): if isinstance(x, list) and a is None: x = [ (X.to_poly(), a) for X, a in x ] numer, denom = f.numer.evaluate(x), f.denom.evaluate(x) else: x = x.to_poly() numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a) field = numer.ring.to_field() return field.new(numer, denom) def subs(f, x, a=None): if isinstance(x, list) and a is None: x = [ (X.to_poly(), a) for X, a in x ] numer, denom = f.numer.subs(x), f.denom.subs(x) else: x = x.to_poly() numer, denom = f.numer.subs(x, a), f.denom.subs(x, a) return f.new(numer, denom) def compose(f, x, a=None): raise NotImplementedError
1245dd284c0b796606eb28bca3c3b80b6c14efeaa8b08984f41f369863593efd
"""Low-level linear systems solver. """ from sympy.utilities.iterables import connected_components from sympy.matrices import MutableDenseMatrix from sympy.polys.domains import EX from sympy.polys.rings import sring from sympy.polys.polyerrors import NotInvertible from sympy.polys.domainmatrix import DomainMatrix class PolyNonlinearError(Exception): """Raised by solve_lin_sys for nonlinear equations""" pass class RawMatrix(MutableDenseMatrix): _sympify = staticmethod(lambda x: x) def eqs_to_matrix(eqs_coeffs, eqs_rhs, gens, domain): """Get matrix from linear equations in dict format. Explanation =========== Get the matrix representation of a system of linear equations represented as dicts with low-level DomainElement coefficients. This is an *internal* function that is used by solve_lin_sys. Parameters ========== eqs_coeffs: list[dict[Symbol, DomainElement]] The left hand sides of the equations as dicts mapping from symbols to coefficients where the coefficients are instances of DomainElement. eqs_rhs: list[DomainElements] The right hand sides of the equations as instances of DomainElement. gens: list[Symbol] The unknowns in the system of equations. domain: Domain The domain for coefficients of both lhs and rhs. Returns ======= The augmented matrix representation of the system as a DomainMatrix. Examples ======== >>> from sympy import symbols, ZZ >>> from sympy.polys.solvers import eqs_to_matrix >>> x, y = symbols('x, y') >>> eqs_coeff = [{x:ZZ(1), y:ZZ(1)}, {x:ZZ(1), y:ZZ(-1)}] >>> eqs_rhs = [ZZ(0), ZZ(-1)] >>> eqs_to_matrix(eqs_coeff, eqs_rhs, [x, y], ZZ) DomainMatrix([[1, 1, 0], [1, -1, 1]], (2, 3), ZZ) See also ======== solve_lin_sys: Uses :func:`~eqs_to_matrix` internally """ sym2index = {x: n for n, x in enumerate(gens)} nrows = len(eqs_coeffs) ncols = len(gens) + 1 rows = [[domain.zero] * ncols for _ in range(nrows)] for row, eq_coeff, eq_rhs in zip(rows, eqs_coeffs, eqs_rhs): for sym, coeff in eq_coeff.items(): row[sym2index[sym]] = domain.convert(coeff) row[-1] = -domain.convert(eq_rhs) return DomainMatrix(rows, (nrows, ncols), domain) def sympy_eqs_to_ring(eqs, symbols): """Convert a system of equations from Expr to a PolyRing Explanation =========== High-level functions like ``solve`` expect Expr as inputs but can use ``solve_lin_sys`` internally. This function converts equations from ``Expr`` to the low-level poly types used by the ``solve_lin_sys`` function. Parameters ========== eqs: List of Expr A list of equations as Expr instances symbols: List of Symbol A list of the symbols that are the unknowns in the system of equations. Returns ======= Tuple[List[PolyElement], Ring]: The equations as PolyElement instances and the ring of polynomials within which each equation is represented. 
Examples ======== >>> from sympy import symbols >>> from sympy.polys.solvers import sympy_eqs_to_ring >>> a, x, y = symbols('a, x, y') >>> eqs = [x-y, x+a*y] >>> eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y]) >>> eqs_ring [x - y, x + a*y] >>> type(eqs_ring[0]) <class 'sympy.polys.rings.PolyElement'> >>> ring ZZ(a)[x,y] With the equations in this form they can be passed to ``solve_lin_sys``: >>> from sympy.polys.solvers import solve_lin_sys >>> solve_lin_sys(eqs_ring, ring) {y: 0, x: 0} """ try: K, eqs_K = sring(eqs, symbols, field=True, extension=True) except NotInvertible: # https://github.com/sympy/sympy/issues/18874 K, eqs_K = sring(eqs, symbols, domain=EX) return eqs_K, K.to_domain() def solve_lin_sys(eqs, ring, _raw=True): """Solve a system of linear equations from a PolynomialRing Explanation =========== Solves a system of linear equations given as PolyElement instances of a PolynomialRing. The basic arithmetic is carried out using instance of DomainElement which is more efficient than :class:`~sympy.core.expr.Expr` for the most common inputs. While this is a public function it is intended primarily for internal use so its interface is not necessarily convenient. Users are suggested to use the :func:`sympy.solvers.solveset.linsolve` function (which uses this function internally) instead. Parameters ========== eqs: list[PolyElement] The linear equations to be solved as elements of a PolynomialRing (assumed equal to zero). ring: PolynomialRing The polynomial ring from which eqs are drawn. The generators of this ring are the unkowns to be solved for and the domain of the ring is the domain of the coefficients of the system of equations. _raw: bool If *_raw* is False, the keys and values in the returned dictionary will be of type Expr (and the unit of the field will be removed from the keys) otherwise the low-level polys types will be returned, e.g. PolyElement: PythonRational. Returns ======= ``None`` if the system has no solution. dict[Symbol, Expr] if _raw=False dict[Symbol, DomainElement] if _raw=True. Examples ======== >>> from sympy import symbols >>> from sympy.polys.solvers import solve_lin_sys, sympy_eqs_to_ring >>> x, y = symbols('x, y') >>> eqs = [x - y, x + y - 2] >>> eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y]) >>> solve_lin_sys(eqs_ring, ring) {y: 1, x: 1} Passing ``_raw=False`` returns the same result except that the keys are ``Expr`` rather than low-level poly types. >>> solve_lin_sys(eqs_ring, ring, _raw=False) {x: 1, y: 1} See also ======== sympy_eqs_to_ring: prepares the inputs to ``solve_lin_sys``. linsolve: ``linsolve`` uses ``solve_lin_sys`` internally. sympy.solvers.solvers.solve: ``solve`` uses ``solve_lin_sys`` internally. 
""" as_expr = not _raw assert ring.domain.is_Field eqs_dict = [dict(eq) for eq in eqs] one_monom = ring.one.monoms()[0] zero = ring.domain.zero eqs_rhs = [] eqs_coeffs = [] for eq_dict in eqs_dict: eq_rhs = eq_dict.pop(one_monom, zero) eq_coeffs = {} for monom, coeff in eq_dict.items(): if sum(monom) != 1: msg = "Nonlinear term encountered in solve_lin_sys" raise PolyNonlinearError(msg) eq_coeffs[ring.gens[monom.index(1)]] = coeff if not eq_coeffs: if not eq_rhs: continue else: return None eqs_rhs.append(eq_rhs) eqs_coeffs.append(eq_coeffs) result = _solve_lin_sys(eqs_coeffs, eqs_rhs, ring) if result is not None and as_expr: def to_sympy(x): as_expr = getattr(x, 'as_expr', None) if as_expr: return as_expr() else: return ring.domain.to_sympy(x) tresult = {to_sympy(sym): to_sympy(val) for sym, val in result.items()} # Remove 1.0x result = {} for k, v in tresult.items(): if k.is_Mul: c, s = k.as_coeff_Mul() result[s] = v/c else: result[k] = v return result def _solve_lin_sys(eqs_coeffs, eqs_rhs, ring): """Solve a linear system from dict of PolynomialRing coefficients Explanation =========== This is an **internal** function used by :func:`solve_lin_sys` after the equations have been preprocessed. The role of this function is to split the system into connected components and pass those to :func:`_solve_lin_sys_component`. Examples ======== Setup a system for $x-y=0$ and $x+y=2$ and solve: >>> from sympy import symbols, sring >>> from sympy.polys.solvers import _solve_lin_sys >>> x, y = symbols('x, y') >>> R, (xr, yr) = sring([x, y], [x, y]) >>> eqs = [{xr:R.one, yr:-R.one}, {xr:R.one, yr:R.one}] >>> eqs_rhs = [R.zero, -2*R.one] >>> _solve_lin_sys(eqs, eqs_rhs, R) {y: 1, x: 1} See also ======== solve_lin_sys: This function is used internally by :func:`solve_lin_sys`. """ V = ring.gens E = [] for eq_coeffs in eqs_coeffs: syms = list(eq_coeffs) E.extend(zip(syms[:-1], syms[1:])) G = V, E components = connected_components(G) sym2comp = {} for n, component in enumerate(components): for sym in component: sym2comp[sym] = n subsystems = [([], []) for _ in range(len(components))] for eq_coeff, eq_rhs in zip(eqs_coeffs, eqs_rhs): sym = next(iter(eq_coeff), None) sub_coeff, sub_rhs = subsystems[sym2comp[sym]] sub_coeff.append(eq_coeff) sub_rhs.append(eq_rhs) sol = {} for subsystem in subsystems: subsol = _solve_lin_sys_component(subsystem[0], subsystem[1], ring) if subsol is None: return None sol.update(subsol) return sol def _solve_lin_sys_component(eqs_coeffs, eqs_rhs, ring): """Solve a linear system from dict of PolynomialRing coefficients Explanation =========== This is an **internal** function used by :func:`solve_lin_sys` after the equations have been preprocessed. After :func:`_solve_lin_sys` splits the system into connected components this function is called for each component. The system of equations is solved using Gauss-Jordan elimination with division followed by back-substitution. Examples ======== Setup a system for $x-y=0$ and $x+y=2$ and solve: >>> from sympy import symbols, sring >>> from sympy.polys.solvers import _solve_lin_sys_component >>> x, y = symbols('x, y') >>> R, (xr, yr) = sring([x, y], [x, y]) >>> eqs = [{xr:R.one, yr:-R.one}, {xr:R.one, yr:R.one}] >>> eqs_rhs = [R.zero, -2*R.one] >>> _solve_lin_sys_component(eqs, eqs_rhs, R) {y: 1, x: 1} See also ======== solve_lin_sys: This function is used internally by :func:`solve_lin_sys`. 
""" # transform from equations to matrix form matrix = eqs_to_matrix(eqs_coeffs, eqs_rhs, ring.gens, ring.domain) # convert to a field for rref if not matrix.domain.is_Field: matrix = matrix.to_field() # solve by row-reduction echelon, pivots = matrix.rref() # construct the returnable form of the solutions keys = ring.gens if pivots and pivots[-1] == len(keys): return None if len(pivots) == len(keys): sol = [] for s in [row[-1] for row in echelon.rep]: a = s sol.append(a) sols = dict(zip(keys, sol)) else: sols = {} g = ring.gens # Extract ground domain coefficients and convert to the ring: if hasattr(ring, 'ring'): convert = ring.ring.ground_new else: convert = ring.ground_new echelon = [[convert(eij) for eij in ei] for ei in echelon.rep] for i, p in enumerate(pivots): v = echelon[i][-1] - sum(echelon[i][j]*g[j] for j in range(p+1, len(g)) if echelon[i][j]) sols[keys[p]] = v return sols
41fcb53a856364874e519abf9d6e39348299de92c32ee9516e36aa79e24e691b
"""Useful utilities for higher level polynomial classes. """ from sympy.core import (S, Add, Mul, Pow, Eq, Expr, expand_mul, expand_multinomial) from sympy.core.exprtools import decompose_power, decompose_power_rat from sympy.polys.polyerrors import PolynomialError, GeneratorsError from sympy.polys.polyoptions import build_options import re _gens_order = { 'a': 301, 'b': 302, 'c': 303, 'd': 304, 'e': 305, 'f': 306, 'g': 307, 'h': 308, 'i': 309, 'j': 310, 'k': 311, 'l': 312, 'm': 313, 'n': 314, 'o': 315, 'p': 216, 'q': 217, 'r': 218, 's': 219, 't': 220, 'u': 221, 'v': 222, 'w': 223, 'x': 124, 'y': 125, 'z': 126, } _max_order = 1000 _re_gen = re.compile(r"^(.+?)(\d*)$") def _nsort(roots, separated=False): """Sort the numerical roots putting the real roots first, then sorting according to real and imaginary parts. If ``separated`` is True, then the real and imaginary roots will be returned in two lists, respectively. This routine tries to avoid issue 6137 by separating the roots into real and imaginary parts before evaluation. In addition, the sorting will raise an error if any computation cannot be done with precision. """ if not all(r.is_number for r in roots): raise NotImplementedError # see issue 6137: # get the real part of the evaluated real and imaginary parts of each root key = [[i.n(2).as_real_imag()[0] for i in r.as_real_imag()] for r in roots] # make sure the parts were computed with precision if len(roots) > 1 and any(i._prec == 1 for k in key for i in k): raise NotImplementedError("could not compute root with precision") # insert a key to indicate if the root has an imaginary part key = [(1 if i else 0, r, i) for r, i in key] key = sorted(zip(key, roots)) # return the real and imaginary roots separately if desired if separated: r = [] i = [] for (im, _, _), v in key: if im: i.append(v) else: r.append(v) return r, i _, roots = zip(*key) return list(roots) def _sort_gens(gens, **args): """Sort generators in a reasonably intelligent way. """ opt = build_options(args) gens_order, wrt = {}, None if opt is not None: gens_order, wrt = {}, opt.wrt for i, gen in enumerate(opt.sort): gens_order[gen] = i + 1 def order_key(gen): gen = str(gen) if wrt is not None: try: return (-len(wrt) + wrt.index(gen), gen, 0) except ValueError: pass name, index = _re_gen.match(gen).groups() if index: index = int(index) else: index = 0 try: return ( gens_order[name], name, index) except KeyError: pass try: return (_gens_order[name], name, index) except KeyError: pass return (_max_order, name, index) try: gens = sorted(gens, key=order_key) except TypeError: # pragma: no cover pass return tuple(gens) def _unify_gens(f_gens, g_gens): """Unify generators in a reasonably intelligent way. """ f_gens = list(f_gens) g_gens = list(g_gens) if f_gens == g_gens: return tuple(f_gens) gens, common, k = [], [], 0 for gen in f_gens: if gen in g_gens: common.append(gen) for i, gen in enumerate(g_gens): if gen in common: g_gens[i], k = common[k], k + 1 for gen in common: i = f_gens.index(gen) gens.extend(f_gens[:i]) f_gens = f_gens[i + 1:] i = g_gens.index(gen) gens.extend(g_gens[:i]) g_gens = g_gens[i + 1:] gens.append(gen) gens.extend(f_gens) gens.extend(g_gens) return tuple(gens) def _analyze_gens(gens): """Support for passing generators as `*gens` and `[gens]`. """ if len(gens) == 1 and hasattr(gens[0], '__iter__'): return tuple(gens[0]) else: return tuple(gens) def _sort_factors(factors, **args): """Sort low-level factors in increasing 'complexity' order. 
""" def order_if_multiple_key(factor): (f, n) = factor return (len(f), n, f) def order_no_multiple_key(f): return (len(f), f) if args.get('multiple', True): return sorted(factors, key=order_if_multiple_key) else: return sorted(factors, key=order_no_multiple_key) illegal = [S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity] finf = [float(i) for i in illegal[1:3]] def _not_a_coeff(expr): """Do not treat NaN and infinities as valid polynomial coefficients. """ if expr in illegal or expr in finf: return True if type(expr) is float and float(expr) != expr: return True # nan return # could be def _parallel_dict_from_expr_if_gens(exprs, opt): """Transform expressions into a multinomial form given generators. """ k, indices = len(opt.gens), {} for i, g in enumerate(opt.gens): indices[g] = i polys = [] for expr in exprs: poly = {} if expr.is_Equality: expr = expr.lhs - expr.rhs for term in Add.make_args(expr): coeff, monom = [], [0]*k for factor in Mul.make_args(term): if not _not_a_coeff(factor) and factor.is_Number: coeff.append(factor) else: try: if opt.series is False: base, exp = decompose_power(factor) if exp < 0: exp, base = -exp, Pow(base, -S.One) else: base, exp = decompose_power_rat(factor) monom[indices[base]] = exp except KeyError: if not factor.free_symbols.intersection(opt.gens): coeff.append(factor) else: raise PolynomialError("%s contains an element of " "the set of generators." % factor) monom = tuple(monom) if monom in poly: poly[monom] += Mul(*coeff) else: poly[monom] = Mul(*coeff) polys.append(poly) return polys, opt.gens def _parallel_dict_from_expr_no_gens(exprs, opt): """Transform expressions into a multinomial form and figure out generators. """ if opt.domain is not None: def _is_coeff(factor): return factor in opt.domain elif opt.extension is True: def _is_coeff(factor): return factor.is_algebraic elif opt.greedy is not False: def _is_coeff(factor): return factor is S.ImaginaryUnit else: def _is_coeff(factor): return factor.is_number gens, reprs = set(), [] for expr in exprs: terms = [] if expr.is_Equality: expr = expr.lhs - expr.rhs for term in Add.make_args(expr): coeff, elements = [], {} for factor in Mul.make_args(term): if not _not_a_coeff(factor) and (factor.is_Number or _is_coeff(factor)): coeff.append(factor) else: if opt.series is False: base, exp = decompose_power(factor) if exp < 0: exp, base = -exp, Pow(base, -S.One) else: base, exp = decompose_power_rat(factor) elements[base] = elements.setdefault(base, 0) + exp gens.add(base) terms.append((coeff, elements)) reprs.append(terms) gens = _sort_gens(gens, opt=opt) k, indices = len(gens), {} for i, g in enumerate(gens): indices[g] = i polys = [] for terms in reprs: poly = {} for coeff, term in terms: monom = [0]*k for base, exp in term.items(): monom[indices[base]] = exp monom = tuple(monom) if monom in poly: poly[monom] += Mul(*coeff) else: poly[monom] = Mul(*coeff) polys.append(poly) return polys, tuple(gens) def _dict_from_expr_if_gens(expr, opt): """Transform an expression into a multinomial form given generators. """ (poly,), gens = _parallel_dict_from_expr_if_gens((expr,), opt) return poly, gens def _dict_from_expr_no_gens(expr, opt): """Transform an expression into a multinomial form and figure out generators. """ (poly,), gens = _parallel_dict_from_expr_no_gens((expr,), opt) return poly, gens def parallel_dict_from_expr(exprs, **args): """Transform expressions into a multinomial form. 
""" reps, opt = _parallel_dict_from_expr(exprs, build_options(args)) return reps, opt.gens def _parallel_dict_from_expr(exprs, opt): """Transform expressions into a multinomial form. """ if opt.expand is not False: exprs = [ expr.expand() for expr in exprs ] if any(expr.is_commutative is False for expr in exprs): raise PolynomialError('non-commutative expressions are not supported') if opt.gens: reps, gens = _parallel_dict_from_expr_if_gens(exprs, opt) else: reps, gens = _parallel_dict_from_expr_no_gens(exprs, opt) return reps, opt.clone({'gens': gens}) def dict_from_expr(expr, **args): """Transform an expression into a multinomial form. """ rep, opt = _dict_from_expr(expr, build_options(args)) return rep, opt.gens def _dict_from_expr(expr, opt): """Transform an expression into a multinomial form. """ if expr.is_commutative is False: raise PolynomialError('non-commutative expressions are not supported') def _is_expandable_pow(expr): return (expr.is_Pow and expr.exp.is_positive and expr.exp.is_Integer and expr.base.is_Add) if opt.expand is not False: if not isinstance(expr, (Expr, Eq)): raise PolynomialError('expression must be of type Expr') expr = expr.expand() # TODO: Integrate this into expand() itself while any(_is_expandable_pow(i) or i.is_Mul and any(_is_expandable_pow(j) for j in i.args) for i in Add.make_args(expr)): expr = expand_multinomial(expr) while any(i.is_Mul and any(j.is_Add for j in i.args) for i in Add.make_args(expr)): expr = expand_mul(expr) if opt.gens: rep, gens = _dict_from_expr_if_gens(expr, opt) else: rep, gens = _dict_from_expr_no_gens(expr, opt) return rep, opt.clone({'gens': gens}) def expr_from_dict(rep, *gens): """Convert a multinomial form into an expression. """ result = [] for monom, coeff in rep.items(): term = [coeff] for g, m in zip(gens, monom): if m: term.append(Pow(g, m)) result.append(Mul(*term)) return Add(*result) parallel_dict_from_basic = parallel_dict_from_expr dict_from_basic = dict_from_expr basic_from_dict = expr_from_dict def _dict_reorder(rep, gens, new_gens): """Reorder levels using dict representation. """ gens = list(gens) monoms = rep.keys() coeffs = rep.values() new_monoms = [ [] for _ in range(len(rep)) ] used_indices = set() for gen in new_gens: try: j = gens.index(gen) used_indices.add(j) for M, new_M in zip(monoms, new_monoms): new_M.append(M[j]) except ValueError: for new_M in new_monoms: new_M.append(0) for i, _ in enumerate(gens): if i not in used_indices: for monom in monoms: if monom[i]: raise GeneratorsError("unable to drop generators") return map(tuple, new_monoms), coeffs class PicklableWithSlots: """ Mixin class that allows to pickle objects with ``__slots__``. Examples ======== First define a class that mixes :class:`PicklableWithSlots` in:: >>> from sympy.polys.polyutils import PicklableWithSlots >>> class Some(PicklableWithSlots): ... __slots__ = ('foo', 'bar') ... ... def __init__(self, foo, bar): ... self.foo = foo ... 
self.bar = bar To make :mod:`pickle` happy in doctest we have to use these hacks:: >>> import builtins >>> builtins.Some = Some >>> from sympy.polys import polyutils >>> polyutils.Some = Some Next lets see if we can create an instance, pickle it and unpickle:: >>> some = Some('abc', 10) >>> some.foo, some.bar ('abc', 10) >>> from pickle import dumps, loads >>> some2 = loads(dumps(some)) >>> some2.foo, some2.bar ('abc', 10) """ __slots__ = () def __getstate__(self, cls=None): if cls is None: # This is the case for the instance that gets pickled cls = self.__class__ d = {} # Get all data that should be stored from super classes for c in cls.__bases__: if hasattr(c, "__getstate__"): d.update(c.__getstate__(self, c)) # Get all information that should be stored from cls and return the dict for name in cls.__slots__: if hasattr(self, name): d[name] = getattr(self, name) return d def __setstate__(self, d): # All values that were pickled are now assigned to a fresh instance for name, value in d.items(): try: setattr(self, name, value) except AttributeError: # This is needed in cases like Rational :> Half pass
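# ----------------------------------------------------------------------
# A minimal usage sketch of the dict <-> expression round trip provided
# by ``dict_from_expr`` and ``expr_from_dict`` defined above.  The sample
# expression is an arbitrary illustration, not something used elsewhere.
if __name__ == "__main__":
    from sympy.abc import x, y
    from sympy.polys.polyutils import dict_from_expr, expr_from_dict

    rep, gens = dict_from_expr(3*x**2*y + 5*x - 7)
    # ``rep`` maps exponent tuples (one entry per generator) to
    # coefficients, e.g. {(2, 1): 3, (1, 0): 5, (0, 0): -7} for (x, y)
    print(gens, rep)

    # ``expr_from_dict`` rebuilds an equivalent expression from the data
    print(expr_from_dict(rep, *gens))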
2d5b0104ed41e074eb503c4a5878c0739cf77d13a24086874da1742d9d30fa36
"""Computational algebraic field theory. """ from functools import reduce from sympy import ( S, Rational, AlgebraicNumber, GoldenRatio, TribonacciConstant, Add, Mul, sympify, Dummy, expand_mul, I, pi ) from sympy.functions import sqrt, cbrt from sympy.core.exprtools import Factors from sympy.core.function import _mexpand from sympy.functions.elementary.exponential import exp from sympy.functions.elementary.trigonometric import cos, sin from sympy.ntheory import sieve from sympy.ntheory.factor_ import divisors from sympy.polys.densetools import dup_eval from sympy.polys.domains import ZZ, QQ from sympy.polys.orthopolys import dup_chebyshevt from sympy.polys.polyerrors import ( IsomorphismFailed, NotAlgebraic, GeneratorsError, ) from sympy.polys.polytools import ( Poly, PurePoly, invert, factor_list, groebner, resultant, degree, poly_from_expr, parallel_poly_from_expr, lcm ) from sympy.polys.polyutils import dict_from_expr, expr_from_dict from sympy.polys.ring_series import rs_compose_add from sympy.polys.rings import ring from sympy.polys.rootoftools import CRootOf from sympy.polys.specialpolys import cyclotomic_poly from sympy.printing.lambdarepr import LambdaPrinter from sympy.printing.pycode import PythonCodePrinter, MpmathPrinter from sympy.simplify.radsimp import _split_gcd from sympy.simplify.simplify import _is_sum_surds from sympy.utilities import ( numbered_symbols, lambdify, public, sift ) from mpmath import pslq, mp def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5): """ Return a factor having root ``v`` It is assumed that one of the factors has root ``v``. """ if isinstance(factors[0], tuple): factors = [f[0] for f in factors] if len(factors) == 1: return factors[0] points = {x:v} symbols = dom.symbols if hasattr(dom, 'symbols') else [] t = QQ(1, 10) for n in range(bound**len(symbols)): prec1 = 10 n_temp = n for s in symbols: points[s] = n_temp % bound n_temp = n_temp // bound while True: candidates = [] eps = t**(prec1 // 2) for f in factors: if abs(f.as_expr().evalf(prec1, points)) < eps: candidates.append(f) if candidates: factors = candidates if len(factors) == 1: return factors[0] if prec1 > prec: break prec1 *= 2 raise NotImplementedError("multiple candidates for the minimal polynomial of %s" % v) def _separate_sq(p): """ helper function for ``_minimal_polynomial_sq`` It selects a rational ``g`` such that the polynomial ``p`` consists of a sum of terms whose surds squared have gcd equal to ``g`` and a sum of terms with surds squared prime with ``g``; then it takes the field norm to eliminate ``sqrt(g)`` See simplify.simplify.split_surds and polytools.sqf_norm. Examples ======== >>> from sympy import sqrt >>> from sympy.abc import x >>> from sympy.polys.numberfields import _separate_sq >>> p= -x + sqrt(2) + sqrt(3) + sqrt(7) >>> p = _separate_sq(p); p -x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8 >>> p = _separate_sq(p); p -x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20 >>> p = _separate_sq(p); p -x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400 """ from sympy.utilities.iterables import sift def is_sqrt(expr): return expr.is_Pow and expr.exp is S.Half # p = c1*sqrt(q1) + ... 
+ cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)] a = [] for y in p.args: if not y.is_Mul: if is_sqrt(y): a.append((S.One, y**2)) elif y.is_Atom: a.append((y, S.One)) elif y.is_Pow and y.exp.is_integer: a.append((y, S.One)) else: raise NotImplementedError continue T, F = sift(y.args, is_sqrt, binary=True) a.append((Mul(*F), Mul(*T)**2)) a.sort(key=lambda z: z[1]) if a[-1][1] is S.One: # there are no surds return p surds = [z for y, z in a] for i in range(len(surds)): if surds[i] != 1: break g, b1, b2 = _split_gcd(*surds[i:]) a1 = [] a2 = [] for y, z in a: if z in b1: a1.append(y*z**S.Half) else: a2.append(y*z**S.Half) p1 = Add(*a1) p2 = Add(*a2) p = _mexpand(p1**2) - _mexpand(p2**2) return p def _minimal_polynomial_sq(p, n, x): """ Returns the minimal polynomial for the ``nth-root`` of a sum of surds or ``None`` if it fails. Parameters ========== p : sum of surds n : positive integer x : variable of the returned polynomial Examples ======== >>> from sympy.polys.numberfields import _minimal_polynomial_sq >>> from sympy import sqrt >>> from sympy.abc import x >>> q = 1 + sqrt(2) + sqrt(3) >>> _minimal_polynomial_sq(q, 3, x) x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8 """ from sympy.simplify.simplify import _is_sum_surds p = sympify(p) n = sympify(n) if not n.is_Integer or not n > 0 or not _is_sum_surds(p): return None pn = p**Rational(1, n) # eliminate the square roots p -= x while 1: p1 = _separate_sq(p) if p1 is p: p = p1.subs({x:x**n}) break else: p = p1 # _separate_sq eliminates field extensions in a minimal way, so that # if n = 1 then `p = constant*(minimal_polynomial(p))` # if n > 1 it contains the minimal polynomial as a factor. if n == 1: p1 = Poly(p) if p.coeff(x**p1.degree(x)) < 0: p = -p p = p.primitive()[1] return p # by construction `p` has root `pn` # the minimal polynomial is the factor vanishing in x = pn factors = factor_list(p)[1] result = _choose_factor(factors, x, pn) return result def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None): """ return the minimal polynomial for ``op(ex1, ex2)`` Parameters ========== op : operation ``Add`` or ``Mul`` ex1, ex2 : expressions for the algebraic elements x : indeterminate of the polynomials dom: ground domain mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None Examples ======== >>> from sympy import sqrt, Add, Mul, QQ >>> from sympy.polys.numberfields import _minpoly_op_algebraic_element >>> from sympy.abc import x, y >>> p1 = sqrt(sqrt(2) + 1) >>> p2 = sqrt(sqrt(2) - 1) >>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ) x - 1 >>> q1 = sqrt(y) >>> q2 = 1 / y >>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.frac_field(y)) x**2*y**2 - 2*x*y - y**3 + 1 References ========== .. [1] https://en.wikipedia.org/wiki/Resultant .. [2] I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638 "Degrees of sums in a separable field extension". 
""" y = Dummy(str(x)) if mp1 is None: mp1 = _minpoly_compose(ex1, x, dom) if mp2 is None: mp2 = _minpoly_compose(ex2, y, dom) else: mp2 = mp2.subs({x: y}) if op is Add: # mp1a = mp1.subs({x: x - y}) if dom == QQ: R, X = ring('X', QQ) p1 = R(dict_from_expr(mp1)[0]) p2 = R(dict_from_expr(mp2)[0]) else: (p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y) r = p1.compose(p2) mp1a = r.as_expr() elif op is Mul: mp1a = _muly(mp1, x, y) else: raise NotImplementedError('option not available') if op is Mul or dom != QQ: r = resultant(mp1a, mp2, gens=[y, x]) else: r = rs_compose_add(p1, p2) r = expr_from_dict(r.as_expr_dict(), x) deg1 = degree(mp1, x) deg2 = degree(mp2, y) if op is Mul and deg1 == 1 or deg2 == 1: # if deg1 = 1, then mp1 = x - a; mp1a = x - y - a; # r = mp2(x - a), so that `r` is irreducible return r r = Poly(r, x, domain=dom) _, factors = r.factor_list() res = _choose_factor(factors, x, op(ex1, ex2), dom) return res.as_expr() def _invertx(p, x): """ Returns ``expand_mul(x**degree(p, x)*p.subs(x, 1/x))`` """ p1 = poly_from_expr(p, x)[0] n = degree(p1) a = [c * x**(n - i) for (i,), c in p1.terms()] return Add(*a) def _muly(p, x, y): """ Returns ``_mexpand(y**deg*p.subs({x:x / y}))`` """ p1 = poly_from_expr(p, x)[0] n = degree(p1) a = [c * x**i * y**(n - i) for (i,), c in p1.terms()] return Add(*a) def _minpoly_pow(ex, pw, x, dom, mp=None): """ Returns ``minpoly(ex**pw, x)`` Parameters ========== ex : algebraic element pw : rational number x : indeterminate of the polynomial dom: ground domain mp : minimal polynomial of ``p`` Examples ======== >>> from sympy import sqrt, QQ, Rational >>> from sympy.polys.numberfields import _minpoly_pow, minpoly >>> from sympy.abc import x, y >>> p = sqrt(1 + sqrt(2)) >>> _minpoly_pow(p, 2, x, QQ) x**2 - 2*x - 1 >>> minpoly(p**2, x) x**2 - 2*x - 1 >>> _minpoly_pow(y, Rational(1, 3), x, QQ.frac_field(y)) x**3 - y >>> minpoly(y**Rational(1, 3), x) x**3 - y """ pw = sympify(pw) if not mp: mp = _minpoly_compose(ex, x, dom) if not pw.is_rational: raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) if pw < 0: if mp == x: raise ZeroDivisionError('%s is zero' % ex) mp = _invertx(mp, x) if pw == -1: return mp pw = -pw ex = 1/ex y = Dummy(str(x)) mp = mp.subs({x: y}) n, d = pw.as_numer_denom() res = Poly(resultant(mp, x**d - y**n, gens=[y]), x, domain=dom) _, factors = res.factor_list() res = _choose_factor(factors, x, ex**pw, dom) return res.as_expr() def _minpoly_add(x, dom, *a): """ returns ``minpoly(Add(*a), dom, x)`` """ mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom) p = a[0] + a[1] for px in a[2:]: mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp) p = p + px return mp def _minpoly_mul(x, dom, *a): """ returns ``minpoly(Mul(*a), dom, x)`` """ mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom) p = a[0] * a[1] for px in a[2:]: mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp) p = p * px return mp def _minpoly_sin(ex, x): """ Returns the minimal polynomial of ``sin(ex)`` see http://mathworld.wolfram.com/TrigonometryAngles.html """ c, a = ex.args[0].as_coeff_Mul() if a is pi: if c.is_rational: n = c.q q = sympify(n) if q.is_prime: # for a = pi*p/q with q odd prime, using chebyshevt # write sin(q*a) = mp(sin(a))*sin(a); # the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1 a = dup_chebyshevt(n, ZZ) return Add(*[x**(n - i - 1)*a[i] for i in range(n)]) if c.p == 1: if q == 9: return 64*x**6 - 96*x**4 + 36*x**2 - 3 if n % 2 == 1: # for a = pi*p/q with q odd, use # sin(q*a) = 0 to see that the 
minimal polynomial must be # a factor of dup_chebyshevt(n, ZZ) a = dup_chebyshevt(n, ZZ) a = [x**(n - i)*a[i] for i in range(n + 1)] r = Add(*a) _, factors = factor_list(r) res = _choose_factor(factors, x, ex) return res expr = ((1 - cos(2*c*pi))/2)**S.Half res = _minpoly_compose(expr, x, QQ) return res raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) def _minpoly_cos(ex, x): """ Returns the minimal polynomial of ``cos(ex)`` see http://mathworld.wolfram.com/TrigonometryAngles.html """ from sympy import sqrt c, a = ex.args[0].as_coeff_Mul() if a is pi: if c.is_rational: if c.p == 1: if c.q == 7: return 8*x**3 - 4*x**2 - 4*x + 1 if c.q == 9: return 8*x**3 - 6*x + 1 elif c.p == 2: q = sympify(c.q) if q.is_prime: s = _minpoly_sin(ex, x) return _mexpand(s.subs({x:sqrt((1 - x)/2)})) # for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p n = int(c.q) a = dup_chebyshevt(n, ZZ) a = [x**(n - i)*a[i] for i in range(n + 1)] r = Add(*a) - (-1)**c.p _, factors = factor_list(r) res = _choose_factor(factors, x, ex) return res raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) def _minpoly_exp(ex, x): """ Returns the minimal polynomial of ``exp(ex)`` """ c, a = ex.args[0].as_coeff_Mul() q = sympify(c.q) if a == I*pi: if c.is_rational: if c.p == 1 or c.p == -1: if q == 3: return x**2 - x + 1 if q == 4: return x**4 + 1 if q == 6: return x**4 - x**2 + 1 if q == 8: return x**8 + 1 if q == 9: return x**6 - x**3 + 1 if q == 10: return x**8 - x**6 + x**4 - x**2 + 1 if q.is_prime: s = 0 for i in range(q): s += (-x)**i return s # x**(2*q) = product(factors) factors = [cyclotomic_poly(i, x) for i in divisors(2*q)] mp = _choose_factor(factors, x, ex) return mp else: raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) def _minpoly_rootof(ex, x): """ Returns the minimal polynomial of a ``CRootOf`` object. 
""" p = ex.expr p = p.subs({ex.poly.gens[0]:x}) _, factors = factor_list(p, x) result = _choose_factor(factors, x, ex) return result def _minpoly_compose(ex, x, dom): """ Computes the minimal polynomial of an algebraic element using operations on minimal polynomials Examples ======== >>> from sympy import minimal_polynomial, sqrt, Rational >>> from sympy.abc import x, y >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=True) x**2 - 2*x - 1 >>> minimal_polynomial(sqrt(y) + 1/y, x, compose=True) x**2*y**2 - 2*x*y - y**3 + 1 """ if ex.is_Rational: return ex.q*x - ex.p if ex is I: _, factors = factor_list(x**2 + 1, x, domain=dom) return x**2 + 1 if len(factors) == 1 else x - I if ex is GoldenRatio: _, factors = factor_list(x**2 - x - 1, x, domain=dom) if len(factors) == 1: return x**2 - x - 1 else: return _choose_factor(factors, x, (1 + sqrt(5))/2, dom=dom) if ex is TribonacciConstant: _, factors = factor_list(x**3 - x**2 - x - 1, x, domain=dom) if len(factors) == 1: return x**3 - x**2 - x - 1 else: fac = (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3 return _choose_factor(factors, x, fac, dom=dom) if hasattr(dom, 'symbols') and ex in dom.symbols: return x - ex if dom.is_QQ and _is_sum_surds(ex): # eliminate the square roots ex -= x while 1: ex1 = _separate_sq(ex) if ex1 is ex: return ex else: ex = ex1 if ex.is_Add: res = _minpoly_add(x, dom, *ex.args) elif ex.is_Mul: f = Factors(ex).factors r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational) if r[True] and dom == QQ: ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]]) r1 = dict(r[True]) dens = [y.q for y in r1.values()] lcmdens = reduce(lcm, dens, 1) neg1 = S.NegativeOne expn1 = r1.pop(neg1, S.Zero) nums = [base**(y.p*lcmdens // y.q) for base, y in r1.items()] ex2 = Mul(*nums) mp1 = minimal_polynomial(ex1, x) # use the fact that in SymPy canonicalization products of integers # raised to rational powers are organized in relatively prime # bases, and that in ``base**(n/d)`` a perfect power is # simplified with the root # Powers of -1 have to be treated separately to preserve sign. mp2 = ex2.q*x**lcmdens - ex2.p*neg1**(expn1*lcmdens) ex2 = neg1**expn1 * ex2**Rational(1, lcmdens) res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2) else: res = _minpoly_mul(x, dom, *ex.args) elif ex.is_Pow: res = _minpoly_pow(ex.base, ex.exp, x, dom) elif ex.__class__ is sin: res = _minpoly_sin(ex, x) elif ex.__class__ is cos: res = _minpoly_cos(ex, x) elif ex.__class__ is exp: res = _minpoly_exp(ex, x) elif ex.__class__ is CRootOf: res = _minpoly_rootof(ex, x) else: raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) return res @public def minimal_polynomial(ex, x=None, compose=True, polys=False, domain=None): """ Computes the minimal polynomial of an algebraic element. Parameters ========== ex : Expr Element or expression whose minimal polynomial is to be calculated. x : Symbol, optional Independent variable of the minimal polynomial compose : boolean, optional (default=True) Method to use for computing minimal polynomial. If ``compose=True`` (default) then ``_minpoly_compose`` is used, if ``compose=False`` then groebner bases are used. polys : boolean, optional (default=False) If ``True`` returns a ``Poly`` object else an ``Expr`` object. 
domain : Domain, optional Ground domain Notes ===== By default ``compose=True``, the minimal polynomial of the subexpressions of ``ex`` are computed, then the arithmetic operations on them are performed using the resultant and factorization. If ``compose=False``, a bottom-up algorithm is used with ``groebner``. The default algorithm stalls less frequently. If no ground domain is given, it will be generated automatically from the expression. Examples ======== >>> from sympy import minimal_polynomial, sqrt, solve, QQ >>> from sympy.abc import x, y >>> minimal_polynomial(sqrt(2), x) x**2 - 2 >>> minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2))) x - sqrt(2) >>> minimal_polynomial(sqrt(2) + sqrt(3), x) x**4 - 10*x**2 + 1 >>> minimal_polynomial(solve(x**3 + x + 3)[0], x) x**3 + x + 3 >>> minimal_polynomial(sqrt(y), x) x**2 - y """ from sympy.polys.polytools import degree from sympy.polys.domains import FractionField from sympy.core.basic import preorder_traversal ex = sympify(ex) if ex.is_number: # not sure if it's always needed but try it for numbers (issue 8354) ex = _mexpand(ex, recursive=True) for expr in preorder_traversal(ex): if expr.is_AlgebraicNumber: compose = False break if x is not None: x, cls = sympify(x), Poly else: x, cls = Dummy('x'), PurePoly if not domain: if ex.free_symbols: domain = FractionField(QQ, list(ex.free_symbols)) else: domain = QQ if hasattr(domain, 'symbols') and x in domain.symbols: raise GeneratorsError("the variable %s is an element of the ground " "domain %s" % (x, domain)) if compose: result = _minpoly_compose(ex, x, domain) result = result.primitive()[1] c = result.coeff(x**degree(result, x)) if c.is_negative: result = expand_mul(-result) return cls(result, x, field=True) if polys else result.collect(x) if not domain.is_QQ: raise NotImplementedError("groebner method only works for QQ") result = _minpoly_groebner(ex, x, cls) return cls(result, x, field=True) if polys else result.collect(x) def _minpoly_groebner(ex, x, cls): """ Computes the minimal polynomial of an algebraic number using Groebner bases Examples ======== >>> from sympy import minimal_polynomial, sqrt, Rational >>> from sympy.abc import x >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=False) x**2 - 2*x - 1 """ from sympy.polys.polytools import degree from sympy.core.function import expand_multinomial generator = numbered_symbols('a', cls=Dummy) mapping, symbols = {}, {} def update_mapping(ex, exp, base=None): a = next(generator) symbols[ex] = a if base is not None: mapping[ex] = a**exp + base else: mapping[ex] = exp.as_expr(a) return a def bottom_up_scan(ex): if ex.is_Atom: if ex is S.ImaginaryUnit: if ex not in mapping: return update_mapping(ex, 2, 1) else: return symbols[ex] elif ex.is_Rational: return ex elif ex.is_Add: return Add(*[ bottom_up_scan(g) for g in ex.args ]) elif ex.is_Mul: return Mul(*[ bottom_up_scan(g) for g in ex.args ]) elif ex.is_Pow: if ex.exp.is_Rational: if ex.exp < 0: minpoly_base = _minpoly_groebner(ex.base, x, cls) inverse = invert(x, minpoly_base).as_expr() base_inv = inverse.subs(x, ex.base).expand() if ex.exp == -1: return bottom_up_scan(base_inv) else: ex = base_inv**(-ex.exp) if not ex.exp.is_Integer: base, exp = ( ex.base**ex.exp.p).expand(), Rational(1, ex.exp.q) else: base, exp = ex.base, ex.exp base = bottom_up_scan(base) expr = base**exp if expr not in mapping: return update_mapping(expr, 1/exp, -base) else: return symbols[expr] elif ex.is_AlgebraicNumber: if ex.root not in mapping: return update_mapping(ex.root, ex.minpoly) 
else: return symbols[ex.root] raise NotAlgebraic("%s doesn't seem to be an algebraic number" % ex) def simpler_inverse(ex): """ Returns True if it is more likely that the minimal polynomial algorithm works better with the inverse """ if ex.is_Pow: if (1/ex.exp).is_integer and ex.exp < 0: if ex.base.is_Add: return True if ex.is_Mul: hit = True for p in ex.args: if p.is_Add: return False if p.is_Pow: if p.base.is_Add and p.exp > 0: return False if hit: return True return False inverted = False ex = expand_multinomial(ex) if ex.is_AlgebraicNumber: return ex.minpoly.as_expr(x) elif ex.is_Rational: result = ex.q*x - ex.p else: inverted = simpler_inverse(ex) if inverted: ex = ex**-1 res = None if ex.is_Pow and (1/ex.exp).is_Integer: n = 1/ex.exp res = _minimal_polynomial_sq(ex.base, n, x) elif _is_sum_surds(ex): res = _minimal_polynomial_sq(ex, S.One, x) if res is not None: result = res if res is None: bus = bottom_up_scan(ex) F = [x - bus] + list(mapping.values()) G = groebner(F, list(symbols.values()) + [x], order='lex') _, factors = factor_list(G[-1]) # by construction G[-1] has root `ex` result = _choose_factor(factors, x, ex) if inverted: result = _invertx(result, x) if result.coeff(x**degree(result, x)) < 0: result = expand_mul(-result) return result minpoly = minimal_polynomial def _switch_domain(g, K): # An algebraic relation f(a, b) = 0 over Q can also be written # g(b) = 0 where g is in Q(a)[x] and h(a) = 0 where h is in Q(b)[x]. # This function transforms g into h where Q(b) = K. frep = g.rep.inject() hrep = frep.eject(K, front=True) return g.new(hrep, g.gens[0]) def _linsolve(p): # Compute root of linear polynomial. c, d = p.rep.rep return -d/c @public def primitive_element(extension, x=None, *, ex=False, polys=False): """Construct a common number field for all extensions. """ if not extension: raise ValueError("can't compute primitive element for empty extension") if x is not None: x, cls = sympify(x), Poly else: x, cls = Dummy('x'), PurePoly if not ex: gen, coeffs = extension[0], [1] g = minimal_polynomial(gen, x, polys=True) for ext in extension[1:]: _, factors = factor_list(g, extension=ext) g = _choose_factor(factors, x, gen) s, _, g = g.sqf_norm() gen += s*ext coeffs.append(s) if not polys: return g.as_expr(), coeffs else: return cls(g), coeffs gen, coeffs = extension[0], [1] f = minimal_polynomial(gen, x, polys=True) K = QQ.algebraic_field((f, gen)) # incrementally constructed field reps = [K.unit] # representations of extension elements in K for ext in extension[1:]: p = minimal_polynomial(ext, x, polys=True) L = QQ.algebraic_field((p, ext)) _, factors = factor_list(f, domain=L) f = _choose_factor(factors, x, gen) s, g, f = f.sqf_norm() gen += s*ext coeffs.append(s) K = QQ.algebraic_field((f, gen)) h = _switch_domain(g, K) erep = _linsolve(h.gcd(p)) # ext as element of K ogen = K.unit - s*erep # old gen as element of K reps = [dup_eval(_.rep, ogen, K) for _ in reps] + [erep] H = [_.rep for _ in reps] if not polys: return f.as_expr(), coeffs, H else: return f, coeffs, H def is_isomorphism_possible(a, b): """Returns `True` if there is a chance for isomorphism. """ n = a.minpoly.degree() m = b.minpoly.degree() if m % n != 0: return False if n == m: return True da = a.minpoly.discriminant() db = b.minpoly.discriminant() i, k, half = 1, m//n, db//2 while True: p = sieve[i] P = p**k if P > half: break if ((da % p) % 2) and not (db % P): return False i += 1 return True def field_isomorphism_pslq(a, b): """Construct field isomorphism using PSLQ algorithm. 
""" if not a.root.is_real or not b.root.is_real: raise NotImplementedError("PSLQ doesn't support complex coefficients") f = a.minpoly g = b.minpoly.replace(f.gen) n, m, prev = 100, b.minpoly.degree(), None for i in range(1, 5): A = a.root.evalf(n) B = b.root.evalf(n) basis = [1, B] + [ B**i for i in range(2, m) ] + [A] dps, mp.dps = mp.dps, n coeffs = pslq(basis, maxcoeff=int(1e10), maxsteps=1000) mp.dps = dps if coeffs is None: break if coeffs != prev: prev = coeffs else: break coeffs = [S(c)/coeffs[-1] for c in coeffs[:-1]] while not coeffs[-1]: coeffs.pop() coeffs = list(reversed(coeffs)) h = Poly(coeffs, f.gen, domain='QQ') if f.compose(h).rem(g).is_zero: d, approx = len(coeffs) - 1, 0 for i, coeff in enumerate(coeffs): approx += coeff*B**(d - i) if A*approx < 0: return [ -c for c in coeffs ] else: return coeffs elif f.compose(-h).rem(g).is_zero: return [ -c for c in coeffs ] else: n *= 2 return None def field_isomorphism_factor(a, b): """Construct field isomorphism via factorization. """ _, factors = factor_list(a.minpoly, extension=b) for f, _ in factors: if f.degree() == 1: coeffs = f.rep.TC().to_sympy_list() d, terms = len(coeffs) - 1, [] for i, coeff in enumerate(coeffs): terms.append(coeff*b.root**(d - i)) root = Add(*terms) if (a.root - root).evalf(chop=True) == 0: return coeffs if (a.root + root).evalf(chop=True) == 0: return [-c for c in coeffs] return None @public def field_isomorphism(a, b, *, fast=True): """Construct an isomorphism between two number fields. """ a, b = sympify(a), sympify(b) if not a.is_AlgebraicNumber: a = AlgebraicNumber(a) if not b.is_AlgebraicNumber: b = AlgebraicNumber(b) if a == b: return a.coeffs() n = a.minpoly.degree() m = b.minpoly.degree() if n == 1: return [a.root] if m % n != 0: return None if fast: try: result = field_isomorphism_pslq(a, b) if result is not None: return result except NotImplementedError: pass return field_isomorphism_factor(a, b) @public def to_number_field(extension, theta=None, *, gen=None): """Express `extension` in the field generated by `theta`. """ if hasattr(extension, '__iter__'): extension = list(extension) else: extension = [extension] if len(extension) == 1 and type(extension[0]) is tuple: return AlgebraicNumber(extension[0]) minpoly, coeffs = primitive_element(extension, gen, polys=True) root = sum([ coeff*ext for coeff, ext in zip(coeffs, extension) ]) if theta is None: return AlgebraicNumber((minpoly, root)) else: theta = sympify(theta) if not theta.is_AlgebraicNumber: theta = AlgebraicNumber(theta, gen=gen) coeffs = field_isomorphism(root, theta) if coeffs is not None: return AlgebraicNumber(theta, coeffs) else: raise IsomorphismFailed( "%s is not in a subfield of %s" % (root, theta.root)) class IntervalPrinter(MpmathPrinter, LambdaPrinter): """Use ``lambda`` printer but print numbers as ``mpi`` intervals. """ def _print_Integer(self, expr): return "mpi('%s')" % super(PythonCodePrinter, self)._print_Integer(expr) def _print_Rational(self, expr): return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr) def _print_Half(self, expr): return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr) def _print_Pow(self, expr): return super(MpmathPrinter, self)._print_Pow(expr, rational=True) @public def isolate(alg, eps=None, fast=False): """Give a rational isolating interval for an algebraic number. 
""" alg = sympify(alg) if alg.is_Rational: return (alg, alg) elif not alg.is_real: raise NotImplementedError( "complex algebraic numbers are not supported") func = lambdify((), alg, modules="mpmath", printer=IntervalPrinter()) poly = minpoly(alg, polys=True) intervals = poly.intervals(sqf=True) dps, done = mp.dps, False try: while not done: alg = func() for a, b in intervals: if a <= alg.a and alg.b <= b: done = True break else: mp.dps *= 2 finally: mp.dps = dps if eps is not None: a, b = poly.refine_root(a, b, eps=eps, fast=fast) return (a, b)
c7f475a279bb82f36ec44e1d1be37bb6275f2e12cad0a6bcfc505de63e5fdcf0
"""Tools for constructing domains for expressions. """ from sympy.core import sympify from sympy.core.compatibility import ordered from sympy.core.evalf import pure_complex from sympy.polys.domains import ZZ, QQ, ZZ_I, QQ_I, EX from sympy.polys.domains.complexfield import ComplexField from sympy.polys.domains.realfield import RealField from sympy.polys.polyoptions import build_options from sympy.polys.polyutils import parallel_dict_from_basic from sympy.utilities import public def _construct_simple(coeffs, opt): """Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. """ rationals = floats = complexes = algebraics = False float_numbers = [] if opt.extension is True: is_algebraic = lambda coeff: coeff.is_number and coeff.is_algebraic else: is_algebraic = lambda coeff: False for coeff in coeffs: if coeff.is_Rational: if not coeff.is_Integer: rationals = True elif coeff.is_Float: if algebraics: # there are both reals and algebraics -> EX return False else: floats = True float_numbers.append(coeff) else: is_complex = pure_complex(coeff) if is_complex: complexes = True x, y = is_complex if x.is_Rational and y.is_Rational: if not (x.is_Integer and y.is_Integer): rationals = True continue else: floats = True if x.is_Float: float_numbers.append(x) if y.is_Float: float_numbers.append(y) if is_algebraic(coeff): if floats: # there are both algebraics and reals -> EX return False algebraics = True else: # this is a composite domain, e.g. ZZ[X], EX return None # Use the maximum precision of all coefficients for the RR or CC # precision max_prec = max(c._prec for c in float_numbers) if float_numbers else 53 if algebraics: domain, result = _construct_algebraic(coeffs, opt) else: if floats and complexes: domain = ComplexField(prec=max_prec) elif floats: domain = RealField(prec=max_prec) elif rationals or opt.field: domain = QQ_I if complexes else QQ else: domain = ZZ_I if complexes else ZZ result = [domain.from_sympy(coeff) for coeff in coeffs] return domain, result def _construct_algebraic(coeffs, opt): """We know that coefficients are algebraic so construct the extension. """ from sympy.polys.numberfields import primitive_element exts = set() def build_trees(args): trees = [] for a in args: if a.is_Rational: tree = ('Q', QQ.from_sympy(a)) elif a.is_Add: tree = ('+', build_trees(a.args)) elif a.is_Mul: tree = ('*', build_trees(a.args)) else: tree = ('e', a) exts.add(a) trees.append(tree) return trees trees = build_trees(coeffs) exts = list(ordered(exts)) g, span, H = primitive_element(exts, ex=True, polys=True) root = sum([ s*ext for s, ext in zip(span, exts) ]) domain, g = QQ.algebraic_field((g, root)), g.rep.rep exts_dom = [domain.dtype.from_list(h, g, QQ) for h in H] exts_map = dict(zip(exts, exts_dom)) def convert_tree(tree): op, args = tree if op == 'Q': return domain.dtype.from_list([args], g, QQ) elif op == '+': return sum((convert_tree(a) for a in args), domain.zero) elif op == '*': # return prod(convert(a) for a in args) t = convert_tree(args[0]) for a in args[1:]: t *= convert_tree(a) return t elif op == 'e': return exts_map[args] else: raise RuntimeError result = [convert_tree(tree) for tree in trees] return domain, result def _construct_composite(coeffs, opt): """Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). 
""" numers, denoms = [], [] for coeff in coeffs: numer, denom = coeff.as_numer_denom() numers.append(numer) denoms.append(denom) polys, gens = parallel_dict_from_basic(numers + denoms) # XXX: sorting if not gens: return None if opt.composite is None: if any(gen.is_number and gen.is_algebraic for gen in gens): return None # generators are number-like so lets better use EX all_symbols = set() for gen in gens: symbols = gen.free_symbols if all_symbols & symbols: return None # there could be algebraic relations between generators else: all_symbols |= symbols n = len(gens) k = len(polys)//2 numers = polys[:k] denoms = polys[k:] if opt.field: fractions = True else: fractions, zeros = False, (0,)*n for denom in denoms: if len(denom) > 1 or zeros not in denom: fractions = True break coeffs = set() if not fractions: for numer, denom in zip(numers, denoms): denom = denom[zeros] for monom, coeff in numer.items(): coeff /= denom coeffs.add(coeff) numer[monom] = coeff else: for numer, denom in zip(numers, denoms): coeffs.update(list(numer.values())) coeffs.update(list(denom.values())) rationals = floats = complexes = False float_numbers = [] for coeff in coeffs: if coeff.is_Rational: if not coeff.is_Integer: rationals = True elif coeff.is_Float: floats = True float_numbers.append(coeff) else: is_complex = pure_complex(coeff) if is_complex is not None: complexes = True x, y = is_complex if x.is_Rational and y.is_Rational: if not (x.is_Integer and y.is_Integer): rationals = True else: floats = True if x.is_Float: float_numbers.append(x) if y.is_Float: float_numbers.append(y) max_prec = max(c._prec for c in float_numbers) if float_numbers else 53 if floats and complexes: ground = ComplexField(prec=max_prec) elif floats: ground = RealField(prec=max_prec) elif complexes: if rationals: ground = QQ_I else: ground = ZZ_I elif rationals: ground = QQ else: ground = ZZ result = [] if not fractions: domain = ground.poly_ring(*gens) for numer in numers: for monom, coeff in numer.items(): numer[monom] = ground.from_sympy(coeff) result.append(domain(numer)) else: domain = ground.frac_field(*gens) for numer, denom in zip(numers, denoms): for monom, coeff in numer.items(): numer[monom] = ground.from_sympy(coeff) for monom, coeff in denom.items(): denom[monom] = ground.from_sympy(coeff) result.append(domain((numer, denom))) return domain, result def _construct_expression(coeffs, opt): """The last resort case, i.e. use the expression domain. """ domain, result = EX, [] for coeff in coeffs: result.append(domain.from_sympy(coeff)) return domain, result @public def construct_domain(obj, **args): """Construct a minimal domain for the list of coefficients. """ opt = build_options(args) if hasattr(obj, '__iter__'): if isinstance(obj, dict): if not obj: monoms, coeffs = [], [] else: monoms, coeffs = list(zip(*list(obj.items()))) else: coeffs = obj else: coeffs = [obj] coeffs = list(map(sympify, coeffs)) result = _construct_simple(coeffs, opt) if result is not None: if result is not False: domain, coeffs = result else: domain, coeffs = _construct_expression(coeffs, opt) else: if opt.composite is False: result = None else: result = _construct_composite(coeffs, opt) if result is not None: domain, coeffs = result else: domain, coeffs = _construct_expression(coeffs, opt) if hasattr(obj, '__iter__'): if isinstance(obj, dict): return domain, dict(list(zip(monoms, coeffs))) else: return domain, coeffs else: return domain, coeffs[0]
69bd0c3119b64b042c678798e37b25b9c5076e696620cf7cdc1b62ac66571af3
"""py.test hacks to support XFAIL/XPASS""" import sys import functools import os import contextlib import warnings from sympy.utilities.exceptions import SymPyDeprecationWarning ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None) try: import pytest USE_PYTEST = getattr(sys, '_running_pytest', False) except ImportError: USE_PYTEST = False if USE_PYTEST: raises = pytest.raises warns = pytest.warns skip = pytest.skip XFAIL = pytest.mark.xfail SKIP = pytest.mark.skip slow = pytest.mark.slow nocache_fail = pytest.mark.nocache_fail from _pytest.outcomes import Failed else: # Not using pytest so define the things that would have been imported from # there. # _pytest._code.code.ExceptionInfo class ExceptionInfo: def __init__(self, value): self.value = value def __repr__(self): return "<ExceptionInfo {!r}>".format(self.value) def raises(expectedException, code=None): """ Tests that ``code`` raises the exception ``expectedException``. ``code`` may be a callable, such as a lambda expression or function name. If ``code`` is not given or None, ``raises`` will return a context manager for use in ``with`` statements; the code to execute then comes from the scope of the ``with``. ``raises()`` does nothing if the callable raises the expected exception, otherwise it raises an AssertionError. Examples ======== >>> from sympy.testing.pytest import raises >>> raises(ZeroDivisionError, lambda: 1/0) <ExceptionInfo ZeroDivisionError(...)> >>> raises(ZeroDivisionError, lambda: 1/2) Traceback (most recent call last): ... Failed: DID NOT RAISE >>> with raises(ZeroDivisionError): ... n = 1/0 >>> with raises(ZeroDivisionError): ... n = 1/2 Traceback (most recent call last): ... Failed: DID NOT RAISE Note that you cannot test multiple statements via ``with raises``: >>> with raises(ZeroDivisionError): ... n = 1/0 # will execute and raise, aborting the ``with`` ... n = 9999/0 # never executed This is just what ``with`` is supposed to do: abort the contained statement sequence at the first exception and let the context manager deal with the exception. To test multiple statements, you'll need a separate ``with`` for each: >>> with raises(ZeroDivisionError): ... n = 1/0 # will execute and raise >>> with raises(ZeroDivisionError): ... 
n = 9999/0 # will also execute and raise """ if code is None: return RaisesContext(expectedException) elif callable(code): try: code() except expectedException as e: return ExceptionInfo(e) raise Failed("DID NOT RAISE") elif isinstance(code, str): raise TypeError( '\'raises(xxx, "code")\' has been phased out; ' 'change \'raises(xxx, "expression")\' ' 'to \'raises(xxx, lambda: expression)\', ' '\'raises(xxx, "statement")\' ' 'to \'with raises(xxx): statement\'') else: raise TypeError( 'raises() expects a callable for the 2nd argument.') class RaisesContext: def __init__(self, expectedException): self.expectedException = expectedException def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: raise Failed("DID NOT RAISE") return issubclass(exc_type, self.expectedException) class XFail(Exception): pass class XPass(Exception): pass class Skipped(Exception): pass class Failed(Exception): # type: ignore pass def XFAIL(func): def wrapper(): try: func() except Exception as e: message = str(e) if message != "Timeout": raise XFail(func.__name__) else: raise Skipped("Timeout") raise XPass(func.__name__) wrapper = functools.update_wrapper(wrapper, func) return wrapper def skip(str): raise Skipped(str) def SKIP(reason): """Similar to ``skip()``, but this is a decorator. """ def wrapper(func): def func_wrapper(): raise Skipped(reason) func_wrapper = functools.update_wrapper(func_wrapper, func) return func_wrapper return wrapper def slow(func): func._slow = True def func_wrapper(): func() func_wrapper = functools.update_wrapper(func_wrapper, func) func_wrapper.__wrapped__ = func return func_wrapper def nocache_fail(func): "Dummy decorator for marking tests that fail when cache is disabled" return func @contextlib.contextmanager def warns(warningcls, *, match=''): '''Like raises but tests that warnings are emitted. >>> from sympy.testing.pytest import warns >>> import warnings >>> with warns(UserWarning): ... warnings.warn('deprecated', UserWarning) >>> with warns(UserWarning): ... pass Traceback (most recent call last): ... Failed: DID NOT WARN. No warnings of type UserWarning\ was emitted. The list of emitted warnings is: []. ''' # Absorbs all warnings in warnrec with warnings.catch_warnings(record=True) as warnrec: # Hide all warnings but make sure that our warning is emitted warnings.simplefilter("ignore") warnings.filterwarnings("always", match, warningcls) # Now run the test yield # Raise if expected warning not found if not any(issubclass(w.category, warningcls) for w in warnrec): msg = ('Failed: DID NOT WARN.' ' No warnings of type %s was emitted.' ' The list of emitted warnings is: %s.' ) % (warningcls, [w.message for w in warnrec]) raise Failed(msg) @contextlib.contextmanager def warns_deprecated_sympy(): '''Shorthand for ``warns(SymPyDeprecationWarning)`` This is the recommended way to test that ``SymPyDeprecationWarning`` is emitted for deprecated features in SymPy. To test for other warnings use ``warns``. To suppress warnings without asserting that they are emitted use ``ignore_warnings``. >>> from sympy.testing.pytest import warns_deprecated_sympy >>> from sympy.utilities.exceptions import SymPyDeprecationWarning >>> with warns_deprecated_sympy(): ... SymPyDeprecationWarning("Don't use", feature="old thing", ... deprecated_since_version="1.0", issue=123).warn() >>> with warns_deprecated_sympy(): ... pass Traceback (most recent call last): ... Failed: DID NOT WARN. No warnings of type \ SymPyDeprecationWarning was emitted. 
The list of emitted warnings is: []. ''' with warns(SymPyDeprecationWarning): yield @contextlib.contextmanager def ignore_warnings(warningcls): '''Context manager to suppress warnings during tests. This function is useful for suppressing warnings during tests. The warns function should be used to assert that a warning is raised. The ignore_warnings function is useful in situation when the warning is not guaranteed to be raised (e.g. on importing a module) or if the warning comes from third-party code. When the warning is coming (reliably) from SymPy the warns function should be preferred to ignore_warnings. >>> from sympy.testing.pytest import ignore_warnings >>> import warnings Here's a warning: >>> with warnings.catch_warnings(): # reset warnings in doctest ... warnings.simplefilter('error') ... warnings.warn('deprecated', UserWarning) Traceback (most recent call last): ... UserWarning: deprecated Let's suppress it with ignore_warnings: >>> with warnings.catch_warnings(): # reset warnings in doctest ... warnings.simplefilter('error') ... with ignore_warnings(UserWarning): ... warnings.warn('deprecated', UserWarning) (No warning emitted) ''' # Absorbs all warnings in warnrec with warnings.catch_warnings(record=True) as warnrec: # Make sure our warning doesn't get filtered warnings.simplefilter("always", warningcls) # Now run the test yield # Reissue any warnings that we aren't testing for for w in warnrec: if not issubclass(w.category, warningcls): warnings.warn_explicit(w.message, w.category, w.filename, w.lineno)
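# ----------------------------------------------------------------------
# A minimal usage sketch of the helpers above.  Whether the pytest
# implementations or the fallbacks defined in this module are picked up
# depends on how the interpreter was started, but both support this
# pattern.
if __name__ == "__main__":
    import warnings
    from sympy.testing.pytest import raises, warns

    # passes because the expected exception is raised inside the block
    with raises(ZeroDivisionError):
        1/0

    # passes because a warning of the expected class is emitted
    with warns(UserWarning):
        warnings.warn("just an example", UserWarning)

    print("raises() and warns() behaved as expected")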
a7dcf592b9a5cf37114c5c1eebf4f444c22060753e736ac023ffca5e195941d9
"""benchmarking through py.test""" import py from py.__.test.item import Item from py.__.test.terminal.terminal import TerminalSession from math import ceil as _ceil, floor as _floor, log10 import timeit from inspect import getsource # from IPython.Magic.magic_timeit units = ["s", "ms", "us", "ns"] scaling = [1, 1e3, 1e6, 1e9] unitn = {s: i for i, s in enumerate(units)} precision = 3 # like py.test Directory but scan for 'bench_<smth>.py' class Directory(py.test.collect.Directory): def filefilter(self, path): b = path.purebasename ext = path.ext return b.startswith('bench_') and ext == '.py' # like py.test Module but scane for 'bench_<smth>' and 'timeit_<smth>' class Module(py.test.collect.Module): def funcnamefilter(self, name): return name.startswith('bench_') or name.startswith('timeit_') # Function level benchmarking driver class Timer(timeit.Timer): def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()): # copy of timeit.Timer.__init__ # similarity index 95% self.timer = timer stmt = timeit.reindent(stmt, 8) setup = timeit.reindent(setup, 4) src = timeit.template % {'stmt': stmt, 'setup': setup} self.src = src # Save for traceback display code = compile(src, timeit.dummy_src_name, "exec") ns = {} #exec(code, globals(), ns) -- original timeit code exec(code, globals, ns) # -- we use caller-provided globals instead self.inner = ns["inner"] class Function(py.__.test.item.Function): def __init__(self, *args, **kw): super().__init__(*args, **kw) self.benchtime = None self.benchtitle = None def execute(self, target, *args): # get func source without first 'def func(...):' line src = getsource(target) src = '\n'.join( src.splitlines()[1:] ) # extract benchmark title if target.func_doc is not None: self.benchtitle = target.func_doc else: self.benchtitle = src.splitlines()[0].strip() # XXX we ignore args timer = Timer(src, globals=target.func_globals) if self.name.startswith('timeit_'): # from IPython.Magic.magic_timeit repeat = 3 number = 1 for i in range(1, 10): t = timer.timeit(number) if t >= 0.2: number *= (0.2 / t) number = int(_ceil(number)) break if t <= 0.02: # we are not close enough to that 0.2s number *= 10 else: # since we are very close to be > 0.2s we'd better adjust number # so that timing time is not too high number *= (0.2 / t) number = int(_ceil(number)) break self.benchtime = min(timer.repeat(repeat, number)) / number # 'bench_<smth>' else: self.benchtime = timer.timeit(1) class BenchSession(TerminalSession): def header(self, colitems): super().header(colitems) def footer(self, colitems): super().footer(colitems) self.out.write('\n') self.print_bench_results() def print_bench_results(self): self.out.write('==============================\n') self.out.write(' *** BENCHMARKING RESULTS *** \n') self.out.write('==============================\n') self.out.write('\n') # benchname, time, benchtitle results = [] for item, outcome in self._memo: if isinstance(item, Item): best = item.benchtime if best is None: # skipped or failed benchmarks tstr = '---' else: # from IPython.Magic.magic_timeit if best > 0.0: order = min(-int(_floor(log10(best)) // 3), 3) else: order = 3 tstr = "%.*g %s" % ( precision, best * scaling[order], units[order]) results.append( [item.name, tstr, item.benchtitle] ) # dot/unit align second column # FIXME simpler? this is crappy -- shame on me... 
wm = [0]*len(units) we = [0]*len(units) for s in results: tstr = s[1] n, u = tstr.split() # unit n un = unitn[u] try: m, e = n.split('.') except ValueError: m, e = n, '' wm[un] = max(len(m), wm[un]) we[un] = max(len(e), we[un]) for s in results: tstr = s[1] n, u = tstr.split() un = unitn[u] try: m, e = n.split('.') except ValueError: m, e = n, '' m = m.rjust(wm[un]) e = e.ljust(we[un]) if e.strip(): n = '.'.join((m, e)) else: n = ' '.join((m, e)) # let's put the number into the right place txt = '' for i in range(len(units)): if i == un: txt += n else: txt += ' '*(wm[i] + we[i] + 1) s[1] = '%s %s' % (txt, u) # align all columns besides the last one for i in range(2): w = max(len(s[i]) for s in results) for s in results: s[i] = s[i].ljust(w) # show results for s in results: self.out.write('%s | %s | %s\n' % tuple(s)) def main(args=None): # hook our Directory/Module/Function as defaults from py.__.test import defaultconftest defaultconftest.Directory = Directory defaultconftest.Module = Module defaultconftest.Function = Function # hook BenchSession as py.test session config = py.test.config config._getsessionclass = lambda: BenchSession py.test.cmdline.main(args)
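# ----------------------------------------------------------------------
# The hooks above target the historical ``py.__.test`` internals, so
# they are not runnable against current py/pytest releases.  The
# stand-alone sketch below reproduces only the loop-count calibration
# used in ``Function.execute`` for ``timeit_`` benchmarks: grow
# ``number`` until one batch takes roughly 0.2 s, then report the best
# of ``repeat`` batches.  The benchmarked statement is an arbitrary
# placeholder.
if __name__ == "__main__":
    import timeit
    from math import ceil

    timer = timeit.Timer("sum(range(1000))")
    repeat, number = 3, 1
    for _ in range(9):
        t = timer.timeit(number)
        if t >= 0.2:
            # close enough: rescale so one batch takes about 0.2 s
            number = int(ceil(number * 0.2 / t))
            break
        if t <= 0.02:
            number *= 10  # far too fast, grow aggressively
        else:
            # nearly there: rescale and stop adjusting
            number = int(ceil(number * 0.2 / t))
            break
    best = min(timer.repeat(repeat, number)) / number
    print("%d loops, best of %d: %.3g s per loop" % (number, repeat, best))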
964ce56591524bc2f111102476ceaf783851228bda498707eef1f78932958d0e
""" This is our testing framework. Goals: * it should be compatible with py.test and operate very similarly (or identically) * doesn't require any external dependencies * preferably all the functionality should be in this file only * no magic, just import the test file and execute the test functions, that's it * portable """ from __future__ import print_function, division import os import sys import platform import inspect import traceback import pdb import re import linecache import time from fnmatch import fnmatch from timeit import default_timer as clock import doctest as pdoctest # avoid clashing with our doctest() function from doctest import DocTestFinder, DocTestRunner import random import subprocess import shutil import signal import stat import tempfile import warnings from contextlib import contextmanager from sympy.core.cache import clear_cache from sympy.core.compatibility import (PY3, unwrap) from sympy.external import import_module IS_WINDOWS = (os.name == 'nt') ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None) # emperically generated list of the proportion of time spent running # an even split of tests. This should periodically be regenerated. # A list of [.6, .1, .3] would mean that if the tests are evenly split # into '1/3', '2/3', '3/3', the first split would take 60% of the time, # the second 10% and the third 30%. These lists are normalized to sum # to 1, so [60, 10, 30] has the same behavior as [6, 1, 3] or [.6, .1, .3]. # # This list can be generated with the code: # from time import time # import sympy # import os # os.environ["TRAVIS_BUILD_NUMBER"] = '2' # Mock travis to get more correct densities # delays, num_splits = [], 30 # for i in range(1, num_splits + 1): # tic = time() # sympy.test(split='{}/{}'.format(i, num_splits), time_balance=False) # Add slow=True for slow tests # delays.append(time() - tic) # tot = sum(delays) # print([round(x / tot, 4) for x in delays]) SPLIT_DENSITY = [ 0.0059, 0.0027, 0.0068, 0.0011, 0.0006, 0.0058, 0.0047, 0.0046, 0.004, 0.0257, 0.0017, 0.0026, 0.004, 0.0032, 0.0016, 0.0015, 0.0004, 0.0011, 0.0016, 0.0014, 0.0077, 0.0137, 0.0217, 0.0074, 0.0043, 0.0067, 0.0236, 0.0004, 0.1189, 0.0142, 0.0234, 0.0003, 0.0003, 0.0047, 0.0006, 0.0013, 0.0004, 0.0008, 0.0007, 0.0006, 0.0139, 0.0013, 0.0007, 0.0051, 0.002, 0.0004, 0.0005, 0.0213, 0.0048, 0.0016, 0.0012, 0.0014, 0.0024, 0.0015, 0.0004, 0.0005, 0.0007, 0.011, 0.0062, 0.0015, 0.0021, 0.0049, 0.0006, 0.0006, 0.0011, 0.0006, 0.0019, 0.003, 0.0044, 0.0054, 0.0057, 0.0049, 0.0016, 0.0006, 0.0009, 0.0006, 0.0012, 0.0006, 0.0149, 0.0532, 0.0076, 0.0041, 0.0024, 0.0135, 0.0081, 0.2209, 0.0459, 0.0438, 0.0488, 0.0137, 0.002, 0.0003, 0.0008, 0.0039, 0.0024, 0.0005, 0.0004, 0.003, 0.056, 0.0026] SPLIT_DENSITY_SLOW = [0.0086, 0.0004, 0.0568, 0.0003, 0.0032, 0.0005, 0.0004, 0.0013, 0.0016, 0.0648, 0.0198, 0.1285, 0.098, 0.0005, 0.0064, 0.0003, 0.0004, 0.0026, 0.0007, 0.0051, 0.0089, 0.0024, 0.0033, 0.0057, 0.0005, 0.0003, 0.001, 0.0045, 0.0091, 0.0006, 0.0005, 0.0321, 0.0059, 0.1105, 0.216, 0.1489, 0.0004, 0.0003, 0.0006, 0.0483] class Skipped(Exception): pass class TimeOutError(Exception): pass class DependencyError(Exception): pass # add more flags ?? future_flags = division.compiler_flag def _indent(s, indent=4): """ Add the given number of space characters to the beginning of every non-blank line in ``s``, and return the result. If the string ``s`` is Unicode, it is encoded using the stdout encoding and the ``backslashreplace`` error handler. 
""" # This regexp matches the start of non-blank lines: return re.sub('(?m)^(?!$)', indent*' ', s) pdoctest._indent = _indent # type: ignore # override reporter to maintain windows and python3 def _report_failure(self, out, test, example, got): """ Report that the given example failed. """ s = self._checker.output_difference(example, got, self.optionflags) s = s.encode('raw_unicode_escape').decode('utf8', 'ignore') out(self._failure_header(test, example) + s) if PY3 and IS_WINDOWS: DocTestRunner.report_failure = _report_failure # type: ignore def convert_to_native_paths(lst): """ Converts a list of '/' separated paths into a list of native (os.sep separated) paths and converts to lowercase if the system is case insensitive. """ newlst = [] for i, rv in enumerate(lst): rv = os.path.join(*rv.split("/")) # on windows the slash after the colon is dropped if sys.platform == "win32": pos = rv.find(':') if pos != -1: if rv[pos + 1] != '\\': rv = rv[:pos + 1] + '\\' + rv[pos + 1:] newlst.append(os.path.normcase(rv)) return newlst def get_sympy_dir(): """ Returns the root sympy directory and set the global value indicating whether the system is case sensitive or not. """ this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..") sympy_dir = os.path.normpath(sympy_dir) return os.path.normcase(sympy_dir) def setup_pprint(): from sympy import pprint_use_unicode, init_printing import sympy.interactive.printing as interactive_printing # force pprint to be in ascii mode in doctests use_unicode_prev = pprint_use_unicode(False) # hook our nice, hash-stable strprinter init_printing(pretty_print=False) # Prevent init_printing() in doctests from affecting other doctests interactive_printing.NO_GLOBAL = True return use_unicode_prev @contextmanager def raise_on_deprecated(): """Context manager to make DeprecationWarning raise an error This is to catch SymPyDeprecationWarning from library code while running tests and doctests. It is important to use this context manager around each individual test/doctest in case some tests modify the warning filters. """ with warnings.catch_warnings(): warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*') yield def run_in_subprocess_with_hash_randomization( function, function_args=(), function_kwargs=None, command=sys.executable, module='sympy.testing.runtests', force=False): """ Run a function in a Python subprocess with hash randomization enabled. If hash randomization is not supported by the version of Python given, it returns False. Otherwise, it returns the exit value of the command. The function is passed to sys.exit(), so the return value of the function will be the return value. The environment variable PYTHONHASHSEED is used to seed Python's hash randomization. If it is set, this function will return False, because starting a new subprocess is unnecessary in that case. If it is not set, one is set at random, and the tests are run. Note that if this environment variable is set when Python starts, hash randomization is automatically enabled. To force a subprocess to be created even if PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a subprocess in Python versions that do not support hash randomization (see below), because those versions of Python do not support the ``-R`` flag. ``function`` should be a string name of a function that is importable from the module ``module``, like "_test". The default for ``module`` is "sympy.testing.runtests". 
``function_args`` and ``function_kwargs`` should be a repr-able tuple and dict, respectively. The default Python command is sys.executable, which is the currently running Python command. This function is necessary because the seed for hash randomization must be set by the environment variable before Python starts. Hence, in order to use a predetermined seed for tests, we must start Python in a separate subprocess. Hash randomization was added in the minor Python versions 2.6.8, 2.7.3, 3.1.5, and 3.2.3, and is enabled by default in all Python versions after and including 3.3.0. Examples ======== >>> from sympy.testing.runtests import ( ... run_in_subprocess_with_hash_randomization) >>> # run the core tests in verbose mode >>> run_in_subprocess_with_hash_randomization("_test", ... function_args=("core",), ... function_kwargs={'verbose': True}) # doctest: +SKIP # Will return 0 if sys.executable supports hash randomization and tests # pass, 1 if they fail, and False if it does not support hash # randomization. """ cwd = get_sympy_dir() # Note, we must return False everywhere, not None, as subprocess.call will # sometimes return None. # First check if the Python version supports hash randomization # If it doesn't have this support, it won't recognize the -R flag p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd) p.communicate() if p.returncode != 0: return False hash_seed = os.getenv("PYTHONHASHSEED") if not hash_seed: os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32)) else: if not force: return False function_kwargs = function_kwargs or {} # Now run the command commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" % (module, function, function, repr(function_args), repr(function_kwargs))) try: p = subprocess.Popen([command, "-R", "-c", commandstring], cwd=cwd) p.communicate() except KeyboardInterrupt: p.wait() finally: # Put the environment variable back, so that it reads correctly for # the current Python process. if hash_seed is None: del os.environ["PYTHONHASHSEED"] else: os.environ["PYTHONHASHSEED"] = hash_seed return p.returncode def run_all_tests(test_args=(), test_kwargs=None, doctest_args=(), doctest_kwargs=None, examples_args=(), examples_kwargs=None): """ Run all tests. Right now, this runs the regular tests (bin/test), the doctests (bin/doctest), the examples (examples/all.py), and the sage tests (see sympy/external/tests/test_sage.py). This is what ``setup.py test`` uses. You can pass arguments and keyword arguments to the test functions that support them (for now, test, doctest, and the examples). See the docstrings of those functions for a description of the available options. For example, to run the solvers tests with colors turned off: >>> from sympy.testing.runtests import run_all_tests >>> run_all_tests(test_args=("solvers",), ... 
test_kwargs={"colors:False"}) # doctest: +SKIP """ cwd = get_sympy_dir() tests_successful = True test_kwargs = test_kwargs or {} doctest_kwargs = doctest_kwargs or {} examples_kwargs = examples_kwargs or {'quiet': True} try: # Regular tests if not test(*test_args, **test_kwargs): # some regular test fails, so set the tests_successful # flag to false and continue running the doctests tests_successful = False # Doctests print() if not doctest(*doctest_args, **doctest_kwargs): tests_successful = False # Examples print() sys.path.append("examples") # examples/all.py from all import run_examples # type: ignore if not run_examples(*examples_args, **examples_kwargs): tests_successful = False # Sage tests if sys.platform != "win32" and not PY3 and os.path.exists("bin/test"): # run Sage tests; Sage currently doesn't support Windows or Python 3 # Only run Sage tests if 'bin/test' is present (it is missing from # our release because everything in the 'bin' directory gets # installed). dev_null = open(os.devnull, 'w') if subprocess.call("sage -v", shell=True, stdout=dev_null, stderr=dev_null) == 0: if subprocess.call("sage -python bin/test " "sympy/external/tests/test_sage.py", shell=True, cwd=cwd) != 0: tests_successful = False if tests_successful: return else: # Return nonzero exit code sys.exit(1) except KeyboardInterrupt: print() print("DO *NOT* COMMIT!") sys.exit(1) def test(*paths, subprocess=True, rerun=0, **kwargs): """ Run tests in the specified test_*.py files. Tests in a particular test_*.py file are run if any of the given strings in ``paths`` matches a part of the test file's path. If ``paths=[]``, tests in all test_*.py files are run. Notes: - If sort=False, tests are run in random order (not default). - Paths can be entered in native system format or in unix, forward-slash format. - Files that are on the blacklist can be tested by providing their path; they are only excluded if no paths are given. **Explanation of test results** ====== =============================================================== Output Meaning ====== =============================================================== . passed F failed X XPassed (expected to fail but passed) f XFAILed (expected to fail and indeed failed) s skipped w slow T timeout (e.g., when ``--timeout`` is used) K KeyboardInterrupt (when running the slow tests with ``--slow``, you can interrupt one of them without killing the test runner) ====== =============================================================== Colors have no additional meaning and are used just to facilitate interpreting the output. Examples ======== >>> import sympy Run all tests: >>> sympy.test() # doctest: +SKIP Run one file: >>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP >>> sympy.test("_basic") # doctest: +SKIP Run all tests in sympy/functions/ and some particular file: >>> sympy.test("sympy/core/tests/test_basic.py", ... "sympy/functions") # doctest: +SKIP Run all tests in sympy/core and sympy/utilities: >>> sympy.test("/core", "/util") # doctest: +SKIP Run specific test from a file: >>> sympy.test("sympy/core/tests/test_basic.py", ... 
kw="test_equality") # doctest: +SKIP Run specific test from any file: >>> sympy.test(kw="subs") # doctest: +SKIP Run the tests with verbose mode on: >>> sympy.test(verbose=True) # doctest: +SKIP Don't sort the test output: >>> sympy.test(sort=False) # doctest: +SKIP Turn on post-mortem pdb: >>> sympy.test(pdb=True) # doctest: +SKIP Turn off colors: >>> sympy.test(colors=False) # doctest: +SKIP Force colors, even when the output is not to a terminal (this is useful, e.g., if you are piping to ``less -r`` and you still want colors) >>> sympy.test(force_colors=False) # doctest: +SKIP The traceback verboseness can be set to "short" or "no" (default is "short") >>> sympy.test(tb='no') # doctest: +SKIP The ``split`` option can be passed to split the test run into parts. The split currently only splits the test files, though this may change in the future. ``split`` should be a string of the form 'a/b', which will run part ``a`` of ``b``. For instance, to run the first half of the test suite: >>> sympy.test(split='1/2') # doctest: +SKIP The ``time_balance`` option can be passed in conjunction with ``split``. If ``time_balance=True`` (the default for ``sympy.test``), sympy will attempt to split the tests such that each split takes equal time. This heuristic for balancing is based on pre-recorded test data. >>> sympy.test(split='1/2', time_balance=True) # doctest: +SKIP You can disable running the tests in a separate subprocess using ``subprocess=False``. This is done to support seeding hash randomization, which is enabled by default in the Python versions where it is supported. If subprocess=False, hash randomization is enabled/disabled according to whether it has been enabled or not in the calling Python process. However, even if it is enabled, the seed cannot be printed unless it is called from a new Python process. Hash randomization was added in the minor Python versions 2.6.8, 2.7.3, 3.1.5, and 3.2.3, and is enabled by default in all Python versions after and including 3.3.0. If hash randomization is not supported ``subprocess=False`` is used automatically. >>> sympy.test(subprocess=False) # doctest: +SKIP To set the hash randomization seed, set the environment variable ``PYTHONHASHSEED`` before running the tests. This can be done from within Python using >>> import os >>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP Or from the command line using $ PYTHONHASHSEED=42 ./bin/test If the seed is not set, a random seed will be chosen. Note that to reproduce the same hash values, you must use both the same seed as well as the same architecture (32-bit vs. 64-bit). 
""" # count up from 0, do not print 0 print_counter = lambda i : (print("rerun %d" % (rerun-i)) if rerun-i else None) if subprocess: # loop backwards so last i is 0 for i in range(rerun, -1, -1): print_counter(i) ret = run_in_subprocess_with_hash_randomization("_test", function_args=paths, function_kwargs=kwargs) if ret is False: break val = not bool(ret) # exit on the first failure or if done if not val or i == 0: return val # rerun even if hash randomization is not supported for i in range(rerun, -1, -1): print_counter(i) val = not bool(_test(*paths, **kwargs)) if not val or i == 0: return val def _test(*paths, verbose=False, tb="short", kw=None, pdb=False, colors=True, force_colors=False, sort=True, seed=None, timeout=False, fail_on_timeout=False, slow=False, enhance_asserts=False, split=None, time_balance=True, blacklist=('sympy/integrals/rubi/rubi_tests/tests',), fast_threshold=None, slow_threshold=None): """ Internal function that actually runs the tests. All keyword arguments from ``test()`` are passed to this function except for ``subprocess``. Returns 0 if tests passed and 1 if they failed. See the docstring of ``test()`` for more information. """ kw = kw or () # ensure that kw is a tuple if isinstance(kw, str): kw = (kw,) post_mortem = pdb if seed is None: seed = random.randrange(100000000) if ON_TRAVIS and timeout is False: # Travis times out if no activity is seen for 10 minutes. timeout = 595 fail_on_timeout = True if ON_TRAVIS: # pyglet does not work on Travis blacklist = list(blacklist) + ['sympy/plotting/pygletplot/tests'] blacklist = convert_to_native_paths(blacklist) r = PyTestReporter(verbose=verbose, tb=tb, colors=colors, force_colors=force_colors, split=split) t = SymPyTests(r, kw, post_mortem, seed, fast_threshold=fast_threshold, slow_threshold=slow_threshold) test_files = t.get_test_files('sympy') not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted else: paths = convert_to_native_paths(paths) matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break density = None if time_balance: if slow: density = SPLIT_DENSITY_SLOW else: density = SPLIT_DENSITY if split: matched = split_list(matched, split, density=density) t._testfiles.extend(matched) return int(not t.test(sort=sort, timeout=timeout, slow=slow, enhance_asserts=enhance_asserts, fail_on_timeout=fail_on_timeout)) def doctest(*paths, subprocess=True, rerun=0, **kwargs): r""" Runs doctests in all \*.py files in the sympy directory which match any of the given strings in ``paths`` or all tests if paths=[]. Notes: - Paths can be entered in native system format or in unix, forward-slash format. - Files that are on the blacklist can be tested by providing their path; they are only excluded if no paths are given. Examples ======== >>> import sympy Run all tests: >>> sympy.doctest() # doctest: +SKIP Run one file: >>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP >>> sympy.doctest("polynomial.rst") # doctest: +SKIP Run all tests in sympy/functions/ and some particular file: >>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP Run any file having polynomial in its name, doc/src/modules/polynomial.rst, sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py: >>> sympy.doctest("polynomial") # doctest: +SKIP The ``split`` option can be passed to split the test run into parts. 
The split currently only splits the test files, though this may change in the future. ``split`` should be a string of the form 'a/b', which will run part ``a`` of ``b``. Note that the regular doctests and the Sphinx doctests are split independently. For instance, to run the first half of the test suite: >>> sympy.doctest(split='1/2') # doctest: +SKIP The ``subprocess`` and ``verbose`` options are the same as with the function ``test()``. See the docstring of that function for more information. """ # count up from 0, do not print 0 print_counter = lambda i : (print("rerun %d" % (rerun-i)) if rerun-i else None) if subprocess: # loop backwards so last i is 0 for i in range(rerun, -1, -1): print_counter(i) ret = run_in_subprocess_with_hash_randomization("_doctest", function_args=paths, function_kwargs=kwargs) if ret is False: break val = not bool(ret) # exit on the first failure or if done if not val or i == 0: return val # rerun even if hash randomization is not supported for i in range(rerun, -1, -1): print_counter(i) val = not bool(_doctest(*paths, **kwargs)) if not val or i == 0: return val def _get_doctest_blacklist(): '''Get the default blacklist for the doctests''' blacklist = [] blacklist.extend([ "doc/src/modules/plotting.rst", # generates live plots "doc/src/modules/physics/mechanics/autolev_parser.rst", "sympy/galgebra.py", # no longer part of SymPy "sympy/this.py", # prints text "sympy/physics/gaussopt.py", # raises deprecation warning "sympy/matrices/densearith.py", # raises deprecation warning "sympy/matrices/densesolve.py", # raises deprecation warning "sympy/matrices/densetools.py", # raises deprecation warning "sympy/printing/ccode.py", # backwards compatibility shim, importing it breaks the codegen doctests "sympy/printing/fcode.py", # backwards compatibility shim, importing it breaks the codegen doctests "sympy/printing/cxxcode.py", # backwards compatibility shim, importing it breaks the codegen doctests "sympy/parsing/autolev/_antlr/autolevlexer.py", # generated code "sympy/parsing/autolev/_antlr/autolevparser.py", # generated code "sympy/parsing/autolev/_antlr/autolevlistener.py", # generated code "sympy/parsing/latex/_antlr/latexlexer.py", # generated code "sympy/parsing/latex/_antlr/latexparser.py", # generated code "sympy/integrals/rubi/rubi.py", "sympy/plotting/pygletplot/__init__.py", # crashes on some systems "sympy/plotting/pygletplot/plot.py", # crashes on some systems ]) # autolev parser tests num = 12 for i in range (1, num+1): blacklist.append("sympy/parsing/autolev/test-examples/ruletest" + str(i) + ".py") blacklist.extend(["sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py", "sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py", "sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py", "sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py"]) if import_module('numpy') is None: blacklist.extend([ "sympy/plotting/experimental_lambdify.py", "sympy/plotting/plot_implicit.py", "examples/advanced/autowrap_integrators.py", "examples/advanced/autowrap_ufuncify.py", "examples/intermediate/sample.py", "examples/intermediate/mplot2d.py", "examples/intermediate/mplot3d.py", "doc/src/modules/numeric-computation.rst" ]) else: if import_module('matplotlib') is None: blacklist.extend([ "examples/intermediate/mplot2d.py", "examples/intermediate/mplot3d.py" ]) else: # Use a non-windowed backend, so that the tests work on Travis import matplotlib matplotlib.use('Agg') if ON_TRAVIS or 
import_module('pyglet') is None: blacklist.extend(["sympy/plotting/pygletplot"]) if import_module('theano') is None: blacklist.extend([ "sympy/printing/theanocode.py", "doc/src/modules/numeric-computation.rst", ]) if import_module('antlr4') is None: blacklist.extend([ "sympy/parsing/autolev/__init__.py", "sympy/parsing/latex/_parse_latex_antlr.py", ]) if import_module('lfortran') is None: #throws ImportError when lfortran not installed blacklist.extend([ "sympy/parsing/sym_expr.py", ]) # disabled because of doctest failures in asmeurer's bot blacklist.extend([ "sympy/utilities/autowrap.py", "examples/advanced/autowrap_integrators.py", "examples/advanced/autowrap_ufuncify.py" ]) # blacklist these modules until issue 4840 is resolved blacklist.extend([ "sympy/conftest.py", # Python 2.7 issues "sympy/testing/benchmarking.py", ]) # These are deprecated stubs to be removed: blacklist.extend([ "sympy/utilities/benchmarking.py", "sympy/utilities/tmpfiles.py", "sympy/utilities/pytest.py", "sympy/utilities/runtests.py", "sympy/utilities/quality_unicode.py", "sympy/utilities/randtest.py", ]) blacklist = convert_to_native_paths(blacklist) return blacklist def _doctest(*paths, **kwargs): """ Internal function that actually runs the doctests. All keyword arguments from ``doctest()`` are passed to this function except for ``subprocess``. Returns 0 if tests passed and 1 if they failed. See the docstrings of ``doctest()`` and ``test()`` for more information. """ from sympy import pprint_use_unicode normal = kwargs.get("normal", False) verbose = kwargs.get("verbose", False) colors = kwargs.get("colors", True) force_colors = kwargs.get("force_colors", False) blacklist = kwargs.get("blacklist", []) split = kwargs.get('split', None) blacklist.extend(_get_doctest_blacklist()) # Use a non-windowed backend, so that the tests work on Travis if import_module('matplotlib') is not None: import matplotlib matplotlib.use('Agg') # Disable warnings for external modules import sympy.external sympy.external.importtools.WARN_OLD_VERSION = False sympy.external.importtools.WARN_NOT_INSTALLED = False # Disable showing up of plots from sympy.plotting.plot import unset_show unset_show() r = PyTestReporter(verbose, split=split, colors=colors,\ force_colors=force_colors) t = SymPyDocTests(r, normal) test_files = t.get_test_files('sympy') test_files.extend(t.get_test_files('examples', init_only=False)) not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted else: # take only what was requested...but not blacklisted items # and allow for partial match anywhere or fnmatch of name paths = convert_to_native_paths(paths) matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break if split: matched = split_list(matched, split) t._testfiles.extend(matched) # run the tests and record the result for this *py portion of the tests if t._testfiles: failed = not t.test() else: failed = False # N.B. # -------------------------------------------------------------------- # Here we test *.rst files at or below doc/src. Code from these must # be self supporting in terms of imports since there is no importing # of necessary modules by doctest.testfile. If you try to pass *.py # files through this they might fail because they will lack the needed # imports and smarter parsing that can be done with source code. 
# test_files = t.get_test_files('doc/src', '*.rst', init_only=False) test_files.sort() not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted else: # Take only what was requested as long as it's not on the blacklist. # Paths were already made native in *py tests so don't repeat here. # There's no chance of having a *py file slip through since we # only have *rst files in test_files. matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break if split: matched = split_list(matched, split) first_report = True for rst_file in matched: if not os.path.isfile(rst_file): continue old_displayhook = sys.displayhook try: use_unicode_prev = setup_pprint() out = sympytestfile( rst_file, module_relative=False, encoding='utf-8', optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE | pdoctest.IGNORE_EXCEPTION_DETAIL) finally: # make sure we return to the original displayhook in case some # doctest has changed that sys.displayhook = old_displayhook # The NO_GLOBAL flag overrides the no_global flag to init_printing # if True import sympy.interactive.printing as interactive_printing interactive_printing.NO_GLOBAL = False pprint_use_unicode(use_unicode_prev) rstfailed, tested = out if tested: failed = rstfailed or failed if first_report: first_report = False msg = 'rst doctests start' if not t._testfiles: r.start(msg=msg) else: r.write_center(msg) print() # use as the id, everything past the first 'sympy' file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:] print(file_id, end=" ") # get at least the name out so it is know who is being tested wid = r.terminal_width - len(file_id) - 1 # update width test_file = '[%s]' % (tested) report = '[%s]' % (rstfailed or 'OK') print(''.join( [test_file, ' '*(wid - len(test_file) - len(report)), report]) ) # the doctests for *py will have printed this message already if there was # a failure, so now only print it if there was intervening reporting by # testing the *rst as evidenced by first_report no longer being True. if not first_report and failed: print() print("DO *NOT* COMMIT!") return int(failed) sp = re.compile(r'([0-9]+)/([1-9][0-9]*)') def split_list(l, split, density=None): """ Splits a list into part a of b split should be a string of the form 'a/b'. For instance, '1/3' would give the split one of three. If the length of the list is not divisible by the number of splits, the last split will have more items. `density` may be specified as a list. If specified, tests will be balanced so that each split has as equal-as-possible amount of mass according to `density`. 
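    For instance (an illustrative sketch in which each list item is given its
    own weight), weighting the last items more heavily moves the split
    boundary towards the front; the unweighted behaviour is shown in the
    examples below:

    >>> from sympy.testing.runtests import split_list
    >>> split_list(list(range(10)), '1/2', density=[1]*5 + [3]*5)
    [0, 1, 2, 3, 4, 5]
    >>> split_list(list(range(10)), '2/2', density=[1]*5 + [3]*5)
    [6, 7, 8, 9]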
>>> from sympy.testing.runtests import split_list >>> a = list(range(10)) >>> split_list(a, '1/3') [0, 1, 2] >>> split_list(a, '2/3') [3, 4, 5] >>> split_list(a, '3/3') [6, 7, 8, 9] """ m = sp.match(split) if not m: raise ValueError("split must be a string of the form a/b where a and b are ints") i, t = map(int, m.groups()) if not density: return l[(i - 1)*len(l)//t : i*len(l)//t] # normalize density tot = sum(density) density = [x / tot for x in density] def density_inv(x): """Interpolate the inverse to the cumulative distribution function given by density""" if x <= 0: return 0 if x >= sum(density): return 1 # find the first time the cumulative sum surpasses x # and linearly interpolate cumm = 0 for i, d in enumerate(density): cumm += d if cumm >= x: break frac = (d - (cumm - x)) / d return (i + frac) / len(density) lower_frac = density_inv((i - 1) / t) higher_frac = density_inv(i / t) return l[int(lower_frac*len(l)) : int(higher_frac*len(l))] from collections import namedtuple SymPyTestResults = namedtuple('SymPyTestResults', 'failed attempted') def sympytestfile(filename, module_relative=True, name=None, package=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, parser=pdoctest.DocTestParser(), encoding=None): """ Test examples in the given file. Return (#failures, #tests). Optional keyword arg ``module_relative`` specifies how filenames should be interpreted: - If ``module_relative`` is True (the default), then ``filename`` specifies a module-relative path. By default, this path is relative to the calling module's directory; but if the ``package`` argument is specified, then it is relative to that package. To ensure os-independence, ``filename`` should use "/" characters to separate path segments, and should not be an absolute path (i.e., it may not begin with "/"). - If ``module_relative`` is False, then ``filename`` specifies an os-specific path. The path may be absolute or relative (to the current working directory). Optional keyword arg ``name`` gives the name of the test; by default use the file's basename. Optional keyword argument ``package`` is a Python package or the name of a Python package whose directory should be used as the base directory for a module relative filename. If no package is specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify ``package`` if ``module_relative`` is False. Optional keyword arg ``globs`` gives a dict to be used as the globals when executing examples; by default, use {}. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. Optional keyword arg ``extraglobs`` gives a dictionary that should be merged into the globals that are used to execute examples. By default, no extra globals are used. Optional keyword arg ``verbose`` prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg ``report`` prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg ``optionflags`` or's together module constants, and defaults to 0. 
Possible values (see the docs for details): - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - SKIP - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE Optional keyword arg ``raise_on_error`` raises an exception on the first unexpected exception or failure. This allows failures to be post-mortem debugged. Optional keyword arg ``parser`` specifies a DocTestParser (or subclass) that should be used to extract tests from the files. Optional keyword arg ``encoding`` specifies an encoding that should be used to convert the file to unicode. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") # Relativize the path if not PY3: text, filename = pdoctest._load_testfile( filename, package, module_relative) if encoding is not None: text = text.decode(encoding) else: text, filename = pdoctest._load_testfile( filename, package, module_relative, encoding) # If no name was given, then use the file's name. if name is None: name = os.path.basename(filename) # Assemble the globals. if globs is None: globs = {} else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if '__name__' not in globs: globs['__name__'] = '__main__' if raise_on_error: runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags) runner._checker = SymPyOutputChecker() # Read the file, convert it to a test, and run it. test = parser.get_doctest(text, globs, name, filename, 0) runner.run(test, compileflags=future_flags) if report: runner.summarize() if pdoctest.master is None: pdoctest.master = runner else: pdoctest.master.merge(runner) return SymPyTestResults(runner.failures, runner.tries) class SymPyTests: def __init__(self, reporter, kw="", post_mortem=False, seed=None, fast_threshold=None, slow_threshold=None): self._post_mortem = post_mortem self._kw = kw self._count = 0 self._root_dir = get_sympy_dir() self._reporter = reporter self._reporter.root_dir(self._root_dir) self._testfiles = [] self._seed = seed if seed is not None else random.random() # Defaults in seconds, from human / UX design limits # http://www.nngroup.com/articles/response-times-3-important-limits/ # # These defaults are *NOT* set in stone as we are measuring different # things, so others feel free to come up with a better yardstick :) if fast_threshold: self._fast_threshold = float(fast_threshold) else: self._fast_threshold = 8 if slow_threshold: self._slow_threshold = float(slow_threshold) else: self._slow_threshold = 10 def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False, fail_on_timeout=False): """ Runs the tests returning True if all tests pass, otherwise False. If sort=False run tests in random order. 
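        A minimal sketch of how this class is driven internally by ``_test``
        (all names below are from this module; not intended as a public API):

        >>> from sympy.testing.runtests import SymPyTests, PyTestReporter  # doctest: +SKIP
        >>> reporter = PyTestReporter(verbose=False)  # doctest: +SKIP
        >>> runner = SymPyTests(reporter)  # doctest: +SKIP
        >>> runner._testfiles.extend(runner.get_test_files('sympy/core/tests'))  # doctest: +SKIP
        >>> runner.test(sort=True)  # doctest: +SKIP
        True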
""" if sort: self._testfiles.sort() elif slow: pass else: random.seed(self._seed) random.shuffle(self._testfiles) self._reporter.start(self._seed) for f in self._testfiles: try: self.test_file(f, sort, timeout, slow, enhance_asserts, fail_on_timeout) except KeyboardInterrupt: print(" interrupted by user") self._reporter.finish() raise return self._reporter.finish() def _enhance_asserts(self, source): from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple, Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations) ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=', "Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not', "In": 'in', "NotIn": 'not in'} class Transform(NodeTransformer): def visit_Assert(self, stmt): if isinstance(stmt.test, Compare): compare = stmt.test values = [compare.left] + compare.comparators names = [ "_%s" % i for i, _ in enumerate(values) ] names_store = [ Name(n, Store()) for n in names ] names_load = [ Name(n, Load()) for n in names ] target = Tuple(names_store, Store()) value = Tuple(values, Load()) assign = Assign([target], value) new_compare = Compare(names_load[0], compare.ops, names_load[1:]) msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s" msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load())) test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset) return [assign, test] else: return stmt tree = parse(source) new_tree = Transform().visit(tree) return fix_missing_locations(new_tree) def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False, fail_on_timeout=False): reporter = self._reporter funcs = [] try: gl = {'__file__': filename} try: if PY3: open_file = lambda: open(filename, encoding="utf8") else: open_file = lambda: open(filename) with open_file() as f: source = f.read() if self._kw: for l in source.splitlines(): if l.lstrip().startswith('def '): if any(l.find(k) != -1 for k in self._kw): break else: return if enhance_asserts: try: source = self._enhance_asserts(source) except ImportError: pass code = compile(source, filename, "exec", flags=0, dont_inherit=True) exec(code, gl) except (SystemExit, KeyboardInterrupt): raise except ImportError: reporter.import_error(filename, sys.exc_info()) return except Exception: reporter.test_exception(sys.exc_info()) clear_cache() self._count += 1 random.seed(self._seed) disabled = gl.get("disabled", False) if not disabled: # we need to filter only those functions that begin with 'test_' # We have to be careful about decorated functions. As long as # the decorator uses functools.wraps, we can detect it. funcs = [] for f in gl: if (f.startswith("test_") and (inspect.isfunction(gl[f]) or inspect.ismethod(gl[f]))): func = gl[f] # Handle multiple decorators while hasattr(func, '__wrapped__'): func = func.__wrapped__ if inspect.getsourcefile(func) == filename: funcs.append(gl[f]) if slow: funcs = [f for f in funcs if getattr(f, '_slow', False)] # Sorting of XFAILed functions isn't fixed yet :-( funcs.sort(key=lambda x: inspect.getsourcelines(x)[1]) i = 0 while i < len(funcs): if inspect.isgeneratorfunction(funcs[i]): # some tests can be generators, that return the actual # test functions. 
We unpack it below: f = funcs.pop(i) for fg in f(): func = fg[0] args = fg[1:] fgw = lambda: func(*args) funcs.insert(i, fgw) i += 1 else: i += 1 # drop functions that are not selected with the keyword expression: funcs = [x for x in funcs if self.matches(x)] if not funcs: return except Exception: reporter.entering_filename(filename, len(funcs)) raise reporter.entering_filename(filename, len(funcs)) if not sort: random.shuffle(funcs) for f in funcs: start = time.time() reporter.entering_test(f) try: if getattr(f, '_slow', False) and not slow: raise Skipped("Slow") with raise_on_deprecated(): if timeout: self._timeout(f, timeout, fail_on_timeout) else: random.seed(self._seed) f() except KeyboardInterrupt: if getattr(f, '_slow', False): reporter.test_skip("KeyboardInterrupt") else: raise except Exception: if timeout: signal.alarm(0) # Disable the alarm. It could not be handled before. t, v, tr = sys.exc_info() if t is AssertionError: reporter.test_fail((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) elif t.__name__ == "Skipped": reporter.test_skip(v) elif t.__name__ == "XFail": reporter.test_xfail() elif t.__name__ == "XPass": reporter.test_xpass(v) else: reporter.test_exception((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) else: reporter.test_pass() taken = time.time() - start if taken > self._slow_threshold: filename = os.path.relpath(filename, reporter._root_dir) reporter.slow_test_functions.append( (filename + "::" + f.__name__, taken)) if getattr(f, '_slow', False) and slow: if taken < self._fast_threshold: filename = os.path.relpath(filename, reporter._root_dir) reporter.fast_test_functions.append( (filename + "::" + f.__name__, taken)) reporter.leaving_filename() def _timeout(self, function, timeout, fail_on_timeout): def callback(x, y): signal.alarm(0) if fail_on_timeout: raise TimeOutError("Timed out after %d seconds" % timeout) else: raise Skipped("Timeout") signal.signal(signal.SIGALRM, callback) signal.alarm(timeout) # Set an alarm with a given timeout function() signal.alarm(0) # Disable the alarm def matches(self, x): """ Does the keyword expression self._kw match "x"? Returns True/False. Always returns True if self._kw is "". """ if not self._kw: return True for kw in self._kw: if x.__name__.find(kw) != -1: return True return False def get_test_files(self, dir, pat='test_*.py'): """ Returns the list of test_*.py (default) files at or below directory ``dir`` relative to the sympy home directory. """ dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) g = [] for path, folders, files in os.walk(dir): g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)]) return sorted([os.path.normcase(gi) for gi in g]) class SymPyDocTests: def __init__(self, reporter, normal): self._count = 0 self._root_dir = get_sympy_dir() self._reporter = reporter self._reporter.root_dir(self._root_dir) self._normal = normal self._testfiles = [] def test(self): """ Runs the tests and returns True if all tests pass, otherwise False. 
""" self._reporter.start() for f in self._testfiles: try: self.test_file(f) except KeyboardInterrupt: print(" interrupted by user") self._reporter.finish() raise return self._reporter.finish() def test_file(self, filename): clear_cache() from io import StringIO import sympy.interactive.printing as interactive_printing from sympy import pprint_use_unicode rel_name = filename[len(self._root_dir) + 1:] dirname, file = os.path.split(filename) module = rel_name.replace(os.sep, '.')[:-3] if rel_name.startswith("examples"): # Examples files do not have __init__.py files, # So we have to temporarily extend sys.path to import them sys.path.insert(0, dirname) module = file[:-3] # remove ".py" try: module = pdoctest._normalize_module(module) tests = SymPyDocTestFinder().find(module) except (SystemExit, KeyboardInterrupt): raise except ImportError: self._reporter.import_error(filename, sys.exc_info()) return finally: if rel_name.startswith("examples"): del sys.path[0] tests = [test for test in tests if len(test.examples) > 0] # By default tests are sorted by alphabetical order by function name. # We sort by line number so one can edit the file sequentially from # bottom to top. However, if there are decorated functions, their line # numbers will be too large and for now one must just search for these # by text and function name. tests.sort(key=lambda x: -x.lineno) if not tests: return self._reporter.entering_filename(filename, len(tests)) for test in tests: assert len(test.examples) != 0 if self._reporter._verbose: self._reporter.write("\n{} ".format(test.name)) # check if there are external dependencies which need to be met if '_doctest_depends_on' in test.globs: try: self._check_dependencies(**test.globs['_doctest_depends_on']) except DependencyError as e: self._reporter.test_skip(v=str(e)) continue runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE | pdoctest.IGNORE_EXCEPTION_DETAIL) runner._checker = SymPyOutputChecker() old = sys.stdout new = StringIO() sys.stdout = new # If the testing is normal, the doctests get importing magic to # provide the global namespace. If not normal (the default) then # then must run on their own; all imports must be explicit within # a function's docstring. Once imported that import will be # available to the rest of the tests in a given function's # docstring (unless clear_globs=True below). if not self._normal: test.globs = {} # if this is uncommented then all the test would get is what # comes by default with a "from sympy import *" #exec('from sympy import *') in test.globs test.globs['print_function'] = print_function old_displayhook = sys.displayhook use_unicode_prev = setup_pprint() try: f, t = runner.run(test, compileflags=future_flags, out=new.write, clear_globs=False) except KeyboardInterrupt: raise finally: sys.stdout = old if f > 0: self._reporter.doctest_fail(test.name, new.getvalue()) else: self._reporter.test_pass() sys.displayhook = old_displayhook interactive_printing.NO_GLOBAL = False pprint_use_unicode(use_unicode_prev) self._reporter.leaving_filename() def get_test_files(self, dir, pat='*.py', init_only=True): r""" Returns the list of \*.py files (default) from which docstrings will be tested which are at or below directory ``dir``. By default, only those that have an __init__.py in their parent directory and do not start with ``test_`` will be included. """ def importable(x): """ Checks if given pathname x is an importable module by checking for __init__.py file. Returns True/False. 
Currently we only test if the __init__.py file exists in the directory with the file "x" (in theory we should also test all the parent dirs). """ init_py = os.path.join(os.path.dirname(x), "__init__.py") return os.path.exists(init_py) dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) g = [] for path, folders, files in os.walk(dir): g.extend([os.path.join(path, f) for f in files if not f.startswith('test_') and fnmatch(f, pat)]) if init_only: # skip files that are not importable (i.e. missing __init__.py) g = [x for x in g if importable(x)] return [os.path.normcase(gi) for gi in g] def _check_dependencies(self, executables=(), modules=(), disable_viewers=(), python_version=(3, 5)): """ Checks if the dependencies for the test are installed. Raises ``DependencyError`` it at least one dependency is not installed. """ for executable in executables: if not shutil.which(executable): raise DependencyError("Could not find %s" % executable) for module in modules: if module == 'matplotlib': matplotlib = import_module( 'matplotlib', import_kwargs={'fromlist': ['pyplot', 'cm', 'collections']}, min_module_version='1.0.0', catch=(RuntimeError,)) if matplotlib is None: raise DependencyError("Could not import matplotlib") else: if not import_module(module): raise DependencyError("Could not import %s" % module) if disable_viewers: tempdir = tempfile.mkdtemp() os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH']) vw = ('#!/usr/bin/env {}\n' 'import sys\n' 'if len(sys.argv) <= 1:\n' ' exit("wrong number of args")\n').format( 'python3' if PY3 else 'python') for viewer in disable_viewers: with open(os.path.join(tempdir, viewer), 'w') as fh: fh.write(vw) # make the file executable os.chmod(os.path.join(tempdir, viewer), stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR) if python_version: if sys.version_info < python_version: raise DependencyError("Requires Python >= " + '.'.join(map(str, python_version))) if 'pyglet' in modules: # monkey-patch pyglet s.t. it does not open a window during # doctesting import pyglet class DummyWindow: def __init__(self, *args, **kwargs): self.has_exit = True self.width = 600 self.height = 400 def set_vsync(self, x): pass def switch_to(self): pass def push_handlers(self, x): pass def close(self): pass pyglet.window.Window = DummyWindow class SymPyDocTestFinder(DocTestFinder): """ A class used to extract the DocTests that are relevant to a given object, from its docstring and the docstrings of its contained objects. Doctests can currently be extracted from the following object types: modules, functions, classes, methods, staticmethods, classmethods, and properties. Modified from doctest's version to look harder for code that appears comes from a different module. For example, the @vectorize decorator makes it look like functions come from multidimensional.py even though their code exists elsewhere. """ def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to ``tests``. """ if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. 
test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (rawname %s)" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError("SymPyDocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError("SymPyDocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if ((inspect.isfunction(unwrap(val)) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (valname %s)" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) def _get_test(self, obj, name, module, globs, source_lines): """ Return a DocTest for the given object, if it defines a docstring; otherwise, return None. """ lineno = None # Extract the object's docstring. If it doesn't have one, # then return None (no test for this object). if isinstance(obj, str): # obj is a string in the case for objects in the polys package. # Note that source_lines is a binary string (compiled polys # modules), which can't be handled by _find_lineno so determine # the line number here. docstring = obj matches = re.findall(r"line \d+", name) assert len(matches) == 1, \ "string '%s' does not contain lineno " % name # NOTE: this is not the exact linenumber but its better than no # lineno ;) lineno = int(matches[0][5:]) else: try: if obj.__doc__ is None: docstring = '' else: docstring = obj.__doc__ if not isinstance(docstring, str): docstring = str(docstring) except (TypeError, AttributeError): docstring = '' # Don't bother if the docstring is empty. if self._exclude_empty and not docstring: return None # check that properties have a docstring because _find_lineno # assumes it if isinstance(obj, property): if obj.fget.__doc__ is None: return None # Find the docstring's location in the file. 
if lineno is None: obj = unwrap(obj) # handling of properties is not implemented in _find_lineno so do # it here if hasattr(obj, 'func_closure') and obj.func_closure is not None: tobj = obj.func_closure[0].cell_contents elif isinstance(obj, property): tobj = obj.fget else: tobj = obj lineno = self._find_lineno(tobj, source_lines) if lineno is None: return None # Return a DocTest for this object. if module is None: filename = None else: filename = getattr(module, '__file__', module.__name__) if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] globs['_doctest_depends_on'] = getattr(obj, '_doctest_depends_on', {}) return self._parser.get_doctest(docstring, globs, name, filename, lineno) class SymPyDocTestRunner(DocTestRunner): """ A class used to run DocTest test cases, and accumulate statistics. The ``run`` method is used to process a single DocTest case. It returns a tuple ``(f, t)``, where ``t`` is the number of test cases tried, and ``f`` is the number of test cases that failed. Modified from the doctest version to not reset the sys.displayhook (see issue 5140). See the docstring of the original DocTestRunner for more information. """ def run(self, test, compileflags=None, out=None, clear_globs=True): """ Run the examples in ``test``, and display the results using the writer function ``out``. The examples are run in the namespace ``test.globs``. If ``clear_globs`` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use ``clear_globs=False``. ``compileflags`` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to ``globs``. The output of each example is checked using ``SymPyDocTestRunner.check_output``, and the results are formatted by the ``SymPyDocTestRunner.report_*`` methods. """ self.test = test if compileflags is None: compileflags = pdoctest._extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: out = save_stdout.write sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. save_set_trace = pdb.set_trace self.debugger = pdoctest._OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. self.save_linecache_getlines = pdoctest.linecache.getlines linecache.getlines = self.__patched_linecache_getlines # Fail for deprecation warnings with raise_on_deprecated(): try: test.globs['print_function'] = print_function return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace linecache.getlines = self.save_linecache_getlines if clear_globs: test.globs.clear() # We have to override the name mangled methods. 
monkeypatched_methods = [ 'patched_linecache_getlines', 'run', 'record_outcome' ] for method in monkeypatched_methods: oldname = '_DocTestRunner__' + method newname = '_SymPyDocTestRunner__' + method setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname)) class SymPyOutputChecker(pdoctest.OutputChecker): """ Compared to the OutputChecker from the stdlib our OutputChecker class supports numerical comparison of floats occurring in the output of the doctest examples """ def __init__(self): # NOTE OutputChecker is an old-style class with no __init__ method, # so we can't call the base class version of __init__ here got_floats = r'(\d+\.\d*|\.\d+)' # floats in the 'want' string may contain ellipses want_floats = got_floats + r'(\.{3})?' front_sep = r'\s|\+|\-|\*|,' back_sep = front_sep + r'|j|e' fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep) fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep) self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend)) fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep) fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep) self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend)) def check_output(self, want, got, optionflags): """ Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. """ # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False class Reporter: """ Parent class for all reporters. """ pass class PyTestReporter(Reporter): """ Py.test like reporter. Should produce output identical to py.test. 
""" def __init__(self, verbose=False, tb="short", colors=True, force_colors=False, split=None): self._verbose = verbose self._tb_style = tb self._colors = colors self._force_colors = force_colors self._xfailed = 0 self._xpassed = [] self._failed = [] self._failed_doctest = [] self._passed = 0 self._skipped = 0 self._exceptions = [] self._terminal_width = None self._default_width = 80 self._split = split self._active_file = '' self._active_f = None # TODO: Should these be protected? self.slow_test_functions = [] self.fast_test_functions = [] # this tracks the x-position of the cursor (useful for positioning # things on the screen), without the need for any readline library: self._write_pos = 0 self._line_wrap = False def root_dir(self, dir): self._root_dir = dir @property def terminal_width(self): if self._terminal_width is not None: return self._terminal_width def findout_terminal_width(): if sys.platform == "win32": # Windows support is based on: # # http://code.activestate.com/recipes/ # 440694-determine-size-of-console-window-on-windows/ from ctypes import windll, create_string_buffer h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) if res: import struct (_, _, _, _, _, left, _, right, _, _, _) = \ struct.unpack("hhhhHhhhhhh", csbi.raw) return right - left else: return self._default_width if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty(): return self._default_width # leave PIPEs alone try: process = subprocess.Popen(['stty', '-a'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = process.stdout.read() if PY3: stdout = stdout.decode("utf-8") except OSError: pass else: # We support the following output formats from stty: # # 1) Linux -> columns 80 # 2) OS X -> 80 columns # 3) Solaris -> columns = 80 re_linux = r"columns\s+(?P<columns>\d+);" re_osx = r"(?P<columns>\d+)\s*columns;" re_solaris = r"columns\s+=\s+(?P<columns>\d+);" for regex in (re_linux, re_osx, re_solaris): match = re.search(regex, stdout) if match is not None: columns = match.group('columns') try: width = int(columns) except ValueError: pass if width != 0: return width return self._default_width width = findout_terminal_width() self._terminal_width = width return width def write(self, text, color="", align="left", width=None, force_colors=False): """ Prints a text on the screen. It uses sys.stdout.write(), so no readline library is necessary. 
Parameters ========== color : choose from the colors below, "" means default color align : "left"/"right", "left" is a normal print, "right" is aligned on the right-hand side of the screen, filled with spaces if necessary width : the screen width """ color_templates = ( ("Black", "0;30"), ("Red", "0;31"), ("Green", "0;32"), ("Brown", "0;33"), ("Blue", "0;34"), ("Purple", "0;35"), ("Cyan", "0;36"), ("LightGray", "0;37"), ("DarkGray", "1;30"), ("LightRed", "1;31"), ("LightGreen", "1;32"), ("Yellow", "1;33"), ("LightBlue", "1;34"), ("LightPurple", "1;35"), ("LightCyan", "1;36"), ("White", "1;37"), ) colors = {} for name, value in color_templates: colors[name] = value c_normal = '\033[0m' c_color = '\033[%sm' if width is None: width = self.terminal_width if align == "right": if self._write_pos + len(text) > width: # we don't fit on the current line, create a new line self.write("\n") self.write(" "*(width - self._write_pos - len(text))) if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \ sys.stdout.isatty(): # the stdout is not a terminal, this for example happens if the # output is piped to less, e.g. "bin/test | less". In this case, # the terminal control sequences would be printed verbatim, so # don't use any colors. color = "" elif sys.platform == "win32": # Windows consoles don't support ANSI escape sequences color = "" elif not self._colors: color = "" if self._line_wrap: if text[0] != "\n": sys.stdout.write("\n") # Avoid UnicodeEncodeError when printing out test failures if PY3 and IS_WINDOWS: text = text.encode('raw_unicode_escape').decode('utf8', 'ignore') elif PY3 and not sys.stdout.encoding.lower().startswith('utf'): text = text.encode(sys.stdout.encoding, 'backslashreplace' ).decode(sys.stdout.encoding) if color == "": sys.stdout.write(text) else: sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal)) sys.stdout.flush() l = text.rfind("\n") if l == -1: self._write_pos += len(text) else: self._write_pos = len(text) - l - 1 self._line_wrap = self._write_pos >= width self._write_pos %= width def write_center(self, text, delim="="): width = self.terminal_width if text != "": text = " %s " % text idx = (width - len(text)) // 2 t = delim*idx + text + delim*(width - idx - len(text)) self.write(t + "\n") def write_exception(self, e, val, tb): # remove the first item, as that is always runtests.py tb = tb.tb_next t = traceback.format_exception(e, val, tb) self.write("".join(t)) def start(self, seed=None, msg="test process starts"): self.write_center(msg) executable = sys.executable v = tuple(sys.version_info) python_version = "%s.%s.%s-%s-%s" % v implementation = platform.python_implementation() if implementation == 'PyPy': implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info self.write("executable: %s (%s) [%s]\n" % (executable, python_version, implementation)) from sympy.utilities.misc import ARCH self.write("architecture: %s\n" % ARCH) from sympy.core.cache import USE_CACHE self.write("cache: %s\n" % USE_CACHE) from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY version = '' if GROUND_TYPES =='gmpy': if HAS_GMPY == 1: import gmpy elif HAS_GMPY == 2: import gmpy2 as gmpy version = gmpy.version() self.write("ground types: %s %s\n" % (GROUND_TYPES, version)) numpy = import_module('numpy') self.write("numpy: %s\n" % (None if not numpy else numpy.__version__)) if seed is not None: self.write("random seed: %d\n" % seed) from sympy.utilities.misc import HASH_RANDOMIZATION self.write("hash randomization: ") hash_seed = os.getenv("PYTHONHASHSEED") 
or '0' if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)): self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed) else: self.write("off\n") if self._split: self.write("split: %s\n" % self._split) self.write('\n') self._t_start = clock() def finish(self): self._t_end = clock() self.write("\n") global text, linelen text = "tests finished: %d passed, " % self._passed linelen = len(text) def add_text(mytext): global text, linelen """Break new text if too long.""" if linelen + len(mytext) > self.terminal_width: text += '\n' linelen = 0 text += mytext linelen += len(mytext) if len(self._failed) > 0: add_text("%d failed, " % len(self._failed)) if len(self._failed_doctest) > 0: add_text("%d failed, " % len(self._failed_doctest)) if self._skipped > 0: add_text("%d skipped, " % self._skipped) if self._xfailed > 0: add_text("%d expected to fail, " % self._xfailed) if len(self._xpassed) > 0: add_text("%d expected to fail but passed, " % len(self._xpassed)) if len(self._exceptions) > 0: add_text("%d exceptions, " % len(self._exceptions)) add_text("in %.2f seconds" % (self._t_end - self._t_start)) if self.slow_test_functions: self.write_center('slowest tests', '_') sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1]) for slow_func_name, taken in sorted_slow: print('%s - Took %.3f seconds' % (slow_func_name, taken)) if self.fast_test_functions: self.write_center('unexpectedly fast tests', '_') sorted_fast = sorted(self.fast_test_functions, key=lambda r: r[1]) for fast_func_name, taken in sorted_fast: print('%s - Took %.3f seconds' % (fast_func_name, taken)) if len(self._xpassed) > 0: self.write_center("xpassed tests", "_") for e in self._xpassed: self.write("%s: %s\n" % (e[0], e[1])) self.write("\n") if self._tb_style != "no" and len(self._exceptions) > 0: for e in self._exceptions: filename, f, (t, val, tb) = e self.write_center("", "_") if f is None: s = "%s" % filename else: s = "%s:%s" % (filename, f.__name__) self.write_center(s, "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed) > 0: for e in self._failed: filename, f, (t, val, tb) = e self.write_center("", "_") self.write_center("%s:%s" % (filename, f.__name__), "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed_doctest) > 0: for e in self._failed_doctest: filename, msg = e self.write_center("", "_") self.write_center("%s" % filename, "_") self.write(msg) self.write("\n") self.write_center(text) ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \ len(self._failed_doctest) == 0 if not ok: self.write("DO *NOT* COMMIT!\n") return ok def entering_filename(self, filename, n): rel_name = filename[len(self._root_dir) + 1:] self._active_file = rel_name self._active_file_error = False self.write(rel_name) self.write("[%d] " % n) def leaving_filename(self): self.write(" ") if self._active_file_error: self.write("[FAIL]", "Red", align="right") else: self.write("[OK]", "Green", align="right") self.write("\n") if self._verbose: self.write("\n") def entering_test(self, f): self._active_f = f if self._verbose: self.write("\n" + f.__name__ + " ") def test_xfail(self): self._xfailed += 1 self.write("f", "Green") def test_xpass(self, v): message = str(v) self._xpassed.append((self._active_file, message)) self.write("X", "Green") def test_fail(self, exc_info): self._failed.append((self._active_file, self._active_f, exc_info)) self.write("F", "Red") self._active_file_error = True def doctest_fail(self, name, error_msg): # the 
first line contains "******", remove it: error_msg = "\n".join(error_msg.split("\n")[1:]) self._failed_doctest.append((name, error_msg)) self.write("F", "Red") self._active_file_error = True def test_pass(self, char="."): self._passed += 1 if self._verbose: self.write("ok", "Green") else: self.write(char, "Green") def test_skip(self, v=None): char = "s" self._skipped += 1 if v is not None: message = str(v) if message == "KeyboardInterrupt": char = "K" elif message == "Timeout": char = "T" elif message == "Slow": char = "w" if self._verbose: if v is not None: self.write(message + ' ', "Blue") else: self.write(" - ", "Blue") self.write(char, "Blue") def test_exception(self, exc_info): self._exceptions.append((self._active_file, self._active_f, exc_info)) if exc_info[0] is TimeOutError: self.write("T", "Red") else: self.write("E", "Red") self._active_file_error = True def import_error(self, filename, exc_info): self._exceptions.append((filename, None, exc_info)) rel_name = filename[len(self._root_dir) + 1:] self.write(rel_name) self.write("[?] Failed to import", "Red") self.write(" ") self.write("[FAIL]", "Red", align="right") self.write("\n")
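
# A short usage sketch (illustrative only, not part of the test runner): the
# reporter's ``write`` method drives all terminal output, with optional color
# and right-alignment against the detected terminal width.
if __name__ == "__main__":
    _reporter = PyTestReporter(colors=False)
    _reporter.root_dir(get_sympy_dir())
    _reporter.write_center("reporter demo")
    _reporter.write("sympy/core/tests/test_basic.py[3] ")
    _reporter.write("...", "Green")
    _reporter.write("[OK]", "Green", align="right")
    _reporter.write("\n")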
6c563008504dc5ee564eddcf0a0d9accb26f469fc9803f7cb6e51fb3e465472b
from collections.abc import Callable from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.core.basic import Basic from sympy.core.cache import cacheit from sympy.core import S, Dummy, Lambda from sympy.core.symbol import Str from sympy import symbols, MatrixBase, ImmutableDenseMatrix from sympy.solvers import solve from sympy.vector.scalar import BaseScalar from sympy import eye, trigsimp, ImmutableMatrix as Matrix, sin, cos,\ sqrt, diff, Tuple, acos, atan2, simplify import sympy.vector from sympy.vector.orienters import (Orienter, AxisOrienter, BodyOrienter, SpaceOrienter, QuaternionOrienter) def CoordSysCartesian(*args, **kwargs): SymPyDeprecationWarning( feature="CoordSysCartesian", useinstead="CoordSys3D", issue=12865, deprecated_since_version="1.1" ).warn() return CoordSys3D(*args, **kwargs) class CoordSys3D(Basic): """ Represents a coordinate system in 3-D space. """ def __new__(cls, name, transformation=None, parent=None, location=None, rotation_matrix=None, vector_names=None, variable_names=None): """ The orientation/location parameters are necessary if this system is being defined at a certain orientation or location wrt another. Parameters ========== name : str The name of the new CoordSys3D instance. transformation : Lambda, Tuple, str Transformation defined by transformation equations or chosen from predefined ones. location : Vector The position vector of the new system's origin wrt the parent instance. rotation_matrix : SymPy ImmutableMatrix The rotation matrix of the new coordinate system with respect to the parent. In other words, the output of new_system.rotation_matrix(parent). parent : CoordSys3D The coordinate system wrt which the orientation/location (or both) is being defined. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. 
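        Examples
        ========

        A default (Cartesian) system, a system using one of the pre-defined
        transformations and a system located with respect to a parent can be
        created as follows (a brief illustrative sketch; systems are more
        commonly derived from an existing one via ``locate_new``/``orient_new``):

        >>> from sympy.vector import CoordSys3D
        >>> N = CoordSys3D('N')
        >>> S = CoordSys3D('S', transformation='spherical')
        >>> A = CoordSys3D('A', location=3*N.i + 4*N.j, parent=N)  # doctest: +SKIP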
""" name = str(name) Vector = sympy.vector.Vector Point = sympy.vector.Point if not isinstance(name, str): raise TypeError("name should be a string") if transformation is not None: if (location is not None) or (rotation_matrix is not None): raise ValueError("specify either `transformation` or " "`location`/`rotation_matrix`") if isinstance(transformation, (Tuple, tuple, list)): if isinstance(transformation[0], MatrixBase): rotation_matrix = transformation[0] location = transformation[1] else: transformation = Lambda(transformation[0], transformation[1]) elif isinstance(transformation, Callable): x1, x2, x3 = symbols('x1 x2 x3', cls=Dummy) transformation = Lambda((x1, x2, x3), transformation(x1, x2, x3)) elif isinstance(transformation, str): transformation = Str(transformation) elif isinstance(transformation, (Str, Lambda)): pass else: raise TypeError("transformation: " "wrong type {}".format(type(transformation))) # If orientation information has been provided, store # the rotation matrix accordingly if rotation_matrix is None: rotation_matrix = ImmutableDenseMatrix(eye(3)) else: if not isinstance(rotation_matrix, MatrixBase): raise TypeError("rotation_matrix should be an Immutable" + "Matrix instance") rotation_matrix = rotation_matrix.as_immutable() # If location information is not given, adjust the default # location as Vector.zero if parent is not None: if not isinstance(parent, CoordSys3D): raise TypeError("parent should be a " + "CoordSys3D/None") if location is None: location = Vector.zero else: if not isinstance(location, Vector): raise TypeError("location should be a Vector") # Check that location does not contain base # scalars for x in location.free_symbols: if isinstance(x, BaseScalar): raise ValueError("location should not contain" + " BaseScalars") origin = parent.origin.locate_new(name + '.origin', location) else: location = Vector.zero origin = Point(name + '.origin') if transformation is None: transformation = Tuple(rotation_matrix, location) if isinstance(transformation, Tuple): lambda_transformation = CoordSys3D._compose_rotation_and_translation( transformation[0], transformation[1], parent ) r, l = transformation l = l._projections lambda_lame = CoordSys3D._get_lame_coeff('cartesian') lambda_inverse = lambda x, y, z: r.inv()*Matrix( [x-l[0], y-l[1], z-l[2]]) elif isinstance(transformation, Str): trname = transformation.name lambda_transformation = CoordSys3D._get_transformation_lambdas(trname) if parent is not None: if parent.lame_coefficients() != (S.One, S.One, S.One): raise ValueError('Parent for pre-defined coordinate ' 'system should be Cartesian.') lambda_lame = CoordSys3D._get_lame_coeff(trname) lambda_inverse = CoordSys3D._set_inv_trans_equations(trname) elif isinstance(transformation, Lambda): if not CoordSys3D._check_orthogonality(transformation): raise ValueError("The transformation equation does not " "create orthogonal coordinate system") lambda_transformation = transformation lambda_lame = CoordSys3D._calculate_lame_coeff(lambda_transformation) lambda_inverse = None else: lambda_transformation = lambda x, y, z: transformation(x, y, z) lambda_lame = CoordSys3D._get_lame_coeff(transformation) lambda_inverse = None if variable_names is None: if isinstance(transformation, Lambda): variable_names = ["x1", "x2", "x3"] elif isinstance(transformation, Str): if transformation.name == 'spherical': variable_names = ["r", "theta", "phi"] elif transformation.name == 'cylindrical': variable_names = ["r", "theta", "z"] else: variable_names = ["x", "y", "z"] else: 
variable_names = ["x", "y", "z"] if vector_names is None: vector_names = ["i", "j", "k"] # All systems that are defined as 'roots' are unequal, unless # they have the same name. # Systems defined at same orientation/position wrt the same # 'parent' are equal, irrespective of the name. # This is true even if the same orientation is provided via # different methods like Axis/Body/Space/Quaternion. # However, coincident systems may be seen as unequal if # positioned/oriented wrt different parents, even though # they may actually be 'coincident' wrt the root system. if parent is not None: obj = super().__new__( cls, Str(name), transformation, parent) else: obj = super().__new__( cls, Str(name), transformation) obj._name = name # Initialize the base vectors _check_strings('vector_names', vector_names) vector_names = list(vector_names) latex_vects = [(r'\mathbf{\hat{%s}_{%s}}' % (x, name)) for x in vector_names] pretty_vects = ['%s_%s' % (x, name) for x in vector_names] obj._vector_names = vector_names v1 = BaseVector(0, obj, pretty_vects[0], latex_vects[0]) v2 = BaseVector(1, obj, pretty_vects[1], latex_vects[1]) v3 = BaseVector(2, obj, pretty_vects[2], latex_vects[2]) obj._base_vectors = (v1, v2, v3) # Initialize the base scalars _check_strings('variable_names', vector_names) variable_names = list(variable_names) latex_scalars = [(r"\mathbf{{%s}_{%s}}" % (x, name)) for x in variable_names] pretty_scalars = ['%s_%s' % (x, name) for x in variable_names] obj._variable_names = variable_names obj._vector_names = vector_names x1 = BaseScalar(0, obj, pretty_scalars[0], latex_scalars[0]) x2 = BaseScalar(1, obj, pretty_scalars[1], latex_scalars[1]) x3 = BaseScalar(2, obj, pretty_scalars[2], latex_scalars[2]) obj._base_scalars = (x1, x2, x3) obj._transformation = transformation obj._transformation_lambda = lambda_transformation obj._lame_coefficients = lambda_lame(x1, x2, x3) obj._transformation_from_parent_lambda = lambda_inverse setattr(obj, variable_names[0], x1) setattr(obj, variable_names[1], x2) setattr(obj, variable_names[2], x3) setattr(obj, vector_names[0], v1) setattr(obj, vector_names[1], v2) setattr(obj, vector_names[2], v3) # Assign params obj._parent = parent if obj._parent is not None: obj._root = obj._parent._root else: obj._root = obj obj._parent_rotation_matrix = rotation_matrix obj._origin = origin # Return the instance return obj def _sympystr(self, printer): return self._name def __iter__(self): return iter(self.base_vectors()) @staticmethod def _check_orthogonality(equations): """ Helper method for _connect_to_cartesian. It checks if set of transformation equations create orthogonal curvilinear coordinate system Parameters ========== equations : Lambda Lambda of transformation equations """ x1, x2, x3 = symbols("x1, x2, x3", cls=Dummy) equations = equations(x1, x2, x3) v1 = Matrix([diff(equations[0], x1), diff(equations[1], x1), diff(equations[2], x1)]) v2 = Matrix([diff(equations[0], x2), diff(equations[1], x2), diff(equations[2], x2)]) v3 = Matrix([diff(equations[0], x3), diff(equations[1], x3), diff(equations[2], x3)]) if any(simplify(i[0] + i[1] + i[2]) == 0 for i in (v1, v2, v3)): return False else: if simplify(v1.dot(v2)) == 0 and simplify(v2.dot(v3)) == 0 \ and simplify(v3.dot(v1)) == 0: return True else: return False @staticmethod def _set_inv_trans_equations(curv_coord_name): """ Store information about inverse transformation equations for pre-defined coordinate systems. 
Parameters ========== curv_coord_name : str Name of coordinate system """ if curv_coord_name == 'cartesian': return lambda x, y, z: (x, y, z) if curv_coord_name == 'spherical': return lambda x, y, z: ( sqrt(x**2 + y**2 + z**2), acos(z/sqrt(x**2 + y**2 + z**2)), atan2(y, x) ) if curv_coord_name == 'cylindrical': return lambda x, y, z: ( sqrt(x**2 + y**2), atan2(y, x), z ) raise ValueError('Wrong set of parameters. ' 'Type of coordinate system is not defined') def _calculate_inv_trans_equations(self): """ Helper method for set_coordinate_type. It calculates inverse transformation equations for the given transformation equations. """ x1, x2, x3 = symbols("x1, x2, x3", cls=Dummy, real=True) x, y, z = symbols("x, y, z", cls=Dummy) equations = self._transformation(x1, x2, x3) solved = solve([equations[0] - x, equations[1] - y, equations[2] - z], (x1, x2, x3), dict=True)[0] solved = solved[x1], solved[x2], solved[x3] self._transformation_from_parent_lambda = \ lambda x1, x2, x3: tuple(i.subs(list(zip((x, y, z), (x1, x2, x3)))) for i in solved) @staticmethod def _get_lame_coeff(curv_coord_name): """ Store information about Lame coefficients for pre-defined coordinate systems. Parameters ========== curv_coord_name : str Name of coordinate system """ if isinstance(curv_coord_name, str): if curv_coord_name == 'cartesian': return lambda x, y, z: (S.One, S.One, S.One) if curv_coord_name == 'spherical': return lambda r, theta, phi: (S.One, r, r*sin(theta)) if curv_coord_name == 'cylindrical': return lambda r, theta, h: (S.One, r, S.One) raise ValueError('Wrong set of parameters. ' 'Type of coordinate system is not defined') return CoordSys3D._calculate_lame_coeff(curv_coord_name) @staticmethod def _calculate_lame_coeff(equations): """ It calculates Lame coefficients for the given transformation equations. Parameters ========== equations : Lambda Lambda of transformation equations. """ return lambda x1, x2, x3: ( sqrt(diff(equations(x1, x2, x3)[0], x1)**2 + diff(equations(x1, x2, x3)[1], x1)**2 + diff(equations(x1, x2, x3)[2], x1)**2), sqrt(diff(equations(x1, x2, x3)[0], x2)**2 + diff(equations(x1, x2, x3)[1], x2)**2 + diff(equations(x1, x2, x3)[2], x2)**2), sqrt(diff(equations(x1, x2, x3)[0], x3)**2 + diff(equations(x1, x2, x3)[1], x3)**2 + diff(equations(x1, x2, x3)[2], x3)**2) ) def _inverse_rotation_matrix(self): """ Returns the inverse rotation matrix. """ return simplify(self._parent_rotation_matrix**-1) @staticmethod def _get_transformation_lambdas(curv_coord_name): """ Store information about transformation equations for pre-defined coordinate systems. Parameters ========== curv_coord_name : str Name of coordinate system """ if isinstance(curv_coord_name, str): if curv_coord_name == 'cartesian': return lambda x, y, z: (x, y, z) if curv_coord_name == 'spherical': return lambda r, theta, phi: ( r*sin(theta)*cos(phi), r*sin(theta)*sin(phi), r*cos(theta) ) if curv_coord_name == 'cylindrical': return lambda r, theta, h: ( r*cos(theta), r*sin(theta), h ) raise ValueError('Wrong set of parameters. ' 'Type of coordinate system is not defined') @classmethod def _rotation_trans_equations(cls, matrix, equations): """ Returns the transformation equations obtained from rotation matrix.
Parameters ========== matrix : Matrix Rotation matrix equations : tuple Transformation equations """ return tuple(matrix * Matrix(equations)) @property def origin(self): return self._origin @property def delop(self): SymPyDeprecationWarning( feature="coord_system.delop has been replaced.", useinstead="Use the Del() class", deprecated_since_version="1.1", issue=12866, ).warn() from sympy.vector.deloperator import Del return Del() def base_vectors(self): return self._base_vectors def base_scalars(self): return self._base_scalars def lame_coefficients(self): return self._lame_coefficients def transformation_to_parent(self): return self._transformation_lambda(*self.base_scalars()) def transformation_from_parent(self): if self._parent is None: raise ValueError("no parent coordinate system, use " "`transformation_from_parent_function()`") return self._transformation_from_parent_lambda( *self._parent.base_scalars()) def transformation_from_parent_function(self): return self._transformation_from_parent_lambda def rotation_matrix(self, other): """ Returns the direction cosine matrix (DCM), also known as the 'rotation matrix' of this coordinate system with respect to another system. If v_a is a vector defined in system 'A' (in matrix format) and v_b is the same vector defined in system 'B', then v_a = A.rotation_matrix(B) * v_b. A SymPy Matrix is returned. Parameters ========== other : CoordSys3D The system with respect to which the DCM is generated. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q1 = symbols('q1') >>> N = CoordSys3D('N') >>> A = N.orient_new_axis('A', q1, N.i) >>> N.rotation_matrix(A) Matrix([ [1, 0, 0], [0, cos(q1), -sin(q1)], [0, sin(q1), cos(q1)]]) """ from sympy.vector.functions import _path if not isinstance(other, CoordSys3D): raise TypeError(str(other) + " is not a CoordSys3D") # Handle special cases if other == self: return eye(3) elif other == self._parent: return self._parent_rotation_matrix elif other._parent == self: return other._parent_rotation_matrix.T # Else, use tree to calculate position rootindex, path = _path(self, other) result = eye(3) i = -1 for i in range(rootindex): result *= path[i]._parent_rotation_matrix i += 2 while i < len(path): result *= path[i]._parent_rotation_matrix.T i += 1 return result @cacheit def position_wrt(self, other): """ Returns the position vector of the origin of this coordinate system with respect to another Point/CoordSys3D. Parameters ========== other : Point/CoordSys3D If other is a Point, the position of this system's origin wrt it is returned. If it is an instance of CoordSys3D, the position wrt its origin is returned. Examples ======== >>> from sympy.vector import CoordSys3D >>> N = CoordSys3D('N') >>> N1 = N.locate_new('N1', 10 * N.i) >>> N.position_wrt(N1) (-10)*N.i """ return self.origin.position_wrt(other) def scalar_map(self, other): """ Returns a dictionary which expresses the coordinate variables (base scalars) of this frame in terms of the variables of other. Parameters ========== other : CoordSys3D The other system to map the variables to.
Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import Symbol >>> A = CoordSys3D('A') >>> q = Symbol('q') >>> B = A.orient_new_axis('B', q, A.k) >>> A.scalar_map(B) {A.x: B.x*cos(q) - B.y*sin(q), A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z} """ relocated_scalars = [] origin_coords = tuple(self.position_wrt(other).to_matrix(other)) for i, x in enumerate(other.base_scalars()): relocated_scalars.append(x - origin_coords[i]) vars_matrix = (self.rotation_matrix(other) * Matrix(relocated_scalars)) mapping = {} for i, x in enumerate(self.base_scalars()): mapping[x] = trigsimp(vars_matrix[i]) return mapping def locate_new(self, name, position, vector_names=None, variable_names=None): """ Returns a CoordSys3D with its origin located at the given position wrt this coordinate system's origin. Parameters ========== name : str The name of the new CoordSys3D instance. position : Vector The position vector of the new system's origin wrt this one. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. Examples ======== >>> from sympy.vector import CoordSys3D >>> A = CoordSys3D('A') >>> B = A.locate_new('B', 10 * A.i) >>> B.origin.position_wrt(A.origin) 10*A.i """ if variable_names is None: variable_names = self._variable_names if vector_names is None: vector_names = self._vector_names return CoordSys3D(name, location=position, vector_names=vector_names, variable_names=variable_names, parent=self) def orient_new(self, name, orienters, location=None, vector_names=None, variable_names=None): """ Creates a new CoordSys3D oriented in the user-specified way with respect to this system. Please refer to the documentation of the orienter classes for more information about the orientation procedure. Parameters ========== name : str The name of the new CoordSys3D instance. orienters : iterable/Orienter An Orienter or an iterable of Orienters for orienting the new coordinate system. If an Orienter is provided, it is applied to get the new system. If an iterable is provided, the orienters will be applied in the order in which they appear in the iterable. location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. 
Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3') >>> N = CoordSys3D('N') Using an AxisOrienter >>> from sympy.vector import AxisOrienter >>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j) >>> A = N.orient_new('A', (axis_orienter, )) Using a BodyOrienter >>> from sympy.vector import BodyOrienter >>> body_orienter = BodyOrienter(q1, q2, q3, '123') >>> B = N.orient_new('B', (body_orienter, )) Using a SpaceOrienter >>> from sympy.vector import SpaceOrienter >>> space_orienter = SpaceOrienter(q1, q2, q3, '312') >>> C = N.orient_new('C', (space_orienter, )) Using a QuaternionOrienter >>> from sympy.vector import QuaternionOrienter >>> q_orienter = QuaternionOrienter(q0, q1, q2, q3) >>> D = N.orient_new('D', (q_orienter, )) """ if variable_names is None: variable_names = self._variable_names if vector_names is None: vector_names = self._vector_names if isinstance(orienters, Orienter): if isinstance(orienters, AxisOrienter): final_matrix = orienters.rotation_matrix(self) else: final_matrix = orienters.rotation_matrix() # TODO: trigsimp is needed here so that the matrix becomes # canonical (scalar_map also calls trigsimp; without this, you can # end up with the same CoordinateSystem that compares differently # due to a differently formatted matrix). However, this is # probably not so good for performance. final_matrix = trigsimp(final_matrix) else: final_matrix = Matrix(eye(3)) for orienter in orienters: if isinstance(orienter, AxisOrienter): final_matrix *= orienter.rotation_matrix(self) else: final_matrix *= orienter.rotation_matrix() return CoordSys3D(name, rotation_matrix=final_matrix, vector_names=vector_names, variable_names=variable_names, location=location, parent=self) def orient_new_axis(self, name, angle, axis, location=None, vector_names=None, variable_names=None): """ Axis rotation is a rotation about an arbitrary axis by some angle. The angle is supplied as a SymPy expr scalar, and the axis is supplied as a Vector. Parameters ========== name : string The name of the new coordinate system angle : Expr The angle by which the new system is to be rotated axis : Vector The axis around which the rotation has to be performed location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q1 = symbols('q1') >>> N = CoordSys3D('N') >>> B = N.orient_new_axis('B', q1, N.i + 2 * N.j) """ if variable_names is None: variable_names = self._variable_names if vector_names is None: vector_names = self._vector_names orienter = AxisOrienter(angle, axis) return self.orient_new(name, orienter, location=location, vector_names=vector_names, variable_names=variable_names) def orient_new_body(self, name, angle1, angle2, angle3, rotation_order, location=None, vector_names=None, variable_names=None): """ Body orientation takes this coordinate system through three successive simple rotations. Body fixed rotations include both Euler Angles and Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles. 
Parameters ========== name : string The name of the new coordinate system angle1, angle2, angle3 : Expr Three successive angles to rotate the coordinate system by rotation_order : string String defining the order of axes for rotation location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q1, q2, q3 = symbols('q1 q2 q3') >>> N = CoordSys3D('N') A 'Body' fixed rotation is described by three angles and three body-fixed rotation axes. To orient a coordinate system D with respect to N, each sequential rotation is always about the orthogonal unit vectors fixed to D. For example, a '123' rotation will specify rotations about N.i, then D.j, then D.k. (Initially, D.i is the same as N.i.) Therefore, >>> D = N.orient_new_body('D', q1, q2, q3, '123') is the same as >>> D = N.orient_new_axis('D', q1, N.i) >>> D = D.orient_new_axis('D', q2, D.j) >>> D = D.orient_new_axis('D', q3, D.k) Acceptable rotation orders are of length 3, expressed in XYZ or 123, and cannot have a rotation about an axis twice in a row. >>> B = N.orient_new_body('B', q1, q2, q3, '123') >>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ') >>> B = N.orient_new_body('B', 0, 0, 0, 'XYX') """ orienter = BodyOrienter(angle1, angle2, angle3, rotation_order) return self.orient_new(name, orienter, location=location, vector_names=vector_names, variable_names=variable_names) def orient_new_space(self, name, angle1, angle2, angle3, rotation_order, location=None, vector_names=None, variable_names=None): """ Space rotation is similar to Body rotation, but the rotations are applied in the opposite order. Parameters ========== name : string The name of the new coordinate system angle1, angle2, angle3 : Expr Three successive angles to rotate the coordinate system by rotation_order : string String defining the order of axes for rotation location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. See Also ======== CoordSys3D.orient_new_body : method to orient via Euler angles Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q1, q2, q3 = symbols('q1 q2 q3') >>> N = CoordSys3D('N') To orient a coordinate system D with respect to N, each sequential rotation is always about N's orthogonal unit vectors. For example, a '123' rotation will specify rotations about N.i, then N.j, then N.k.
Therefore, >>> D = N.orient_new_space('D', q1, q2, q3, '312') is same as >>> B = N.orient_new_axis('B', q1, N.i) >>> C = B.orient_new_axis('C', q2, N.j) >>> D = C.orient_new_axis('D', q3, N.k) """ orienter = SpaceOrienter(angle1, angle2, angle3, rotation_order) return self.orient_new(name, orienter, location=location, vector_names=vector_names, variable_names=variable_names) def orient_new_quaternion(self, name, q0, q1, q2, q3, location=None, vector_names=None, variable_names=None): """ Quaternion orientation orients the new CoordSys3D with Quaternions, defined as a finite rotation about lambda, a unit vector, by some amount theta. This orientation is described by four parameters: q0 = cos(theta/2) q1 = lambda_x sin(theta/2) q2 = lambda_y sin(theta/2) q3 = lambda_z sin(theta/2) Quaternion does not take in a rotation order. Parameters ========== name : string The name of the new coordinate system q0, q1, q2, q3 : Expr The quaternions to rotate the coordinate system by location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3') >>> N = CoordSys3D('N') >>> B = N.orient_new_quaternion('B', q0, q1, q2, q3) """ orienter = QuaternionOrienter(q0, q1, q2, q3) return self.orient_new(name, orienter, location=location, vector_names=vector_names, variable_names=variable_names) def create_new(self, name, transformation, variable_names=None, vector_names=None): """ Returns a CoordSys3D which is connected to self by transformation. Parameters ========== name : str The name of the new CoordSys3D instance. transformation : Lambda, Tuple, str Transformation defined by transformation equations or chosen from predefined ones. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. 
Examples ======== >>> from sympy.vector import CoordSys3D >>> a = CoordSys3D('a') >>> b = a.create_new('b', transformation='spherical') >>> b.transformation_to_parent() (b.r*sin(b.theta)*cos(b.phi), b.r*sin(b.phi)*sin(b.theta), b.r*cos(b.theta)) >>> b.transformation_from_parent() (sqrt(a.x**2 + a.y**2 + a.z**2), acos(a.z/sqrt(a.x**2 + a.y**2 + a.z**2)), atan2(a.y, a.x)) """ return CoordSys3D(name, parent=self, transformation=transformation, variable_names=variable_names, vector_names=vector_names) def __init__(self, name, location=None, rotation_matrix=None, parent=None, vector_names=None, variable_names=None, latex_vects=None, pretty_vects=None, latex_scalars=None, pretty_scalars=None, transformation=None): # Dummy initializer for setting docstring pass __init__.__doc__ = __new__.__doc__ @staticmethod def _compose_rotation_and_translation(rot, translation, parent): r = lambda x, y, z: CoordSys3D._rotation_trans_equations(rot, (x, y, z)) if parent is None: return r dx, dy, dz = [translation.dot(i) for i in parent.base_vectors()] t = lambda x, y, z: ( x + dx, y + dy, z + dz, ) return lambda x, y, z: t(*r(x, y, z)) def _check_strings(arg_name, arg): errorstr = arg_name + " must be an iterable of 3 string-types" if len(arg) != 3: raise ValueError(errorstr) for s in arg: if not isinstance(s, str): raise TypeError(errorstr) # Delayed import to avoid cyclic import problems: from sympy.vector.vector import BaseVector
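# A short usage sketch for the CoordSys3D class defined above, kept under a
# __main__ guard so it does not run on import. It only exercises methods
# documented in the docstrings (orient_new_axis, rotation_matrix, create_new,
# transformation_to_parent, lame_coefficients); the symbol ``q`` and the
# system names are illustrative assumptions.
if __name__ == '__main__':
    from sympy import symbols, pprint
    from sympy.vector import CoordSys3D

    q = symbols('q')
    N = CoordSys3D('N')
    # Rotate about N.k by the angle q and inspect the direction cosine matrix.
    B = N.orient_new_axis('B', q, N.k)
    pprint(N.rotation_matrix(B))

    # Connect a spherical system to N and look at its transformation
    # equations and Lame coefficients.
    S = N.create_new('S', transformation='spherical')
    print(S.transformation_to_parent())
    print(S.lame_coefficients())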
54c7095261d22800e18dfd2940bfdfb335cdf9b28f2f50b179af0cbf1bef048a
"""The definition of the base geometrical entity with attributes common to all derived geometrical entities. Contains ======== GeometryEntity GeometricSet Notes ===== A GeometryEntity is any object that has special geometric properties. A GeometrySet is a superclass of any GeometryEntity that can also be viewed as a sympy.sets.Set. In particular, points are the only GeometryEntity not considered a Set. Rn is a GeometrySet representing n-dimensional Euclidean space. R2 and R3 are currently the only ambient spaces implemented. """ from sympy.core.basic import Basic from sympy.core.compatibility import is_sequence from sympy.core.containers import Tuple from sympy.core.sympify import sympify from sympy.functions import cos, sin from sympy.matrices import eye from sympy.multipledispatch import dispatch from sympy.sets import Set from sympy.sets.handlers.intersection import intersection_sets from sympy.sets.handlers.union import union_sets from sympy.utilities.misc import func_name # How entities are ordered; used by __cmp__ in GeometryEntity ordering_of_classes = [ "Point2D", "Point3D", "Point", "Segment2D", "Ray2D", "Line2D", "Segment3D", "Line3D", "Ray3D", "Segment", "Ray", "Line", "Plane", "Triangle", "RegularPolygon", "Polygon", "Circle", "Ellipse", "Curve", "Parabola" ] class GeometryEntity(Basic): """The base class for all geometrical entities. This class doesn't represent any particular geometric entity, it only provides the implementation of some methods common to all subclasses. """ def __cmp__(self, other): """Comparison of two GeometryEntities.""" n1 = self.__class__.__name__ n2 = other.__class__.__name__ c = (n1 > n2) - (n1 < n2) if not c: return 0 i1 = -1 for cls in self.__class__.__mro__: try: i1 = ordering_of_classes.index(cls.__name__) break except ValueError: i1 = -1 if i1 == -1: return c i2 = -1 for cls in other.__class__.__mro__: try: i2 = ordering_of_classes.index(cls.__name__) break except ValueError: i2 = -1 if i2 == -1: return c return (i1 > i2) - (i1 < i2) def __contains__(self, other): """Subclasses should implement this method for anything more complex than equality.""" if type(self) == type(other): return self == other raise NotImplementedError() def __getnewargs__(self): """Returns a tuple that will be passed to __new__ on unpickling.""" return tuple(self.args) def __ne__(self, o): """Test inequality of two geometrical entities.""" return not self == o def __new__(cls, *args, **kwargs): # Points are sequences, but they should not # be converted to Tuples, so use this detection function instead. 
def is_seq_and_not_point(a): # we cannot use isinstance(a, Point) since we cannot import Point if hasattr(a, 'is_Point') and a.is_Point: return False return is_sequence(a) args = [Tuple(*a) if is_seq_and_not_point(a) else sympify(a) for a in args] return Basic.__new__(cls, *args) def __radd__(self, a): """Implementation of reverse add method.""" return a.__add__(self) def __rtruediv__(self, a): """Implementation of reverse division method.""" return a.__truediv__(self) def __repr__(self): """String representation of a GeometryEntity that can be evaluated by sympy.""" return type(self).__name__ + repr(self.args) def __rmul__(self, a): """Implementation of reverse multiplication method.""" return a.__mul__(self) def __rsub__(self, a): """Implementation of reverse subtraction method.""" return a.__sub__(self) def __str__(self): """String representation of a GeometryEntity.""" from sympy.printing import sstr return type(self).__name__ + sstr(self.args) def _eval_subs(self, old, new): from sympy.geometry.point import Point, Point3D if is_sequence(old) or is_sequence(new): if isinstance(self, Point3D): old = Point3D(old) new = Point3D(new) else: old = Point(old) new = Point(new) return self._subs(old, new) def _repr_svg_(self): """SVG representation of a GeometryEntity suitable for IPython""" from sympy.core.evalf import N try: bounds = self.bounds except (NotImplementedError, TypeError): # if we have no SVG representation, return None so IPython # will fall back to the next representation return None if any([not x.is_number or not x.is_finite for x in bounds]): return None svg_top = '''<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="{1}" height="{2}" viewBox="{0}" preserveAspectRatio="xMinYMin meet"> <defs> <marker id="markerCircle" markerWidth="8" markerHeight="8" refx="5" refy="5" markerUnits="strokeWidth"> <circle cx="5" cy="5" r="1.5" style="stroke: none; fill:#000000;"/> </marker> <marker id="markerArrow" markerWidth="13" markerHeight="13" refx="2" refy="4" orient="auto" markerUnits="strokeWidth"> <path d="M2,2 L2,6 L6,4" style="fill: #000000;" /> </marker> <marker id="markerReverseArrow" markerWidth="13" markerHeight="13" refx="6" refy="4" orient="auto" markerUnits="strokeWidth"> <path d="M6,2 L6,6 L2,4" style="fill: #000000;" /> </marker> </defs>''' # Establish SVG canvas that will fit all the data + small space xmin, ymin, xmax, ymax = map(N, bounds) if xmin == xmax and ymin == ymax: # This is a point; buffer using an arbitrary size xmin, ymin, xmax, ymax = xmin - .5, ymin -.5, xmax + .5, ymax + .5 else: # Expand bounds by a fraction of the data ranges expand = 0.1 # or 10%; this keeps arrowheads in view (R plots use 4%) widest_part = max([xmax - xmin, ymax - ymin]) expand_amount = widest_part * expand xmin -= expand_amount ymin -= expand_amount xmax += expand_amount ymax += expand_amount dx = xmax - xmin dy = ymax - ymin width = min([max([100., dx]), 300]) height = min([max([100., dy]), 300]) scale_factor = 1. 
if max(width, height) == 0 else max(dx, dy) / max(width, height) try: svg = self._svg(scale_factor) except (NotImplementedError, TypeError): # if we have no SVG representation, return None so IPython # will fall back to the next representation return None view_box = "{} {} {} {}".format(xmin, ymin, dx, dy) transform = "matrix(1,0,0,-1,0,{})".format(ymax + ymin) svg_top = svg_top.format(view_box, width, height) return svg_top + ( '<g transform="{}">{}</g></svg>' ).format(transform, svg) def _svg(self, scale_factor=1., fill_color="#66cc99"): """Returns SVG path element for the GeometryEntity. Parameters ========== scale_factor : float Multiplication factor for the SVG stroke-width. Default is 1. fill_color : str, optional Hex string for fill color. Default is "#66cc99". """ raise NotImplementedError() def _sympy_(self): return self @property def ambient_dimension(self): """What is the dimension of the space that the object is contained in?""" raise NotImplementedError() @property def bounds(self): """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding rectangle for the geometric figure. """ raise NotImplementedError() def encloses(self, o): """ Return True if o is inside (not on or outside) the boundaries of self. The object will be decomposed into Points and individual Entities need only define an encloses_point method for their class. See Also ======== sympy.geometry.ellipse.Ellipse.encloses_point sympy.geometry.polygon.Polygon.encloses_point Examples ======== >>> from sympy import RegularPolygon, Point, Polygon >>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices) >>> t2 = Polygon(*RegularPolygon(Point(0, 0), 2, 3).vertices) >>> t2.encloses(t) True >>> t.encloses(t2) False """ from sympy.geometry.point import Point from sympy.geometry.line import Segment, Ray, Line from sympy.geometry.ellipse import Ellipse from sympy.geometry.polygon import Polygon, RegularPolygon if isinstance(o, Point): return self.encloses_point(o) elif isinstance(o, Segment): return all(self.encloses_point(x) for x in o.points) elif isinstance(o, Ray) or isinstance(o, Line): return False elif isinstance(o, Ellipse): return self.encloses_point(o.center) and \ self.encloses_point( Point(o.center.x + o.hradius, o.center.y)) and \ not self.intersection(o) elif isinstance(o, Polygon): if isinstance(o, RegularPolygon): if not self.encloses_point(o.center): return False return all(self.encloses_point(v) for v in o.vertices) raise NotImplementedError() def equals(self, o): return self == o def intersection(self, o): """ Returns a list of all of the intersections of self with o. Notes ===== An entity is not required to implement this method. If two different types of entities can intersect, the item with higher index in ordering_of_classes should implement intersections with anything having a lower index. See Also ======== sympy.geometry.util.intersection """ raise NotImplementedError() def is_similar(self, other): """Is this geometrical entity similar to another geometrical entity? Two entities are similar if a uniform scaling (enlarging or shrinking) of one of the entities will allow one to obtain the other. Notes ===== This method is not intended to be used directly but rather through the `are_similar` function found in util.py. An entity is not required to implement this method. If two different types of entities can be similar, it is only required that one of them be able to determine this. 
See Also ======== scale """ raise NotImplementedError() def reflect(self, line): """ Reflects an object across a line. Parameters ========== line: Line Examples ======== >>> from sympy import pi, sqrt, Line, RegularPolygon >>> l = Line((0, pi), slope=sqrt(2)) >>> pent = RegularPolygon((1, 2), 1, 5) >>> rpent = pent.reflect(l) >>> rpent RegularPolygon(Point2D(-2*sqrt(2)*pi/3 - 1/3 + 4*sqrt(2)/3, 2/3 + 2*sqrt(2)/3 + 2*pi/3), -1, 5, -atan(2*sqrt(2)) + 3*pi/5) >>> from sympy import pi, Line, Circle, Point >>> l = Line((0, pi), slope=1) >>> circ = Circle(Point(0, 0), 5) >>> rcirc = circ.reflect(l) >>> rcirc Circle(Point2D(-pi, pi), -5) """ from sympy import atan, Point, Dummy, oo g = self l = line o = Point(0, 0) if l.slope.is_zero: y = l.args[0].y if not y: # x-axis return g.scale(y=-1) reps = [(p, p.translate(y=2*(y - p.y))) for p in g.atoms(Point)] elif l.slope is oo: x = l.args[0].x if not x: # y-axis return g.scale(x=-1) reps = [(p, p.translate(x=2*(x - p.x))) for p in g.atoms(Point)] else: if not hasattr(g, 'reflect') and not all( isinstance(arg, Point) for arg in g.args): raise NotImplementedError( 'reflect undefined or non-Point args in %s' % g) a = atan(l.slope) c = l.coefficients d = -c[-1]/c[1] # y-intercept # apply the transform to a single point x, y = Dummy(), Dummy() xf = Point(x, y) xf = xf.translate(y=-d).rotate(-a, o).scale(y=-1 ).rotate(a, o).translate(y=d) # replace every point using that transform reps = [(p, xf.xreplace({x: p.x, y: p.y})) for p in g.atoms(Point)] return g.xreplace(dict(reps)) def rotate(self, angle, pt=None): """Rotate ``angle`` radians counterclockwise about Point ``pt``. The default pt is the origin, Point(0, 0) See Also ======== scale, translate Examples ======== >>> from sympy import Point, RegularPolygon, Polygon, pi >>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices) >>> t # vertex on x axis Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2)) >>> t.rotate(pi/2) # vertex on y axis now Triangle(Point2D(0, 1), Point2D(-sqrt(3)/2, -1/2), Point2D(sqrt(3)/2, -1/2)) """ newargs = [] for a in self.args: if isinstance(a, GeometryEntity): newargs.append(a.rotate(angle, pt)) else: newargs.append(a) return type(self)(*newargs) def scale(self, x=1, y=1, pt=None): """Scale the object by multiplying the x,y-coordinates by x and y. If pt is given, the scaling is done relative to that point; the object is shifted by -pt, scaled, and shifted by pt. See Also ======== rotate, translate Examples ======== >>> from sympy import RegularPolygon, Point, Polygon >>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices) >>> t Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2)) >>> t.scale(2) Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)/2), Point2D(-1, -sqrt(3)/2)) >>> t.scale(2, 2) Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)), Point2D(-1, -sqrt(3))) """ from sympy.geometry.point import Point if pt: pt = Point(pt, dim=2) return self.translate(*(-pt).args).scale(x, y).translate(*pt.args) return type(self)(*[a.scale(x, y) for a in self.args]) # if this fails, override this class def translate(self, x=0, y=0): """Shift the object by adding to the x,y-coordinates the values x and y. 
See Also ======== rotate, scale Examples ======== >>> from sympy import RegularPolygon, Point, Polygon >>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices) >>> t Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2)) >>> t.translate(2) Triangle(Point2D(3, 0), Point2D(3/2, sqrt(3)/2), Point2D(3/2, -sqrt(3)/2)) >>> t.translate(2, 2) Triangle(Point2D(3, 2), Point2D(3/2, sqrt(3)/2 + 2), Point2D(3/2, 2 - sqrt(3)/2)) """ newargs = [] for a in self.args: if isinstance(a, GeometryEntity): newargs.append(a.translate(x, y)) else: newargs.append(a) return self.func(*newargs) def parameter_value(self, other, t): """Return the parameter corresponding to the given point. Evaluating an arbitrary point of the entity at this parameter value will return the given point. Examples ======== >>> from sympy import Line, Point >>> from sympy.abc import t >>> a = Point(0, 0) >>> b = Point(2, 2) >>> Line(a, b).parameter_value((1, 1), t) {t: 1/2} >>> Line(a, b).arbitrary_point(t).subs(_) Point2D(1, 1) """ from sympy.geometry.point import Point from sympy.core.symbol import Dummy from sympy.solvers.solvers import solve if not isinstance(other, GeometryEntity): other = Point(other, dim=self.ambient_dimension) if not isinstance(other, Point): raise ValueError("other must be a point") T = Dummy('t', real=True) sol = solve(self.arbitrary_point(T) - other, T, dict=True) if not sol: raise ValueError("Given point is not on %s" % func_name(self)) return {t: sol[0][T]} class GeometrySet(GeometryEntity, Set): """Parent class of all GeometryEntity that are also Sets (compatible with sympy.sets) """ def _contains(self, other): """sympy.sets uses the _contains method, so include it for compatibility.""" if isinstance(other, Set) and other.is_FiniteSet: return all(self.__contains__(i) for i in other) return self.__contains__(other) @dispatch(GeometrySet, Set) # type:ignore # noqa:F811 def union_sets(self, o): # noqa:F811 """ Returns the union of self and o for use with sympy.sets.Set, if possible. """ from sympy.sets import Union, FiniteSet # if its a FiniteSet, merge any points # we contain and return a union with the rest if o.is_FiniteSet: other_points = [p for p in o if not self._contains(p)] if len(other_points) == len(o): return None return Union(self, FiniteSet(*other_points)) if self._contains(o): return self return None @dispatch(GeometrySet, Set) # type: ignore # noqa:F811 def intersection_sets(self, o): # noqa:F811 """ Returns a sympy.sets.Set of intersection objects, if possible. """ from sympy.sets import FiniteSet, Union from sympy.geometry import Point try: # if o is a FiniteSet, find the intersection directly # to avoid infinite recursion if o.is_FiniteSet: inter = FiniteSet(*(p for p in o if self.contains(p))) else: inter = self.intersection(o) except NotImplementedError: # sympy.sets.Set.reduce expects None if an object # doesn't know how to simplify return None # put the points in a FiniteSet points = FiniteSet(*[p for p in inter if isinstance(p, Point)]) non_points = [p for p in inter if not isinstance(p, Point)] return Union(*(non_points + [points])) def translate(x, y): """Return the matrix to translate a 2-D point by x and y.""" rv = eye(3) rv[2, 0] = x rv[2, 1] = y return rv def scale(x, y, pt=None): """Return the matrix to multiply a 2-D point's coordinates by x and y. 
If pt is given, the scaling is done relative to that point.""" rv = eye(3) rv[0, 0] = x rv[1, 1] = y if pt: from sympy.geometry.point import Point pt = Point(pt, dim=2) tr1 = translate(*(-pt).args) tr2 = translate(*pt.args) return tr1*rv*tr2 return rv def rotate(th): """Return the matrix to rotate a 2-D point about the origin by ``th``. The angle is measured in radians. To rotate a point about a point other than the origin, translate the point, do the rotation, and translate it back: >>> from sympy.geometry.entity import rotate, translate >>> from sympy import Point, pi >>> rot_about_11 = translate(-1, -1)*rotate(pi/2)*translate(1, 1) >>> Point(1, 1).transform(rot_about_11) Point2D(1, 1) >>> Point(0, 0).transform(rot_about_11) Point2D(2, 0) """ s = sin(th) rv = eye(3)*cos(th) rv[0, 1] = s rv[1, 0] = -s rv[2, 2] = 1 return rv
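# A small, self-contained sketch of how the translate/scale/rotate helpers
# above compose into a single 3x3 affine matrix. The rotation example mirrors
# the doctest in ``rotate``; Point.transform and the sample points used for
# scaling are the only assumptions beyond what the docstrings show.
if __name__ == '__main__':
    from sympy import Point, pi

    # Rotate about the point (1, 1): translate it to the origin, rotate,
    # then translate back (the matrices act on row vectors [x, y, 1]).
    rot_about_11 = translate(-1, -1)*rotate(pi/2)*translate(1, 1)
    print(Point(1, 1).transform(rot_about_11))   # Point2D(1, 1)
    print(Point(0, 0).transform(rot_about_11))   # Point2D(2, 0)

    # Scale x by 2 relative to the point (1, 0): (3, 5) -> (5, 5).
    print(Point(3, 5).transform(scale(2, 1, pt=(1, 0))))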
e38650ffaec8d4b3d4cffa7cc9680e0fcf3c9c4a4cda712d9008c5fce971ca8f
"""Transform a string with Python-like source code into SymPy expression. """ from tokenize import (generate_tokens, untokenize, TokenError, NUMBER, STRING, NAME, OP, ENDMARKER, ERRORTOKEN, NEWLINE) from keyword import iskeyword import ast import unicodedata from io import StringIO from sympy.core.compatibility import iterable from sympy.core.basic import Basic from sympy.core import Symbol from sympy.core.function import arity from sympy.utilities.misc import filldedent, func_name def _token_splittable(token): """ Predicate for whether a token name can be split into multiple tokens. A token is splittable if it does not contain an underscore character and it is not the name of a Greek letter. This is used to implicitly convert expressions like 'xyz' into 'x*y*z'. """ if '_' in token: return False else: try: return not unicodedata.lookup('GREEK SMALL LETTER ' + token) except KeyError: pass if len(token) > 1: return True return False def _token_callable(token, local_dict, global_dict, nextToken=None): """ Predicate for whether a token name represents a callable function. Essentially wraps ``callable``, but looks up the token name in the locals and globals. """ func = local_dict.get(token[1]) if not func: func = global_dict.get(token[1]) return callable(func) and not isinstance(func, Symbol) def _add_factorial_tokens(name, result): if result == [] or result[-1][1] == '(': raise TokenError() beginning = [(NAME, name), (OP, '(')] end = [(OP, ')')] diff = 0 length = len(result) for index, token in enumerate(result[::-1]): toknum, tokval = token i = length - index - 1 if tokval == ')': diff += 1 elif tokval == '(': diff -= 1 if diff == 0: if i - 1 >= 0 and result[i - 1][0] == NAME: return result[:i - 1] + beginning + result[i - 1:] + end else: return result[:i] + beginning + result[i:] + end return result class AppliedFunction: """ A group of tokens representing a function and its arguments. `exponent` is for handling the shorthand sin^2, ln^2, etc. """ def __init__(self, function, args, exponent=None): if exponent is None: exponent = [] self.function = function self.args = args self.exponent = exponent self.items = ['function', 'args', 'exponent'] def expand(self): """Return a list of tokens representing the function""" result = [] result.append(self.function) result.extend(self.args) return result def __getitem__(self, index): return getattr(self, self.items[index]) def __repr__(self): return "AppliedFunction(%s, %s, %s)" % (self.function, self.args, self.exponent) class ParenthesisGroup(list): """List of tokens representing an expression in parentheses.""" pass def _flatten(result): result2 = [] for tok in result: if isinstance(tok, AppliedFunction): result2.extend(tok.expand()) else: result2.append(tok) return result2 def _group_parentheses(recursor): def _inner(tokens, local_dict, global_dict): """Group tokens between parentheses with ParenthesisGroup. Also processes those tokens recursively. 
""" result = [] stacks = [] stacklevel = 0 for token in tokens: if token[0] == OP: if token[1] == '(': stacks.append(ParenthesisGroup([])) stacklevel += 1 elif token[1] == ')': stacks[-1].append(token) stack = stacks.pop() if len(stacks) > 0: # We don't recurse here since the upper-level stack # would reprocess these tokens stacks[-1].extend(stack) else: # Recurse here to handle nested parentheses # Strip off the outer parentheses to avoid an infinite loop inner = stack[1:-1] inner = recursor(inner, local_dict, global_dict) parenGroup = [stack[0]] + inner + [stack[-1]] result.append(ParenthesisGroup(parenGroup)) stacklevel -= 1 continue if stacklevel: stacks[-1].append(token) else: result.append(token) if stacklevel: raise TokenError("Mismatched parentheses") return result return _inner def _apply_functions(tokens, local_dict, global_dict): """Convert a NAME token + ParenthesisGroup into an AppliedFunction. Note that ParenthesisGroups, if not applied to any function, are converted back into lists of tokens. """ result = [] symbol = None for tok in tokens: if tok[0] == NAME: symbol = tok result.append(tok) elif isinstance(tok, ParenthesisGroup): if symbol and _token_callable(symbol, local_dict, global_dict): result[-1] = AppliedFunction(symbol, tok) symbol = None else: result.extend(tok) else: symbol = None result.append(tok) return result def _implicit_multiplication(tokens, local_dict, global_dict): """Implicitly adds '*' tokens. Cases: - Two AppliedFunctions next to each other ("sin(x)cos(x)") - AppliedFunction next to an open parenthesis ("sin x (cos x + 1)") - A close parenthesis next to an AppliedFunction ("(x+2)sin x")\ - A close parenthesis next to an open parenthesis ("(x+2)(x+3)") - AppliedFunction next to an implicitly applied function ("sin(x)cos x") """ result = [] for tok, nextTok in zip(tokens, tokens[1:]): result.append(tok) if (isinstance(tok, AppliedFunction) and isinstance(nextTok, AppliedFunction)): result.append((OP, '*')) elif (isinstance(tok, AppliedFunction) and nextTok[0] == OP and nextTok[1] == '('): # Applied function followed by an open parenthesis if tok.function[1] == "Function": result[-1].function = (result[-1].function[0], 'Symbol') result.append((OP, '*')) elif (tok[0] == OP and tok[1] == ')' and isinstance(nextTok, AppliedFunction)): # Close parenthesis followed by an applied function result.append((OP, '*')) elif (tok[0] == OP and tok[1] == ')' and nextTok[0] == NAME): # Close parenthesis followed by an implicitly applied function result.append((OP, '*')) elif (tok[0] == nextTok[0] == OP and tok[1] == ')' and nextTok[1] == '('): # Close parenthesis followed by an open parenthesis result.append((OP, '*')) elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME): # Applied function followed by implicitly applied function result.append((OP, '*')) elif (tok[0] == NAME and not _token_callable(tok, local_dict, global_dict) and nextTok[0] == OP and nextTok[1] == '('): # Constant followed by parenthesis result.append((OP, '*')) elif (tok[0] == NAME and not _token_callable(tok, local_dict, global_dict) and nextTok[0] == NAME and not _token_callable(nextTok, local_dict, global_dict)): # Constant followed by constant result.append((OP, '*')) elif (tok[0] == NAME and not _token_callable(tok, local_dict, global_dict) and (isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)): # Constant followed by (implicitly applied) function result.append((OP, '*')) if tokens: result.append(tokens[-1]) return result def _implicit_application(tokens, local_dict, 
global_dict): """Adds parentheses as needed after functions.""" result = [] appendParen = 0 # number of closing parentheses to add skip = 0 # number of tokens to delay before adding a ')' (to # capture **, ^, etc.) exponentSkip = False # skipping tokens before inserting parentheses to # work with function exponentiation for tok, nextTok in zip(tokens, tokens[1:]): result.append(tok) if (tok[0] == NAME and nextTok[0] not in [OP, ENDMARKER, NEWLINE]): if _token_callable(tok, local_dict, global_dict, nextTok): result.append((OP, '(')) appendParen += 1 # name followed by exponent - function exponentiation elif (tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**'): if _token_callable(tok, local_dict, global_dict): exponentSkip = True elif exponentSkip: # if the last token added was an applied function (i.e. the # power of the function exponent) OR a multiplication (as # implicit multiplication would have added an extraneous # multiplication) if (isinstance(tok, AppliedFunction) or (tok[0] == OP and tok[1] == '*')): # don't add anything if the next token is a multiplication # or if there's already a parenthesis (if parenthesis, still # stop skipping tokens) if not (nextTok[0] == OP and nextTok[1] == '*'): if not(nextTok[0] == OP and nextTok[1] == '('): result.append((OP, '(')) appendParen += 1 exponentSkip = False elif appendParen: if nextTok[0] == OP and nextTok[1] in ('^', '**', '*'): skip = 1 continue if skip: skip -= 1 continue result.append((OP, ')')) appendParen -= 1 if tokens: result.append(tokens[-1]) if appendParen: result.extend([(OP, ')')] * appendParen) return result def function_exponentiation(tokens, local_dict, global_dict): """Allows functions to be exponentiated, e.g. ``cos**2(x)``. Examples ======== >>> from sympy.parsing.sympy_parser import (parse_expr, ... standard_transformations, function_exponentiation) >>> transformations = standard_transformations + (function_exponentiation,) >>> parse_expr('sin**4(x)', transformations=transformations) sin(x)**4 """ result = [] exponent = [] consuming_exponent = False level = 0 for tok, nextTok in zip(tokens, tokens[1:]): if tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**': if _token_callable(tok, local_dict, global_dict): consuming_exponent = True elif consuming_exponent: if tok[0] == NAME and tok[1] == 'Function': tok = (NAME, 'Symbol') exponent.append(tok) # only want to stop after hitting ) if tok[0] == nextTok[0] == OP and tok[1] == ')' and nextTok[1] == '(': consuming_exponent = False # if implicit multiplication was used, we may have )*( instead if tok[0] == nextTok[0] == OP and tok[1] == '*' and nextTok[1] == '(': consuming_exponent = False del exponent[-1] continue elif exponent and not consuming_exponent: if tok[0] == OP: if tok[1] == '(': level += 1 elif tok[1] == ')': level -= 1 if level == 0: result.append(tok) result.extend(exponent) exponent = [] continue result.append(tok) if tokens: result.append(tokens[-1]) if exponent: result.extend(exponent) return result def split_symbols_custom(predicate): """Creates a transformation that splits symbol names. ``predicate`` should return True if the symbol name is to be split. For instance, to retain the default behavior but avoid splitting certain symbol names, a predicate like this would work: >>> from sympy.parsing.sympy_parser import (parse_expr, _token_splittable, ... standard_transformations, implicit_multiplication, ... split_symbols_custom) >>> def can_split(symbol): ... if symbol not in ('list', 'of', 'unsplittable', 'names'): ... 
return _token_splittable(symbol) ... return False ... >>> transformation = split_symbols_custom(can_split) >>> parse_expr('unsplittable', transformations=standard_transformations + ... (transformation, implicit_multiplication)) unsplittable """ def _split_symbols(tokens, local_dict, global_dict): result = [] split = False split_previous=False for tok in tokens: if split_previous: # throw out closing parenthesis of Symbol that was split split_previous=False continue split_previous=False if tok[0] == NAME and tok[1] in ['Symbol', 'Function']: split = True elif split and tok[0] == NAME: symbol = tok[1][1:-1] if predicate(symbol): tok_type = result[-2][1] # Symbol or Function del result[-2:] # Get rid of the call to Symbol i = 0 while i < len(symbol): char = symbol[i] if char in local_dict or char in global_dict: result.extend([(NAME, "%s" % char)]) elif char.isdigit(): char = [char] for i in range(i + 1, len(symbol)): if not symbol[i].isdigit(): i -= 1 break char.append(symbol[i]) char = ''.join(char) result.extend([(NAME, 'Number'), (OP, '('), (NAME, "'%s'" % char), (OP, ')')]) else: use = tok_type if i == len(symbol) else 'Symbol' result.extend([(NAME, use), (OP, '('), (NAME, "'%s'" % char), (OP, ')')]) i += 1 # Set split_previous=True so will skip # the closing parenthesis of the original Symbol split = False split_previous = True continue else: split = False result.append(tok) return result return _split_symbols #: Splits symbol names for implicit multiplication. #: #: Intended to let expressions like ``xyz`` be parsed as ``x*y*z``. Does not #: split Greek character names, so ``theta`` will *not* become #: ``t*h*e*t*a``. Generally this should be used with #: ``implicit_multiplication``. split_symbols = split_symbols_custom(_token_splittable) def implicit_multiplication(result, local_dict, global_dict): """Makes the multiplication operator optional in most cases. Use this before :func:`implicit_application`, otherwise expressions like ``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``. Examples ======== >>> from sympy.parsing.sympy_parser import (parse_expr, ... standard_transformations, implicit_multiplication) >>> transformations = standard_transformations + (implicit_multiplication,) >>> parse_expr('3 x y', transformations=transformations) 3*x*y """ # These are interdependent steps, so we don't expose them separately for step in (_group_parentheses(implicit_multiplication), _apply_functions, _implicit_multiplication): result = step(result, local_dict, global_dict) result = _flatten(result) return result def implicit_application(result, local_dict, global_dict): """Makes parentheses optional in some cases for function calls. Use this after :func:`implicit_multiplication`, otherwise expressions like ``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``. Examples ======== >>> from sympy.parsing.sympy_parser import (parse_expr, ... standard_transformations, implicit_application) >>> transformations = standard_transformations + (implicit_application,) >>> parse_expr('cot z + csc z', transformations=transformations) cot(z) + csc(z) """ for step in (_group_parentheses(implicit_application), _apply_functions, _implicit_application,): result = step(result, local_dict, global_dict) result = _flatten(result) return result def implicit_multiplication_application(result, local_dict, global_dict): """Allows a slightly relaxed syntax. - Parentheses for single-argument method calls are optional. - Multiplication is implicit. - Symbol names can be split (i.e. 
spaces are not needed between symbols). - Functions can be exponentiated. Examples ======== >>> from sympy.parsing.sympy_parser import (parse_expr, ... standard_transformations, implicit_multiplication_application) >>> parse_expr("10sin**2 x**2 + 3xyz + tan theta", ... transformations=(standard_transformations + ... (implicit_multiplication_application,))) 3*x*y*z + 10*sin(x**2)**2 + tan(theta) """ for step in (split_symbols, implicit_multiplication, implicit_application, function_exponentiation): result = step(result, local_dict, global_dict) return result def auto_symbol(tokens, local_dict, global_dict): """Inserts calls to ``Symbol``/``Function`` for undefined variables.""" result = [] prevTok = (None, None) tokens.append((None, None)) # so zip traverses all tokens for tok, nextTok in zip(tokens, tokens[1:]): tokNum, tokVal = tok nextTokNum, nextTokVal = nextTok if tokNum == NAME: name = tokVal if (name in ['True', 'False', 'None'] or iskeyword(name) # Don't convert attribute access or (prevTok[0] == OP and prevTok[1] == '.') # Don't convert keyword arguments or (prevTok[0] == OP and prevTok[1] in ('(', ',') and nextTokNum == OP and nextTokVal == '=')): result.append((NAME, name)) continue elif name in local_dict: if isinstance(local_dict[name], Symbol) and nextTokVal == '(': result.extend([(NAME, 'Function'), (OP, '('), (NAME, repr(str(local_dict[name]))), (OP, ')')]) else: result.append((NAME, name)) continue elif name in global_dict: obj = global_dict[name] if isinstance(obj, (Basic, type)) or callable(obj): result.append((NAME, name)) continue result.extend([ (NAME, 'Symbol' if nextTokVal != '(' else 'Function'), (OP, '('), (NAME, repr(str(name))), (OP, ')'), ]) else: result.append((tokNum, tokVal)) prevTok = (tokNum, tokVal) return result def lambda_notation(tokens, local_dict, global_dict): """Substitutes "lambda" with its Sympy equivalent Lambda(). However, the conversion doesn't take place if only "lambda" is passed because that is a syntax error. 
""" result = [] flag = False toknum, tokval = tokens[0] tokLen = len(tokens) if toknum == NAME and tokval == 'lambda': if tokLen == 2 or tokLen == 3 and tokens[1][0] == NEWLINE: # In Python 3.6.7+, inputs without a newline get NEWLINE added to # the tokens result.extend(tokens) elif tokLen > 2: result.extend([ (NAME, 'Lambda'), (OP, '('), (OP, '('), (OP, ')'), (OP, ')'), ]) for tokNum, tokVal in tokens[1:]: if tokNum == OP and tokVal == ':': tokVal = ',' flag = True if not flag and tokNum == OP and tokVal in ['*', '**']: raise TokenError("Starred arguments in lambda not supported") if flag: result.insert(-1, (tokNum, tokVal)) else: result.insert(-2, (tokNum, tokVal)) else: result.extend(tokens) return result def factorial_notation(tokens, local_dict, global_dict): """Allows standard notation for factorial.""" result = [] nfactorial = 0 for toknum, tokval in tokens: if toknum == ERRORTOKEN: op = tokval if op == '!': nfactorial += 1 else: nfactorial = 0 result.append((OP, op)) else: if nfactorial == 1: result = _add_factorial_tokens('factorial', result) elif nfactorial == 2: result = _add_factorial_tokens('factorial2', result) elif nfactorial > 2: raise TokenError nfactorial = 0 result.append((toknum, tokval)) return result def convert_xor(tokens, local_dict, global_dict): """Treats XOR, ``^``, as exponentiation, ``**``.""" result = [] for toknum, tokval in tokens: if toknum == OP: if tokval == '^': result.append((OP, '**')) else: result.append((toknum, tokval)) else: result.append((toknum, tokval)) return result def repeated_decimals(tokens, local_dict, global_dict): """ Allows 0.2[1] notation to represent the repeated decimal 0.2111... (19/90) Run this before auto_number. """ result = [] def is_digit(s): return all(i in '0123456789_' for i in s) # num will running match any DECIMAL [ INTEGER ] num = [] for toknum, tokval in tokens: if toknum == NUMBER: if (not num and '.' in tokval and 'e' not in tokval.lower() and 'j' not in tokval.lower()): num.append((toknum, tokval)) elif is_digit(tokval)and len(num) == 2: num.append((toknum, tokval)) elif is_digit(tokval) and len(num) == 3 and is_digit(num[-1][1]): # Python 2 tokenizes 00123 as '00', '123' # Python 3 tokenizes 01289 as '012', '89' num.append((toknum, tokval)) else: num = [] elif toknum == OP: if tokval == '[' and len(num) == 1: num.append((OP, tokval)) elif tokval == ']' and len(num) >= 3: num.append((OP, tokval)) elif tokval == '.' and not num: # handle .[1] num.append((NUMBER, '0.')) else: num = [] else: num = [] result.append((toknum, tokval)) if num and num[-1][1] == ']': # pre.post[repetend] = a + b/c + d/e where a = pre, b/c = post, # and d/e = repetend result = result[:-len(num)] pre, post = num[0][1].split('.') repetend = num[2][1] if len(num) == 5: repetend += num[3][1] pre = pre.replace('_', '') post = post.replace('_', '') repetend = repetend.replace('_', '') zeros = '0'*len(post) post, repetends = [w.lstrip('0') for w in [post, repetend]] # or else interpreted as octal a = pre or '0' b, c = post or '0', '1' + zeros d, e = repetends, ('9'*len(repetend)) + zeros seq = [ (OP, '('), (NAME, 'Integer'), (OP, '('), (NUMBER, a), (OP, ')'), (OP, '+'), (NAME, 'Rational'), (OP, '('), (NUMBER, b), (OP, ','), (NUMBER, c), (OP, ')'), (OP, '+'), (NAME, 'Rational'), (OP, '('), (NUMBER, d), (OP, ','), (NUMBER, e), (OP, ')'), (OP, ')'), ] result.extend(seq) num = [] return result def auto_number(tokens, local_dict, global_dict): """ Converts numeric literals to use SymPy equivalents. 
Complex numbers use ``I``, integer literals use ``Integer``, and float literals use ``Float``. """ result = [] for toknum, tokval in tokens: if toknum == NUMBER: number = tokval postfix = [] if number.endswith('j') or number.endswith('J'): number = number[:-1] postfix = [(OP, '*'), (NAME, 'I')] if '.' in number or (('e' in number or 'E' in number) and not (number.startswith('0x') or number.startswith('0X'))): seq = [(NAME, 'Float'), (OP, '('), (NUMBER, repr(str(number))), (OP, ')')] else: seq = [(NAME, 'Integer'), (OP, '('), ( NUMBER, number), (OP, ')')] result.extend(seq + postfix) else: result.append((toknum, tokval)) return result def rationalize(tokens, local_dict, global_dict): """Converts floats into ``Rational``. Run AFTER ``auto_number``.""" result = [] passed_float = False for toknum, tokval in tokens: if toknum == NAME: if tokval == 'Float': passed_float = True tokval = 'Rational' result.append((toknum, tokval)) elif passed_float == True and toknum == NUMBER: passed_float = False result.append((STRING, tokval)) else: result.append((toknum, tokval)) return result def _transform_equals_sign(tokens, local_dict, global_dict): """Transforms the equals sign ``=`` to instances of Eq. This is a helper function for `convert_equals_signs`. Works with expressions containing one equals sign and no nesting. Expressions like `(1=2)=False` won't work with this and should be used with `convert_equals_signs`. Examples: 1=2 to Eq(1,2) 1*2=x to Eq(1*2, x) This does not deal with function arguments yet. """ result = [] if (OP, "=") in tokens: result.append((NAME, "Eq")) result.append((OP, "(")) for index, token in enumerate(tokens): if token == (OP, "="): result.append((OP, ",")) continue result.append(token) result.append((OP, ")")) else: result = tokens return result def convert_equals_signs(result, local_dict, global_dict): """ Transforms all the equals signs ``=`` to instances of Eq. Parses the equals signs in the expression and replaces them with appropriate Eq instances.Also works with nested equals signs. Does not yet play well with function arguments. For example, the expression `(x=y)` is ambiguous and can be interpreted as x being an argument to a function and `convert_equals_signs` won't work for this. See also ======== convert_equality_operators Examples ======== >>> from sympy.parsing.sympy_parser import (parse_expr, ... standard_transformations, convert_equals_signs) >>> parse_expr("1*2=x", transformations=( ... standard_transformations + (convert_equals_signs,))) Eq(2, x) >>> parse_expr("(1*2=x)=False", transformations=( ... standard_transformations + (convert_equals_signs,))) Eq(Eq(2, x), False) """ for step in (_group_parentheses(convert_equals_signs), _apply_functions, _transform_equals_sign): result = step(result, local_dict, global_dict) result = _flatten(result) return result #: Standard transformations for :func:`parse_expr`. #: Inserts calls to :class:`~.Symbol`, :class:`~.Integer`, and other SymPy #: datatypes and allows the use of standard factorial notation (e.g. ``x!``). standard_transformations = (lambda_notation, auto_symbol, repeated_decimals, auto_number, factorial_notation) def stringify_expr(s, local_dict, global_dict, transformations): """ Converts the string ``s`` to Python code, in ``local_dict`` Generally, ``parse_expr`` should be used. 
""" tokens = [] input_code = StringIO(s.strip()) for toknum, tokval, _, _, _ in generate_tokens(input_code.readline): tokens.append((toknum, tokval)) for transform in transformations: tokens = transform(tokens, local_dict, global_dict) return untokenize(tokens) def eval_expr(code, local_dict, global_dict): """ Evaluate Python code generated by ``stringify_expr``. Generally, ``parse_expr`` should be used. """ expr = eval( code, global_dict, local_dict) # take local objects in preference return expr def parse_expr(s, local_dict=None, transformations=standard_transformations, global_dict=None, evaluate=True): """Converts the string ``s`` to a SymPy expression, in ``local_dict`` Parameters ========== s : str The string to parse. local_dict : dict, optional A dictionary of local variables to use when parsing. global_dict : dict, optional A dictionary of global variables. By default, this is initialized with ``from sympy import *``; provide this parameter to override this behavior (for instance, to parse ``"Q & S"``). transformations : tuple, optional A tuple of transformation functions used to modify the tokens of the parsed expression before evaluation. The default transformations convert numeric literals into their SymPy equivalents, convert undefined variables into SymPy symbols, and allow the use of standard mathematical factorial notation (e.g. ``x!``). evaluate : bool, optional When False, the order of the arguments will remain as they were in the string and automatic simplification that would normally occur is suppressed. (see examples) Examples ======== >>> from sympy.parsing.sympy_parser import parse_expr >>> parse_expr("1/2") 1/2 >>> type(_) <class 'sympy.core.numbers.Half'> >>> from sympy.parsing.sympy_parser import standard_transformations,\\ ... implicit_multiplication_application >>> transformations = (standard_transformations + ... (implicit_multiplication_application,)) >>> parse_expr("2x", transformations=transformations) 2*x When evaluate=False, some automatic simplifications will not occur: >>> parse_expr("2**3"), parse_expr("2**3", evaluate=False) (8, 2**3) In addition the order of the arguments will not be made canonical. This feature allows one to tell exactly how the expression was entered: >>> a = parse_expr('1 + x', evaluate=False) >>> b = parse_expr('x + 1', evaluate=0) >>> a == b False >>> a.args (1, x) >>> b.args (x, 1) See Also ======== stringify_expr, eval_expr, standard_transformations, implicit_multiplication_application """ if local_dict is None: local_dict = {} elif not isinstance(local_dict, dict): raise TypeError('expecting local_dict to be a dict') if global_dict is None: global_dict = {} exec('from sympy import *', global_dict) elif not isinstance(global_dict, dict): raise TypeError('expecting global_dict to be a dict') transformations = transformations or () if transformations: if not iterable(transformations): raise TypeError( '`transformations` should be a list of functions.') for _ in transformations: if not callable(_): raise TypeError(filldedent(''' expected a function in `transformations`, not %s''' % func_name(_))) if arity(_) != 3: raise TypeError(filldedent(''' a transformation should be function that takes 3 arguments''')) code = stringify_expr(s, local_dict, global_dict, transformations) if not evaluate: code = compile(evaluateFalse(code), '<string>', 'eval') return eval_expr(code, local_dict, global_dict) def evaluateFalse(s): """ Replaces operators with the SymPy equivalent and sets evaluate=False. 
""" node = ast.parse(s) node = EvaluateFalseTransformer().visit(node) # node is a Module, we want an Expression node = ast.Expression(node.body[0].value) return ast.fix_missing_locations(node) class EvaluateFalseTransformer(ast.NodeTransformer): operators = { ast.Add: 'Add', ast.Mult: 'Mul', ast.Pow: 'Pow', ast.Sub: 'Add', ast.Div: 'Mul', ast.BitOr: 'Or', ast.BitAnd: 'And', ast.BitXor: 'Not', } def flatten(self, args, func): result = [] for arg in args: if isinstance(arg, ast.Call): arg_func = arg.func if isinstance(arg_func, ast.Call): arg_func = arg_func.func if arg_func.id == func: result.extend(self.flatten(arg.args, func)) else: result.append(arg) else: result.append(arg) return result def visit_BinOp(self, node): if node.op.__class__ in self.operators: sympy_class = self.operators[node.op.__class__] right = self.visit(node.right) left = self.visit(node.left) if isinstance(node.left, ast.UnaryOp) and (isinstance(node.right, ast.UnaryOp) == 0) and sympy_class in ('Mul',): left, right = right, left if isinstance(node.op, ast.Sub): right = ast.Call( func=ast.Name(id='Mul', ctx=ast.Load()), args=[ast.UnaryOp(op=ast.USub(), operand=ast.Num(1)), right], keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))], starargs=None, kwargs=None ) if isinstance(node.op, ast.Div): if isinstance(node.left, ast.UnaryOp): if isinstance(node.right,ast.UnaryOp): left, right = right, left left = ast.Call( func=ast.Name(id='Pow', ctx=ast.Load()), args=[left, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))], keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))], starargs=None, kwargs=None ) else: right = ast.Call( func=ast.Name(id='Pow', ctx=ast.Load()), args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))], keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))], starargs=None, kwargs=None ) new_node = ast.Call( func=ast.Name(id=sympy_class, ctx=ast.Load()), args=[left, right], keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))], starargs=None, kwargs=None ) if sympy_class in ('Add', 'Mul'): # Denest Add or Mul as appropriate new_node.args = self.flatten(new_node.args, sympy_class) return new_node return node
f64c9dac47764d767c8068c39bc0bdfd4dbdfcc1d64886736495334f9a537f5f
""" This module implements the functionality to take any Python expression as a string and fix all numbers and other things before evaluating it, thus 1/2 returns Integer(1)/Integer(2) We use the ast module for this. It is well documented at docs.python.org. Some tips to understand how this works: use dump() to get a nice representation of any node. Then write a string of what you want to get, e.g. "Integer(1)", parse it, dump it and you'll see that you need to do "Call(Name('Integer', Load()), [node], [], None, None)". You don't need to bother with lineno and col_offset, just call fix_missing_locations() before returning the node. """ from sympy.core.basic import Basic from sympy.core.sympify import SympifyError from ast import parse, NodeTransformer, Call, Name, Load, \ fix_missing_locations, Str, Tuple class Transform(NodeTransformer): def __init__(self, local_dict, global_dict): NodeTransformer.__init__(self) self.local_dict = local_dict self.global_dict = global_dict def visit_Num(self, node): if isinstance(node.n, int): return fix_missing_locations(Call(func=Name('Integer', Load()), args=[node], keywords=[])) elif isinstance(node.n, float): return fix_missing_locations(Call(func=Name('Float', Load()), args=[node], keywords=[])) return node def visit_Name(self, node): if node.id in self.local_dict: return node elif node.id in self.global_dict: name_obj = self.global_dict[node.id] if isinstance(name_obj, (Basic, type)) or callable(name_obj): return node elif node.id in ['True', 'False']: return node return fix_missing_locations(Call(func=Name('Symbol', Load()), args=[Str(node.id)], keywords=[])) def visit_Lambda(self, node): args = [self.visit(arg) for arg in node.args.args] body = self.visit(node.body) n = Call(func=Name('Lambda', Load()), args=[Tuple(args, Load()), body], keywords=[]) return fix_missing_locations(n) def parse_expr(s, local_dict): """ Converts the string "s" to a SymPy expression, in local_dict. It converts all numbers to Integers before feeding it to Python and automatically creates Symbols. """ global_dict = {} exec('from sympy import *', global_dict) try: a = parse(s.strip(), mode="eval") except SyntaxError: raise SympifyError("Cannot parse %s." % repr(s)) a = Transform(local_dict, global_dict).visit(a) e = compile(a, "<string>", "eval") return eval(e, global_dict, local_dict)
36575564c68b4b4b066b42188ab9afb4d41e028a7d6217fec3c572fe556f90a8
# -*- coding: utf-8 -*- r""" Wigner, Clebsch-Gordan, Racah, and Gaunt coefficients Collection of functions for calculating Wigner 3j, 6j, 9j, Clebsch-Gordan, Racah as well as Gaunt coefficients exactly, all evaluating to a rational number times the square root of a rational number [Rasch03]_. Please see the description of the individual functions for further details and examples. References ========== .. [Regge58] 'Symmetry Properties of Clebsch-Gordan Coefficients', T. Regge, Nuovo Cimento, Volume 10, pp. 544 (1958) .. [Regge59] 'Symmetry Properties of Racah Coefficients', T. Regge, Nuovo Cimento, Volume 11, pp. 116 (1959) .. [Edmonds74] A. R. Edmonds. Angular momentum in quantum mechanics. Investigations in physics, 4.; Investigations in physics, no. 4. Princeton, N.J., Princeton University Press, 1957. .. [Rasch03] J. Rasch and A. C. H. Yu, 'Efficient Storage Scheme for Pre-calculated Wigner 3j, 6j and Gaunt Coefficients', SIAM J. Sci. Comput. Volume 25, Issue 4, pp. 1416-1428 (2003) .. [Liberatodebrito82] 'FORTRAN program for the integral of three spherical harmonics', A. Liberato de Brito, Comput. Phys. Commun., Volume 25, pp. 81-85 (1982) Credits and Copyright ===================== This code was taken from Sage with the permission of all authors: https://groups.google.com/forum/#!topic/sage-devel/M4NZdu-7O38 Authors ======= - Jens Rasch (2009-03-24): initial version for Sage - Jens Rasch (2009-05-31): updated to sage-4.0 - Oscar Gerardo Lazo Arjona (2017-06-18): added Wigner D matrices Copyright (C) 2008 Jens Rasch <[email protected]> """ from sympy import (Integer, pi, sqrt, sympify, Dummy, S, Sum, Ynm, zeros, Function, sin, cos, exp, I, factorial, binomial, Add, ImmutableMatrix) # This list of precomputed factorials is needed to massively # accelerate future calculations of the various coefficients _Factlist = [1] def _calc_factlist(nn): r""" Function calculates a list of precomputed factorials in order to massively accelerate future calculations of the various coefficients. Parameters ========== nn : integer Highest factorial to be computed. Returns ======= list of integers : The list of precomputed factorials. Examples ======== Calculate list of factorials:: sage: from sage.functions.wigner import _calc_factlist sage: _calc_factlist(10) [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800] """ if nn >= len(_Factlist): for ii in range(len(_Factlist), int(nn + 1)): _Factlist.append(_Factlist[ii - 1] * ii) return _Factlist[:int(nn) + 1] def wigner_3j(j_1, j_2, j_3, m_1, m_2, m_3): r""" Calculate the Wigner 3j symbol `\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)`. Parameters ========== j_1, j_2, j_3, m_1, m_2, m_3 : Integer or half integer. Returns ======= Rational number times the square root of a rational number. Examples ======== >>> from sympy.physics.wigner import wigner_3j >>> wigner_3j(2, 6, 4, 0, 0, 0) sqrt(715)/143 >>> wigner_3j(2, 6, 4, 0, 0, 1) 0 It is an error to have arguments that are not integer or half integer values:: sage: wigner_3j(2.1, 6, 4, 0, 0, 0) Traceback (most recent call last): ... ValueError: j values must be integer or half integer sage: wigner_3j(2, 6, 4, 1, 0, -1.1) Traceback (most recent call last): ... ValueError: m values must be integer or half integer Notes ===== The Wigner 3j symbol obeys the following symmetry rules: - invariant under any permutation of the columns (with the exception of a sign change where `J:=j_1+j_2+j_3`): .. 
math:: \begin{aligned} \operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3) &=\operatorname{Wigner3j}(j_3,j_1,j_2,m_3,m_1,m_2) \\ &=\operatorname{Wigner3j}(j_2,j_3,j_1,m_2,m_3,m_1) \\ &=(-1)^J \operatorname{Wigner3j}(j_3,j_2,j_1,m_3,m_2,m_1) \\ &=(-1)^J \operatorname{Wigner3j}(j_1,j_3,j_2,m_1,m_3,m_2) \\ &=(-1)^J \operatorname{Wigner3j}(j_2,j_1,j_3,m_2,m_1,m_3) \end{aligned} - invariant under space inflection, i.e. .. math:: \operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3) =(-1)^J \operatorname{Wigner3j}(j_1,j_2,j_3,-m_1,-m_2,-m_3) - symmetric with respect to the 72 additional symmetries based on the work by [Regge58]_ - zero for `j_1`, `j_2`, `j_3` not fulfilling triangle relation - zero for `m_1 + m_2 + m_3 \neq 0` - zero for violating any one of the conditions `j_1 \ge |m_1|`, `j_2 \ge |m_2|`, `j_3 \ge |m_3|` Algorithm ========= This function uses the algorithm of [Edmonds74]_ to calculate the value of the 3j symbol exactly. Note that the formula contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. Authors ======= - Jens Rasch (2009-03-24): initial version """ if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \ int(j_3 * 2) != j_3 * 2: raise ValueError("j values must be integer or half integer") if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \ int(m_3 * 2) != m_3 * 2: raise ValueError("m values must be integer or half integer") if m_1 + m_2 + m_3 != 0: return 0 prefid = Integer((-1) ** int(j_1 - j_2 - m_3)) m_3 = -m_3 a1 = j_1 + j_2 - j_3 if a1 < 0: return 0 a2 = j_1 - j_2 + j_3 if a2 < 0: return 0 a3 = -j_1 + j_2 + j_3 if a3 < 0: return 0 if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3): return 0 maxfact = max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2), j_3 + abs(m_3)) _calc_factlist(int(maxfact)) argsqrt = Integer(_Factlist[int(j_1 + j_2 - j_3)] * _Factlist[int(j_1 - j_2 + j_3)] * _Factlist[int(-j_1 + j_2 + j_3)] * _Factlist[int(j_1 - m_1)] * _Factlist[int(j_1 + m_1)] * _Factlist[int(j_2 - m_2)] * _Factlist[int(j_2 + m_2)] * _Factlist[int(j_3 - m_3)] * _Factlist[int(j_3 + m_3)]) / \ _Factlist[int(j_1 + j_2 + j_3 + 1)] ressqrt = sqrt(argsqrt) if ressqrt.is_complex or ressqrt.is_infinite: ressqrt = ressqrt.as_real_imag()[0] imin = max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0) imax = min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3) sumres = 0 for ii in range(int(imin), int(imax) + 1): den = _Factlist[ii] * \ _Factlist[int(ii + j_3 - j_1 - m_2)] * \ _Factlist[int(j_2 + m_2 - ii)] * \ _Factlist[int(j_1 - ii - m_1)] * \ _Factlist[int(ii + j_3 - j_2 + m_1)] * \ _Factlist[int(j_1 + j_2 - j_3 - ii)] sumres = sumres + Integer((-1) ** ii) / den res = ressqrt * sumres * prefid return res def clebsch_gordan(j_1, j_2, j_3, m_1, m_2, m_3): r""" Calculates the Clebsch-Gordan coefficient. `\left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle`. The reference for this function is [Edmonds74]_. Parameters ========== j_1, j_2, j_3, m_1, m_2, m_3 : Integer or half integer. Returns ======= Rational number times the square root of a rational number. Examples ======== >>> from sympy import S >>> from sympy.physics.wigner import clebsch_gordan >>> clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2) 1 >>> clebsch_gordan(S(3)/2, S(1)/2, 1, S(3)/2, -S(1)/2, 1) sqrt(3)/2 >>> clebsch_gordan(S(3)/2, S(1)/2, 1, -S(1)/2, S(1)/2, 0) -sqrt(2)/2 Notes ===== The Clebsch-Gordan coefficient will be evaluated via its relation to Wigner 3j symbols: .. 
math:: \left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle =(-1)^{j_1-j_2+m_3} \sqrt{2j_3+1} \operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,-m_3) See also the documentation on Wigner 3j symbols which exhibit much higher symmetry relations than the Clebsch-Gordan coefficient. Authors ======= - Jens Rasch (2009-03-24): initial version """ res = (-1) ** sympify(j_1 - j_2 + m_3) * sqrt(2 * j_3 + 1) * \ wigner_3j(j_1, j_2, j_3, m_1, m_2, -m_3) return res def _big_delta_coeff(aa, bb, cc, prec=None): r""" Calculates the Delta coefficient of the 3 angular momenta for Racah symbols. Also checks that the differences are of integer value. Parameters ========== aa : First angular momentum, integer or half integer. bb : Second angular momentum, integer or half integer. cc : Third angular momentum, integer or half integer. prec : Precision of the ``sqrt()`` calculation. Returns ======= double : Value of the Delta coefficient. Examples ======== sage: from sage.functions.wigner import _big_delta_coeff sage: _big_delta_coeff(1,1,1) 1/2*sqrt(1/6) """ if int(aa + bb - cc) != (aa + bb - cc): raise ValueError("j values must be integer or half integer and fulfill the triangle relation") if int(aa + cc - bb) != (aa + cc - bb): raise ValueError("j values must be integer or half integer and fulfill the triangle relation") if int(bb + cc - aa) != (bb + cc - aa): raise ValueError("j values must be integer or half integer and fulfill the triangle relation") if (aa + bb - cc) < 0: return 0 if (aa + cc - bb) < 0: return 0 if (bb + cc - aa) < 0: return 0 maxfact = max(aa + bb - cc, aa + cc - bb, bb + cc - aa, aa + bb + cc + 1) _calc_factlist(maxfact) argsqrt = Integer(_Factlist[int(aa + bb - cc)] * _Factlist[int(aa + cc - bb)] * _Factlist[int(bb + cc - aa)]) / \ Integer(_Factlist[int(aa + bb + cc + 1)]) ressqrt = sqrt(argsqrt) if prec: ressqrt = ressqrt.evalf(prec).as_real_imag()[0] return ressqrt def racah(aa, bb, cc, dd, ee, ff, prec=None): r""" Calculate the Racah symbol `W(a,b,c,d;e,f)`. Parameters ========== a, ..., f : Integer or half integer. prec : Precision, default: ``None``. Providing a precision can drastically speed up the calculation. Returns ======= Rational number times the square root of a rational number (if ``prec=None``), or real number if a precision is given. Examples ======== >>> from sympy.physics.wigner import racah >>> racah(3,3,3,3,3,3) -1/14 Notes ===== The Racah symbol is related to the Wigner 6j symbol: .. math:: \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) =(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6) Please see the 6j symbol for its much richer symmetries and for additional properties. Algorithm ========= This function uses the algorithm of [Edmonds74]_ to calculate the value of the 6j symbol exactly. Note that the formula contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. 
Authors ======= - Jens Rasch (2009-03-24): initial version """ prefac = _big_delta_coeff(aa, bb, ee, prec) * \ _big_delta_coeff(cc, dd, ee, prec) * \ _big_delta_coeff(aa, cc, ff, prec) * \ _big_delta_coeff(bb, dd, ff, prec) if prefac == 0: return 0 imin = max(aa + bb + ee, cc + dd + ee, aa + cc + ff, bb + dd + ff) imax = min(aa + bb + cc + dd, aa + dd + ee + ff, bb + cc + ee + ff) maxfact = max(imax + 1, aa + bb + cc + dd, aa + dd + ee + ff, bb + cc + ee + ff) _calc_factlist(maxfact) sumres = 0 for kk in range(int(imin), int(imax) + 1): den = _Factlist[int(kk - aa - bb - ee)] * \ _Factlist[int(kk - cc - dd - ee)] * \ _Factlist[int(kk - aa - cc - ff)] * \ _Factlist[int(kk - bb - dd - ff)] * \ _Factlist[int(aa + bb + cc + dd - kk)] * \ _Factlist[int(aa + dd + ee + ff - kk)] * \ _Factlist[int(bb + cc + ee + ff - kk)] sumres = sumres + Integer((-1) ** kk * _Factlist[kk + 1]) / den res = prefac * sumres * (-1) ** int(aa + bb + cc + dd) return res def wigner_6j(j_1, j_2, j_3, j_4, j_5, j_6, prec=None): r""" Calculate the Wigner 6j symbol `\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)`. Parameters ========== j_1, ..., j_6 : Integer or half integer. prec : Precision, default: ``None``. Providing a precision can drastically speed up the calculation. Returns ======= Rational number times the square root of a rational number (if ``prec=None``), or real number if a precision is given. Examples ======== >>> from sympy.physics.wigner import wigner_6j >>> wigner_6j(3,3,3,3,3,3) -1/14 >>> wigner_6j(5,5,5,5,5,5) 1/52 It is an error to have arguments that are not integer or half integer values or do not fulfill the triangle relation:: sage: wigner_6j(2.5,2.5,2.5,2.5,2.5,2.5) Traceback (most recent call last): ... ValueError: j values must be integer or half integer and fulfill the triangle relation sage: wigner_6j(0.5,0.5,1.1,0.5,0.5,1.1) Traceback (most recent call last): ... ValueError: j values must be integer or half integer and fulfill the triangle relation Notes ===== The Wigner 6j symbol is related to the Racah symbol but exhibits more symmetries as detailed below. .. math:: \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) =(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6) The Wigner 6j symbol obeys the following symmetry rules: - Wigner 6j symbols are left invariant under any permutation of the columns: .. math:: \begin{aligned} \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) &=\operatorname{Wigner6j}(j_3,j_1,j_2,j_6,j_4,j_5) \\ &=\operatorname{Wigner6j}(j_2,j_3,j_1,j_5,j_6,j_4) \\ &=\operatorname{Wigner6j}(j_3,j_2,j_1,j_6,j_5,j_4) \\ &=\operatorname{Wigner6j}(j_1,j_3,j_2,j_4,j_6,j_5) \\ &=\operatorname{Wigner6j}(j_2,j_1,j_3,j_5,j_4,j_6) \end{aligned} - They are invariant under the exchange of the upper and lower arguments in each of any two columns, i.e. .. math:: \operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6) =\operatorname{Wigner6j}(j_1,j_5,j_6,j_4,j_2,j_3) =\operatorname{Wigner6j}(j_4,j_2,j_6,j_1,j_5,j_3) =\operatorname{Wigner6j}(j_4,j_5,j_3,j_1,j_2,j_6) - additional 6 symmetries [Regge59]_ giving rise to 144 symmetries in total - only non-zero if any triple of `j`'s fulfill a triangle relation Algorithm ========= This function uses the algorithm of [Edmonds74]_ to calculate the value of the 6j symbol exactly. Note that the formula contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. 
""" res = (-1) ** int(j_1 + j_2 + j_4 + j_5) * \ racah(j_1, j_2, j_5, j_4, j_3, j_6, prec) return res def wigner_9j(j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9, prec=None): r""" Calculate the Wigner 9j symbol `\operatorname{Wigner9j}(j_1,j_2,j_3,j_4,j_5,j_6,j_7,j_8,j_9)`. Parameters ========== j_1, ..., j_9 : Integer or half integer. prec : precision, default ``None``. Providing a precision can drastically speed up the calculation. Returns ======= Rational number times the square root of a rational number (if ``prec=None``), or real number if a precision is given. Examples ======== >>> from sympy.physics.wigner import wigner_9j >>> wigner_9j(1,1,1, 1,1,1, 1,1,0 ,prec=64) # ==1/18 0.05555555... >>> wigner_9j(1/2,1/2,0, 1/2,3/2,1, 0,1,1 ,prec=64) # ==1/6 0.1666666... It is an error to have arguments that are not integer or half integer values or do not fulfill the triangle relation:: sage: wigner_9j(0.5,0.5,0.5, 0.5,0.5,0.5, 0.5,0.5,0.5,prec=64) Traceback (most recent call last): ... ValueError: j values must be integer or half integer and fulfill the triangle relation sage: wigner_9j(1,1,1, 0.5,1,1.5, 0.5,1,2.5,prec=64) Traceback (most recent call last): ... ValueError: j values must be integer or half integer and fulfill the triangle relation Algorithm ========= This function uses the algorithm of [Edmonds74]_ to calculate the value of the 3j symbol exactly. Note that the formula contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. """ imax = int(min(j_1 + j_9, j_2 + j_6, j_4 + j_8) * 2) imin = imax % 2 sumres = 0 for kk in range(imin, int(imax) + 1, 2): sumres = sumres + (kk + 1) * \ racah(j_1, j_2, j_9, j_6, j_3, kk / 2, prec) * \ racah(j_4, j_6, j_8, j_2, j_5, kk / 2, prec) * \ racah(j_1, j_4, j_9, j_8, j_7, kk / 2, prec) return sumres def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None): r""" Calculate the Gaunt coefficient. Explanation =========== The Gaunt coefficient is defined as the integral over three spherical harmonics: .. math:: \begin{aligned} \operatorname{Gaunt}(l_1,l_2,l_3,m_1,m_2,m_3) &=\int Y_{l_1,m_1}(\Omega) Y_{l_2,m_2}(\Omega) Y_{l_3,m_3}(\Omega) \,d\Omega \\ &=\sqrt{\frac{(2l_1+1)(2l_2+1)(2l_3+1)}{4\pi}} \operatorname{Wigner3j}(l_1,l_2,l_3,0,0,0) \operatorname{Wigner3j}(l_1,l_2,l_3,m_1,m_2,m_3) \end{aligned} Parameters ========== l_1, l_2, l_3, m_1, m_2, m_3 : Integer. prec - precision, default: ``None``. Providing a precision can drastically speed up the calculation. Returns ======= Rational number times the square root of a rational number (if ``prec=None``), or real number if a precision is given. Examples ======== >>> from sympy.physics.wigner import gaunt >>> gaunt(1,0,1,1,0,-1) -1/(2*sqrt(pi)) >>> gaunt(1000,1000,1200,9,3,-12).n(64) 0.00689500421922113448... It is an error to use non-integer values for `l` and `m`:: sage: gaunt(1.2,0,1.2,0,0,0) Traceback (most recent call last): ... ValueError: l values must be integer sage: gaunt(1,0,1,1.1,0,-1.1) Traceback (most recent call last): ... ValueError: m values must be integer Notes ===== The Gaunt coefficient obeys the following symmetry rules: - invariant under any permutation of the columns .. math:: \begin{aligned} Y(l_1,l_2,l_3,m_1,m_2,m_3) &=Y(l_3,l_1,l_2,m_3,m_1,m_2) \\ &=Y(l_2,l_3,l_1,m_2,m_3,m_1) \\ &=Y(l_3,l_2,l_1,m_3,m_2,m_1) \\ &=Y(l_1,l_3,l_2,m_1,m_3,m_2) \\ &=Y(l_2,l_1,l_3,m_2,m_1,m_3) \end{aligned} - invariant under space inflection, i.e. .. 
math:: Y(l_1,l_2,l_3,m_1,m_2,m_3) =Y(l_1,l_2,l_3,-m_1,-m_2,-m_3) - symmetric with respect to the 72 Regge symmetries as inherited for the `3j` symbols [Regge58]_ - zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation - zero for violating any one of the conditions: `l_1 \ge |m_1|`, `l_2 \ge |m_2|`, `l_3 \ge |m_3|` - non-zero only for an even sum of the `l_i`, i.e. `L = l_1 + l_2 + l_3 = 2n` for `n` in `\mathbb{N}` Algorithms ========== This function uses the algorithm of [Liberatodebrito82]_ to calculate the value of the Gaunt coefficient exactly. Note that the formula contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. Authors ======= Jens Rasch (2009-03-24): initial version for Sage. """ if int(l_1) != l_1 or int(l_2) != l_2 or int(l_3) != l_3: raise ValueError("l values must be integer") if int(m_1) != m_1 or int(m_2) != m_2 or int(m_3) != m_3: raise ValueError("m values must be integer") sumL = l_1 + l_2 + l_3 bigL = sumL // 2 a1 = l_1 + l_2 - l_3 if a1 < 0: return 0 a2 = l_1 - l_2 + l_3 if a2 < 0: return 0 a3 = -l_1 + l_2 + l_3 if a3 < 0: return 0 if sumL % 2: return 0 if (m_1 + m_2 + m_3) != 0: return 0 if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3): return 0 imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0) imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3) maxfact = max(l_1 + l_2 + l_3 + 1, imax + 1) _calc_factlist(maxfact) argsqrt = (2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \ _Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \ _Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \ (4*pi) ressqrt = sqrt(argsqrt) prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] * _Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3])/ \ _Factlist[2 * bigL + 1]/ \ (_Factlist[bigL - l_1] * _Factlist[bigL - l_2] * _Factlist[bigL - l_3]) sumres = 0 for ii in range(int(imin), int(imax) + 1): den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \ _Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \ _Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii] sumres = sumres + Integer((-1) ** ii) / den res = ressqrt * prefac * sumres * Integer((-1) ** (bigL + l_3 + m_1 - m_2)) if prec is not None: res = res.n(prec) return res class Wigner3j(Function): def doit(self, **hints): if all(obj.is_number for obj in self.args): return wigner_3j(*self.args) else: return self def dot_rot_grad_Ynm(j, p, l, m, theta, phi): r""" Returns dot product of rotational gradients of spherical harmonics. Explanation =========== This function returns the right hand side of the following expression: .. math :: \vec{R}Y{_j^{p}} \cdot \vec{R}Y{_l^{m}} = (-1)^{m+p} \sum\limits_{k=|l-j|}^{l+j}Y{_k^{m+p}} * \alpha_{l,m,j,p,k} * \frac{1}{2} (k^2-j^2-l^2+k-j-l) Arguments ========= j, p, l, m .... indices in spherical harmonics (expressions or integers) theta, phi .... 
angle arguments in spherical harmonics Example ======= >>> from sympy import symbols >>> from sympy.physics.wigner import dot_rot_grad_Ynm >>> theta, phi = symbols("theta phi") >>> dot_rot_grad_Ynm(3, 2, 2, 0, theta, phi).doit() 3*sqrt(55)*Ynm(5, 2, theta, phi)/(11*sqrt(pi)) """ j = sympify(j) p = sympify(p) l = sympify(l) m = sympify(m) theta = sympify(theta) phi = sympify(phi) k = Dummy("k") def alpha(l,m,j,p,k): return sqrt((2*l+1)*(2*j+1)*(2*k+1)/(4*pi)) * \ Wigner3j(j, l, k, S.Zero, S.Zero, S.Zero) * \ Wigner3j(j, l, k, p, m, -m-p) return (S.NegativeOne)**(m+p) * Sum(Ynm(k, m+p, theta, phi) * alpha(l,m,j,p,k) / 2 \ *(k**2-j**2-l**2+k-j-l), (k, abs(l-j), l+j)) def wigner_d_small(J, beta): """Return the small Wigner d matrix for angular momentum J. Explanation =========== J : An integer, half-integer, or sympy symbol for the total angular momentum of the angular momentum space being rotated. beta : A real number representing the Euler angle of rotation about the so-called line of nodes. See [Edmonds74]_. Returns ======= A matrix representing the corresponding Euler angle rotation( in the basis of eigenvectors of `J_z`). .. math :: \\mathcal{d}_{\\beta} = \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big) The components are calculated using the general form [Edmonds74]_, equation 4.1.15. Examples ======== >>> from sympy import Integer, symbols, pi, pprint >>> from sympy.physics.wigner import wigner_d_small >>> half = 1/Integer(2) >>> beta = symbols("beta", real=True) >>> pprint(wigner_d_small(half, beta), use_unicode=True) ⎡ ⎛β⎞ ⎛β⎞⎤ ⎢cos⎜─⎟ sin⎜─⎟⎥ ⎢ ⎝2⎠ ⎝2⎠⎥ ⎢ ⎥ ⎢ ⎛β⎞ ⎛β⎞⎥ ⎢-sin⎜─⎟ cos⎜─⎟⎥ ⎣ ⎝2⎠ ⎝2⎠⎦ >>> pprint(wigner_d_small(2*half, beta), use_unicode=True) ⎡ 2⎛β⎞ ⎛β⎞ ⎛β⎞ 2⎛β⎞ ⎤ ⎢ cos ⎜─⎟ √2⋅sin⎜─⎟⋅cos⎜─⎟ sin ⎜─⎟ ⎥ ⎢ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎥ ⎢ ⎥ ⎢ ⎛β⎞ ⎛β⎞ 2⎛β⎞ 2⎛β⎞ ⎛β⎞ ⎛β⎞⎥ ⎢-√2⋅sin⎜─⎟⋅cos⎜─⎟ - sin ⎜─⎟ + cos ⎜─⎟ √2⋅sin⎜─⎟⋅cos⎜─⎟⎥ ⎢ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠⎥ ⎢ ⎥ ⎢ 2⎛β⎞ ⎛β⎞ ⎛β⎞ 2⎛β⎞ ⎥ ⎢ sin ⎜─⎟ -√2⋅sin⎜─⎟⋅cos⎜─⎟ cos ⎜─⎟ ⎥ ⎣ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎦ From table 4 in [Edmonds74]_ >>> pprint(wigner_d_small(half, beta).subs({beta:pi/2}), use_unicode=True) ⎡ √2 √2⎤ ⎢ ── ──⎥ ⎢ 2 2 ⎥ ⎢ ⎥ ⎢-√2 √2⎥ ⎢──── ──⎥ ⎣ 2 2 ⎦ >>> pprint(wigner_d_small(2*half, beta).subs({beta:pi/2}), ... use_unicode=True) ⎡ √2 ⎤ ⎢1/2 ── 1/2⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢-√2 √2 ⎥ ⎢──── 0 ── ⎥ ⎢ 2 2 ⎥ ⎢ ⎥ ⎢ -√2 ⎥ ⎢1/2 ──── 1/2⎥ ⎣ 2 ⎦ >>> pprint(wigner_d_small(3*half, beta).subs({beta:pi/2}), ... use_unicode=True) ⎡ √2 √6 √6 √2⎤ ⎢ ── ── ── ──⎥ ⎢ 4 4 4 4 ⎥ ⎢ ⎥ ⎢-√6 -√2 √2 √6⎥ ⎢──── ──── ── ──⎥ ⎢ 4 4 4 4 ⎥ ⎢ ⎥ ⎢ √6 -√2 -√2 √6⎥ ⎢ ── ──── ──── ──⎥ ⎢ 4 4 4 4 ⎥ ⎢ ⎥ ⎢-√2 √6 -√6 √2⎥ ⎢──── ── ──── ──⎥ ⎣ 4 4 4 4 ⎦ >>> pprint(wigner_d_small(4*half, beta).subs({beta:pi/2}), ... use_unicode=True) ⎡ √6 ⎤ ⎢1/4 1/2 ── 1/2 1/4⎥ ⎢ 4 ⎥ ⎢ ⎥ ⎢-1/2 -1/2 0 1/2 1/2⎥ ⎢ ⎥ ⎢ √6 √6 ⎥ ⎢ ── 0 -1/2 0 ── ⎥ ⎢ 4 4 ⎥ ⎢ ⎥ ⎢-1/2 1/2 0 -1/2 1/2⎥ ⎢ ⎥ ⎢ √6 ⎥ ⎢1/4 -1/2 ── -1/2 1/4⎥ ⎣ 4 ⎦ """ M = [J-i for i in range(2*J+1)] d = zeros(2*J+1) for i, Mi in enumerate(M): for j, Mj in enumerate(M): # We get the maximum and minimum value of sigma. sigmamax = max([-Mi-Mj, J-Mj]) sigmamin = min([0, J-Mi]) dij = sqrt(factorial(J+Mi)*factorial(J-Mi) / factorial(J+Mj)/factorial(J-Mj)) terms = [(-1)**(J-Mi-s) * binomial(J+Mj, J-Mi-s) * binomial(J-Mj, s) * cos(beta/2)**(2*s+Mi+Mj) * sin(beta/2)**(2*J-2*s-Mj-Mi) for s in range(sigmamin, sigmamax+1)] d[i, j] = dij*Add(*terms) return ImmutableMatrix(d) def wigner_d(J, alpha, beta, gamma): """Return the Wigner D matrix for angular momentum J. 
Explanation =========== J : An integer, half-integer, or sympy symbol for the total angular momentum of the angular momentum space being rotated. alpha, beta, gamma - Real numbers representing the Euler. Angles of rotation about the so-called vertical, line of nodes, and figure axes. See [Edmonds74]_. Returns ======= A matrix representing the corresponding Euler angle rotation( in the basis of eigenvectors of `J_z`). .. math :: \\mathcal{D}_{\\alpha \\beta \\gamma} = \\exp\\big( \\frac{i\\alpha}{\\hbar} J_z\\big) \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big) \\exp\\big( \\frac{i\\gamma}{\\hbar} J_z\\big) The components are calculated using the general form [Edmonds74]_, equation 4.1.12. Examples ======== The simplest possible example: >>> from sympy.physics.wigner import wigner_d >>> from sympy import Integer, symbols, pprint >>> half = 1/Integer(2) >>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True) >>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True) ⎡ ⅈ⋅α ⅈ⋅γ ⅈ⋅α -ⅈ⋅γ ⎤ ⎢ ─── ─── ─── ───── ⎥ ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞ ⎥ ⎢ ℯ ⋅ℯ ⋅cos⎜─⎟ ℯ ⋅ℯ ⋅sin⎜─⎟ ⎥ ⎢ ⎝2⎠ ⎝2⎠ ⎥ ⎢ ⎥ ⎢ -ⅈ⋅α ⅈ⋅γ -ⅈ⋅α -ⅈ⋅γ ⎥ ⎢ ───── ─── ───── ───── ⎥ ⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞⎥ ⎢-ℯ ⋅ℯ ⋅sin⎜─⎟ ℯ ⋅ℯ ⋅cos⎜─⎟⎥ ⎣ ⎝2⎠ ⎝2⎠⎦ """ d = wigner_d_small(J, beta) M = [J-i for i in range(2*J+1)] D = [[exp(I*Mi*alpha)*d[i, j]*exp(I*Mj*gamma) for j, Mj in enumerate(M)] for i, Mi in enumerate(M)] return ImmutableMatrix(D)
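# --- Consistency check (illustrative, not part of the module) ---------------
# A small sanity check on the coefficients defined above: for fixed (j_3, m_3)
# the Clebsch-Gordan coefficients obey the completeness relation
#     Sum_{m_1} <j_1 m_1; j_2 (m_3 - m_1) | j_3 m_3>**2 = 1
# whenever (j_1, j_2, j_3) fulfill the triangle relation.  All arithmetic
# below is exact, so the sum evaluates to exactly 1.
if __name__ == '__main__':   # pragma: no cover
    j_1, j_2, j_3, m_3 = 1, 1, 2, 0
    total = sum(clebsch_gordan(j_1, j_2, j_3, m_1, m_3 - m_1, m_3)**2
                for m_1 in range(-j_1, j_1 + 1))
    print(total)   # 1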
e0223631b5be3e0aa37f47cb73f4703f2dfc5f498dce0bb67a1d9d6f81b83056
""" This module implements Pauli algebra by subclassing Symbol. Only algebraic properties of Pauli matrices are used (we don't use the Matrix class). See the documentation to the class Pauli for examples. References ========== .. [1] https://en.wikipedia.org/wiki/Pauli_matrices """ from sympy import Symbol, I, Mul, Pow, Add from sympy.physics.quantum import TensorProduct __all__ = ['evaluate_pauli_product'] def delta(i, j): """ Returns 1 if ``i == j``, else 0. This is used in the multiplication of Pauli matrices. Examples ======== >>> from sympy.physics.paulialgebra import delta >>> delta(1, 1) 1 >>> delta(2, 3) 0 """ if i == j: return 1 else: return 0 def epsilon(i, j, k): """ Return 1 if i,j,k is equal to (1,2,3), (2,3,1), or (3,1,2); -1 if ``i``,``j``,``k`` is equal to (1,3,2), (3,2,1), or (2,1,3); else return 0. This is used in the multiplication of Pauli matrices. Examples ======== >>> from sympy.physics.paulialgebra import epsilon >>> epsilon(1, 2, 3) 1 >>> epsilon(1, 3, 2) -1 """ if (i, j, k) in [(1, 2, 3), (2, 3, 1), (3, 1, 2)]: return 1 elif (i, j, k) in [(1, 3, 2), (3, 2, 1), (2, 1, 3)]: return -1 else: return 0 class Pauli(Symbol): """ The class representing algebraic properties of Pauli matrices. Explanation =========== The symbol used to display the Pauli matrices can be changed with an optional parameter ``label="sigma"``. Pauli matrices with different ``label`` attributes cannot multiply together. If the left multiplication of symbol or number with Pauli matrix is needed, please use parentheses to separate Pauli and symbolic multiplication (for example: 2*I*(Pauli(3)*Pauli(2))). Another variant is to use evaluate_pauli_product function to evaluate the product of Pauli matrices and other symbols (with commutative multiply rules). See Also ======== evaluate_pauli_product Examples ======== >>> from sympy.physics.paulialgebra import Pauli >>> Pauli(1) sigma1 >>> Pauli(1)*Pauli(2) I*sigma3 >>> Pauli(1)*Pauli(1) 1 >>> Pauli(3)**4 1 >>> Pauli(1)*Pauli(2)*Pauli(3) I >>> from sympy.physics.paulialgebra import Pauli >>> Pauli(1, label="tau") tau1 >>> Pauli(1)*Pauli(2, label="tau") sigma1*tau2 >>> Pauli(1, label="tau")*Pauli(2, label="tau") I*tau3 >>> from sympy import I >>> I*(Pauli(2)*Pauli(3)) -sigma1 >>> from sympy.physics.paulialgebra import evaluate_pauli_product >>> f = I*Pauli(2)*Pauli(3) >>> f I*sigma2*sigma3 >>> evaluate_pauli_product(f) -sigma1 """ __slots__ = ("i", "label") def __new__(cls, i, label="sigma"): if not i in [1, 2, 3]: raise IndexError("Invalid Pauli index") obj = Symbol.__new__(cls, "%s%d" %(label,i), commutative=False, hermitian=True) obj.i = i obj.label = label return obj def __getnewargs__(self): return (self.i,self.label,) # FIXME don't work for -I*Pauli(2)*Pauli(3) def __mul__(self, other): if isinstance(other, Pauli): j = self.i k = other.i jlab = self.label klab = other.label if jlab == klab: return delta(j, k) \ + I*epsilon(j, k, 1)*Pauli(1,jlab) \ + I*epsilon(j, k, 2)*Pauli(2,jlab) \ + I*epsilon(j, k, 3)*Pauli(3,jlab) return super().__mul__(other) def _eval_power(b, e): if e.is_Integer and e.is_positive: return super().__pow__(int(e) % 2) def evaluate_pauli_product(arg): '''Help function to evaluate Pauli matrices product with symbolic objects. 
Parameters ========== arg: symbolic expression that contains Paulimatrices Examples ======== >>> from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product >>> from sympy import I >>> evaluate_pauli_product(I*Pauli(1)*Pauli(2)) -sigma3 >>> from sympy.abc import x >>> evaluate_pauli_product(x**2*Pauli(2)*Pauli(1)) -I*x**2*sigma3 ''' start = arg end = arg if isinstance(arg, Pow) and isinstance(arg.args[0], Pauli): if arg.args[1].is_odd: return arg.args[0] else: return 1 if isinstance(arg, Add): return Add(*[evaluate_pauli_product(part) for part in arg.args]) if isinstance(arg, TensorProduct): return TensorProduct(*[evaluate_pauli_product(part) for part in arg.args]) elif not(isinstance(arg, Mul)): return arg while ((not(start == end)) | ((start == arg) & (end == arg))): start = end tmp = start.as_coeff_mul() sigma_product = 1 com_product = 1 keeper = 1 for el in tmp[1]: if isinstance(el, Pauli): sigma_product *= el elif not(el.is_commutative): if isinstance(el, Pow) and isinstance(el.args[0], Pauli): if el.args[1].is_odd: sigma_product *= el.args[0] elif isinstance(el, TensorProduct): keeper = keeper*sigma_product*\ TensorProduct( *[evaluate_pauli_product(part) for part in el.args] ) sigma_product = 1 else: keeper = keeper*sigma_product*el sigma_product = 1 else: com_product *= el end = (tmp[0]*keeper*sigma_product*com_product) if end == arg: break return end
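# --- Usage sketch (illustrative, not part of the module) --------------------
# The product rule sigma_i*sigma_j = delta(i, j) + I*epsilon(i, j, k)*sigma_k
# implemented in Pauli.__mul__ is what evaluate_pauli_product applies inside
# larger products that mix commutative factors with Pauli symbols.
if __name__ == '__main__':   # pragma: no cover
    from sympy.abc import x

    # Cyclic product of all three matrices: sigma1*sigma2*sigma3 == I.
    print(Pauli(1)*Pauli(2)*Pauli(3))
    # Commutative coefficients are pulled out before the Pauli part is
    # reduced; this should print -2*I*x*sigma1.
    print(evaluate_pauli_product(2*x*Pauli(3)*Pauli(2)))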
f3742654b1bf99b0a501edc851c9c9622796b08fbb45603bc4da282663a3ca14
from sympy.core import S, pi, Rational from sympy.functions import assoc_laguerre, sqrt, exp, factorial, factorial2 def R_nl(n, l, nu, r): """ Returns the radial wavefunction R_{nl} for a 3d isotropic harmonic oscillator. Parameters ========== ``n`` : The "nodal" quantum number. Corresponds to the number of nodes in the wavefunction. ``n >= 0`` ``l`` : The quantum number for orbital angular momentum. ``nu`` : mass-scaled frequency: nu = m*omega/(2*hbar) where `m` is the mass and `omega` the frequency of the oscillator. (in atomic units ``nu == omega/2``) ``r`` : Radial coordinate. Examples ======== >>> from sympy.physics.sho import R_nl >>> from sympy.abc import r, nu, l >>> R_nl(0, 0, 1, r) 2*2**(3/4)*exp(-r**2)/pi**(1/4) >>> R_nl(1, 0, 1, r) 4*2**(1/4)*sqrt(3)*(3/2 - 2*r**2)*exp(-r**2)/(3*pi**(1/4)) l, nu and r may be symbolic: >>> R_nl(0, 0, nu, r) 2*2**(3/4)*sqrt(nu**(3/2))*exp(-nu*r**2)/pi**(1/4) >>> R_nl(0, l, 1, r) r**l*sqrt(2**(l + 3/2)*2**(l + 2)/factorial2(2*l + 1))*exp(-r**2)/pi**(1/4) The normalization of the radial wavefunction is: >>> from sympy import Integral, oo >>> Integral(R_nl(0, 0, 1, r)**2*r**2, (r, 0, oo)).n() 1.00000000000000 >>> Integral(R_nl(1, 0, 1, r)**2*r**2, (r, 0, oo)).n() 1.00000000000000 >>> Integral(R_nl(1, 1, 1, r)**2*r**2, (r, 0, oo)).n() 1.00000000000000 """ n, l, nu, r = map(S, [n, l, nu, r]) # formula uses n >= 1 (instead of nodal n >= 0) n = n + 1 C = sqrt( ((2*nu)**(l + Rational(3, 2))*2**(n + l + 1)*factorial(n - 1))/ (sqrt(pi)*(factorial2(2*n + 2*l - 1))) ) return C*r**(l)*exp(-nu*r**2)*assoc_laguerre(n - 1, l + S.Half, 2*nu*r**2) def E_nl(n, l, hw): """ Returns the Energy of an isotropic harmonic oscillator. Parameters ========== ``n`` : The "nodal" quantum number. ``l`` : The orbital angular momentum. ``hw`` : The harmonic oscillator parameter. Notes ===== The unit of the returned value matches the unit of hw, since the energy is calculated as: E_nl = (2*n + l + 3/2)*hw Examples ======== >>> from sympy.physics.sho import E_nl >>> from sympy import symbols >>> x, y, z = symbols('x, y, z') >>> E_nl(x, y, z) z*(2*x + y + 3/2) """ return (2*n + l + Rational(3, 2))*hw
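# --- Usage sketch (illustrative, not part of the module) --------------------
# Two quick checks on the functions above: levels of the 3D isotropic
# oscillator are degenerate in 2*n + l (both states below have energy 7*hw/2),
# and radial wavefunctions with the same l but different n are orthogonal.
if __name__ == '__main__':   # pragma: no cover
    from sympy import Integral, oo, symbols

    r, hw = symbols('r hw', positive=True)
    print(E_nl(1, 0, hw) == E_nl(0, 2, hw))    # True
    # Orthogonality of R_nl(1, 0, ...) and R_nl(0, 0, ...); the integral
    # should evaluate to 0.
    print(Integral(R_nl(0, 0, 1, r)*R_nl(1, 0, 1, r)*r**2, (r, 0, oo)).doit())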
c3596d3b160bf46a464d8cbb872993a02546ebe9ef9bc925b65ad0d5455eb33b
from sympy.core import S, pi, Rational from sympy.functions import hermite, sqrt, exp, factorial, Abs from sympy.physics.quantum.constants import hbar def psi_n(n, x, m, omega): """ Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator. Parameters ========== ``n`` : the "nodal" quantum number. Corresponds to the number of nodes in the wavefunction. ``n >= 0`` ``x`` : x coordinate. ``m`` : Mass of the particle. ``omega`` : Angular frequency of the oscillator. Examples ======== >>> from sympy.physics.qho_1d import psi_n >>> from sympy.abc import m, x, omega >>> psi_n(0, x, m, omega) (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4)) """ # sympify arguments n, x, m, omega = map(S, [n, x, m, omega]) nu = m * omega / hbar # normalization coefficient C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n))) return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x) def E_n(n, omega): """ Returns the Energy of the One-dimensional harmonic oscillator. Parameters ========== ``n`` : The "nodal" quantum number. ``omega`` : The harmonic oscillator angular frequency. Notes ===== The unit of the returned value matches the unit of hw, since the energy is calculated as: E_n = hbar * omega*(n + 1/2) Examples ======== >>> from sympy.physics.qho_1d import E_n >>> from sympy.abc import x, omega >>> E_n(x, omega) hbar*omega*(x + 1/2) """ return hbar * omega * (n + S.Half) def coherent_state(n, alpha): """ Returns <n|alpha> for the coherent states of 1D harmonic oscillator. See https://en.wikipedia.org/wiki/Coherent_states Parameters ========== ``n`` : The "nodal" quantum number. ``alpha`` : The eigen value of annihilation operator. """ return exp(- Abs(alpha)**2/2)*(alpha**n)/sqrt(factorial(n))
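# --- Usage sketch (illustrative, not part of the module) --------------------
# Two quick checks on the functions above: the spectrum is equally spaced,
# E_{n+1} - E_n = hbar*omega, and the coherent-state amplitudes <n|alpha>
# satisfy Sum_n |<n|alpha>|**2 = 1 (a truncated sum already gets very close
# for alpha = 1).
if __name__ == '__main__':   # pragma: no cover
    from sympy import simplify, symbols

    omega = symbols('omega', positive=True)
    n = symbols('n', integer=True, nonnegative=True)
    print(simplify(E_n(n + 1, omega) - E_n(n, omega)))   # hbar*omega

    partial = sum(abs(coherent_state(k, 1))**2 for k in range(20))
    print(partial.evalf())                               # very close to 1.0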
6cd63b7b3dfe899c93de5d0ac751c2a12eed7a30b9cf25f41a51af85e5dfaf4a
from sympy import factorial, sqrt, exp, S, assoc_laguerre, Float from sympy.functions.special.spherical_harmonics import Ynm def R_nl(n, l, r, Z=1): """ Returns the Hydrogen radial wavefunction R_{nl}. Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... l : integer ``l`` is the Angular Momentum Quantum Number with values ranging from 0 to ``n-1``. r : Radial coordinate. Z : Atomic number (1 for Hydrogen, 2 for Helium, ...) Everything is in Hartree atomic units. Examples ======== >>> from sympy.physics.hydrogen import R_nl >>> from sympy.abc import r, Z >>> R_nl(1, 0, r, Z) 2*sqrt(Z**3)*exp(-Z*r) >>> R_nl(2, 0, r, Z) sqrt(2)*(-Z*r + 2)*sqrt(Z**3)*exp(-Z*r/2)/4 >>> R_nl(2, 1, r, Z) sqrt(6)*Z*r*sqrt(Z**3)*exp(-Z*r/2)/12 For Hydrogen atom, you can just use the default value of Z=1: >>> R_nl(1, 0, r) 2*exp(-r) >>> R_nl(2, 0, r) sqrt(2)*(2 - r)*exp(-r/2)/4 >>> R_nl(3, 0, r) 2*sqrt(3)*(2*r**2/9 - 2*r + 3)*exp(-r/3)/27 For Silver atom, you would use Z=47: >>> R_nl(1, 0, r, Z=47) 94*sqrt(47)*exp(-47*r) >>> R_nl(2, 0, r, Z=47) 47*sqrt(94)*(2 - 47*r)*exp(-47*r/2)/4 >>> R_nl(3, 0, r, Z=47) 94*sqrt(141)*(4418*r**2/9 - 94*r + 3)*exp(-47*r/3)/27 The normalization of the radial wavefunction is: >>> from sympy import integrate, oo >>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo)) 1 >>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo)) 1 >>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo)) 1 It holds for any atomic number: >>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo)) 1 >>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo)) 1 >>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo)) 1 """ # sympify arguments n, l, r, Z = map(S, [n, l, r, Z]) # radial quantum number n_r = n - l - 1 # rescaled "r" a = 1/Z # Bohr radius r0 = 2 * r / (n * a) # normalization coefficient C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n + l))) # This is an equivalent normalization coefficient, that can be found in # some books. Both coefficients seem to be the same fast: # C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l))) return C * r0**l * assoc_laguerre(n_r, 2*l + 1, r0).expand() * exp(-r0/2) def Psi_nlm(n, l, m, r, phi, theta, Z=1): """ Returns the Hydrogen wave function psi_{nlm}. It's the product of the radial wavefunction R_{nl} and the spherical harmonic Y_{l}^{m}. Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... l : integer ``l`` is the Angular Momentum Quantum Number with values ranging from 0 to ``n-1``. m : integer ``m`` is the Magnetic Quantum Number with values ranging from ``-l`` to ``l``. r : radial coordinate phi : azimuthal angle theta : polar angle Z : atomic number (1 for Hydrogen, 2 for Helium, ...) Everything is in Hartree atomic units. Examples ======== >>> from sympy.physics.hydrogen import Psi_nlm >>> from sympy import Symbol >>> r=Symbol("r", real=True, positive=True) >>> phi=Symbol("phi", real=True) >>> theta=Symbol("theta", real=True) >>> Z=Symbol("Z", positive=True, integer=True, nonzero=True) >>> Psi_nlm(1,0,0,r,phi,theta,Z) Z**(3/2)*exp(-Z*r)/sqrt(pi) >>> Psi_nlm(2,1,1,r,phi,theta,Z) -Z**(5/2)*r*exp(I*phi)*exp(-Z*r/2)*sin(theta)/(8*sqrt(pi)) Integrating the absolute square of a hydrogen wavefunction psi_{nlm} over the whole space leads 1. 
The normalization of the hydrogen wavefunctions Psi_nlm is: >>> from sympy import integrate, conjugate, pi, oo, sin >>> wf=Psi_nlm(2,1,1,r,phi,theta,Z) >>> abs_sqrd=wf*conjugate(wf) >>> jacobi=r**2*sin(theta) >>> integrate(abs_sqrd*jacobi, (r,0,oo), (phi,0,2*pi), (theta,0,pi)) 1 """ # sympify arguments n, l, m, r, phi, theta, Z = map(S, [n, l, m, r, phi, theta, Z]) # check if values for n,l,m make physically sense if n.is_integer and n < 1: raise ValueError("'n' must be positive integer") if l.is_integer and not (n > l): raise ValueError("'n' must be greater than 'l'") if m.is_integer and not (abs(m) <= l): raise ValueError("|'m'| must be less or equal 'l'") # return the hydrogen wave function return R_nl(n, l, r, Z)*Ynm(l, m, theta, phi).expand(func=True) def E_nl(n, Z=1): """ Returns the energy of the state (n, l) in Hartree atomic units. The energy doesn't depend on "l". Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... Z : Atomic number (1 for Hydrogen, 2 for Helium, ...) Examples ======== >>> from sympy.physics.hydrogen import E_nl >>> from sympy.abc import n, Z >>> E_nl(n, Z) -Z**2/(2*n**2) >>> E_nl(1) -1/2 >>> E_nl(2) -1/8 >>> E_nl(3) -1/18 >>> E_nl(3, 47) -2209/18 """ n, Z = S(n), S(Z) if n.is_integer and (n < 1): raise ValueError("'n' must be positive integer") return -Z**2/(2*n**2) def E_nl_dirac(n, l, spin_up=True, Z=1, c=Float("137.035999037")): """ Returns the relativistic energy of the state (n, l, spin) in Hartree atomic units. The energy is calculated from the Dirac equation. The rest mass energy is *not* included. Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... l : integer ``l`` is the Angular Momentum Quantum Number with values ranging from 0 to ``n-1``. spin_up : True if the electron spin is up (default), otherwise down Z : Atomic number (1 for Hydrogen, 2 for Helium, ...) c : Speed of light in atomic units. Default value is 137.035999037, taken from http://arxiv.org/abs/1012.3627 Examples ======== >>> from sympy.physics.hydrogen import E_nl_dirac >>> E_nl_dirac(1, 0) -0.500006656595360 >>> E_nl_dirac(2, 0) -0.125002080189006 >>> E_nl_dirac(2, 1) -0.125000416028342 >>> E_nl_dirac(2, 1, False) -0.125002080189006 >>> E_nl_dirac(3, 0) -0.0555562951740285 >>> E_nl_dirac(3, 1) -0.0555558020932949 >>> E_nl_dirac(3, 1, False) -0.0555562951740285 >>> E_nl_dirac(3, 2) -0.0555556377366884 >>> E_nl_dirac(3, 2, False) -0.0555558020932949 """ n, l, Z, c = map(S, [n, l, Z, c]) if not (l >= 0): raise ValueError("'l' must be positive or zero") if not (n > l): raise ValueError("'n' must be greater than 'l'") if (l == 0 and spin_up is False): raise ValueError("Spin must be up for l==0.") # skappa is sign*kappa, where sign contains the correct sign if spin_up: skappa = -l - 1 else: skappa = -l beta = sqrt(skappa**2 - Z**2/c**2) return c**2/sqrt(1 + Z**2/(n + skappa + beta)**2/c**2) - c**2
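# --- Usage sketch (illustrative, not part of the module) --------------------
# The Dirac energies defined above reduce to the non-relativistic Bohr levels
# up to fine-structure corrections of order (Z*alpha)**2; for the hydrogen
# ground state the absolute shift is of order 10**-5 Hartree.
if __name__ == '__main__':   # pragma: no cover
    nonrel = E_nl(1)            # -1/2 exactly
    rel = E_nl_dirac(1, 0)      # approximately -0.5000067
    print(nonrel, rel, (rel - nonrel).evalf())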
cc4b2e755677f0d6ae1cde1031fc8f93d60ce005d1661748c8ba599f36878f4c
"""Known matrices related to physics""" from sympy import Matrix, I, pi, sqrt from sympy.functions import exp def msigma(i): r"""Returns a Pauli matrix `\sigma_i` with ``i=1,2,3``. References ========== .. [1] https://en.wikipedia.org/wiki/Pauli_matrices Examples ======== >>> from sympy.physics.matrices import msigma >>> msigma(1) Matrix([ [0, 1], [1, 0]]) """ if i == 1: mat = ( ( (0, 1), (1, 0) ) ) elif i == 2: mat = ( ( (0, -I), (I, 0) ) ) elif i == 3: mat = ( ( (1, 0), (0, -1) ) ) else: raise IndexError("Invalid Pauli index") return Matrix(mat) def pat_matrix(m, dx, dy, dz): """Returns the Parallel Axis Theorem matrix to translate the inertia matrix a distance of `(dx, dy, dz)` for a body of mass m. Examples ======== To translate a body having a mass of 2 units a distance of 1 unit along the `x`-axis we get: >>> from sympy.physics.matrices import pat_matrix >>> pat_matrix(2, 1, 0, 0) Matrix([ [0, 0, 0], [0, 2, 0], [0, 0, 2]]) """ dxdy = -dx*dy dydz = -dy*dz dzdx = -dz*dx dxdx = dx**2 dydy = dy**2 dzdz = dz**2 mat = ((dydy + dzdz, dxdy, dzdx), (dxdy, dxdx + dzdz, dydz), (dzdx, dydz, dydy + dxdx)) return m*Matrix(mat) def mgamma(mu, lower=False): r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard (Dirac) representation. Explanation =========== If you want `\gamma_\mu`, use ``gamma(mu, True)``. We use a convention: `\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3` `\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5` References ========== .. [1] https://en.wikipedia.org/wiki/Gamma_matrices Examples ======== >>> from sympy.physics.matrices import mgamma >>> mgamma(1) Matrix([ [ 0, 0, 0, 1], [ 0, 0, 1, 0], [ 0, -1, 0, 0], [-1, 0, 0, 0]]) """ if not mu in [0, 1, 2, 3, 5]: raise IndexError("Invalid Dirac index") if mu == 0: mat = ( (1, 0, 0, 0), (0, 1, 0, 0), (0, 0, -1, 0), (0, 0, 0, -1) ) elif mu == 1: mat = ( (0, 0, 0, 1), (0, 0, 1, 0), (0, -1, 0, 0), (-1, 0, 0, 0) ) elif mu == 2: mat = ( (0, 0, 0, -I), (0, 0, I, 0), (0, I, 0, 0), (-I, 0, 0, 0) ) elif mu == 3: mat = ( (0, 0, 1, 0), (0, 0, 0, -1), (-1, 0, 0, 0), (0, 1, 0, 0) ) elif mu == 5: mat = ( (0, 0, 1, 0), (0, 0, 0, 1), (1, 0, 0, 0), (0, 1, 0, 0) ) m = Matrix(mat) if lower: if mu in [1, 2, 3, 5]: m = -m return m #Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field #Theory minkowski_tensor = Matrix( ( (1, 0, 0, 0), (0, -1, 0, 0), (0, 0, -1, 0), (0, 0, 0, -1) )) def mdft(n): r""" Returns an expression of a discrete Fourier transform as a matrix multiplication. It is an n X n matrix. References ========== .. [1] https://en.wikipedia.org/wiki/DFT_matrix Examples ======== >>> from sympy.physics.matrices import mdft >>> mdft(3) Matrix([ [sqrt(3)/3, sqrt(3)/3, sqrt(3)/3], [sqrt(3)/3, sqrt(3)*exp(-2*I*pi/3)/3, sqrt(3)*exp(2*I*pi/3)/3], [sqrt(3)/3, sqrt(3)*exp(2*I*pi/3)/3, sqrt(3)*exp(-2*I*pi/3)/3]]) """ mat = [[None for x in range(n)] for y in range(n)] base = exp(-2*pi*I/n) mat[0] = [1]*n for i in range(n): mat[i][0] = 1 for i in range(1, n): for j in range(i, n): mat[i][j] = mat[j][i] = base**(i*j) return (1/sqrt(n))*Matrix(mat)
fdd3d1c0c0c9420e513669abb956ef86a6fa85ed550d388e199e3b8286b64957
""" Second quantization operators and states for bosons. This follow the formulation of Fetter and Welecka, "Quantum Theory of Many-Particle Systems." """ from collections import defaultdict from sympy import (Add, Basic, cacheit, Dummy, Expr, Function, I, KroneckerDelta, Mul, Pow, S, sqrt, Symbol, sympify, Tuple, zeros) from sympy.printing.str import StrPrinter from sympy.utilities.iterables import has_dups from sympy.utilities import default_sort_key __all__ = [ 'Dagger', 'KroneckerDelta', 'BosonicOperator', 'AnnihilateBoson', 'CreateBoson', 'AnnihilateFermion', 'CreateFermion', 'FockState', 'FockStateBra', 'FockStateKet', 'FockStateBosonKet', 'FockStateBosonBra', 'FockStateFermionKet', 'FockStateFermionBra', 'BBra', 'BKet', 'FBra', 'FKet', 'F', 'Fd', 'B', 'Bd', 'apply_operators', 'InnerProduct', 'BosonicBasis', 'VarBosonicBasis', 'FixedBosonicBasis', 'Commutator', 'matrix_rep', 'contraction', 'wicks', 'NO', 'evaluate_deltas', 'AntiSymmetricTensor', 'substitute_dummies', 'PermutationOperator', 'simplify_index_permutations', ] class SecondQuantizationError(Exception): pass class AppliesOnlyToSymbolicIndex(SecondQuantizationError): pass class ContractionAppliesOnlyToFermions(SecondQuantizationError): pass class ViolationOfPauliPrinciple(SecondQuantizationError): pass class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError): pass class WicksTheoremDoesNotApply(SecondQuantizationError): pass class Dagger(Expr): """ Hermitian conjugate of creation/annihilation operators. Examples ======== >>> from sympy import I >>> from sympy.physics.secondquant import Dagger, B, Bd >>> Dagger(2*I) -2*I >>> Dagger(B(0)) CreateBoson(0) >>> Dagger(Bd(0)) AnnihilateBoson(0) """ def __new__(cls, arg): arg = sympify(arg) r = cls.eval(arg) if isinstance(r, Basic): return r obj = Basic.__new__(cls, arg) return obj @classmethod def eval(cls, arg): """ Evaluates the Dagger instance. Examples ======== >>> from sympy import I >>> from sympy.physics.secondquant import Dagger, B, Bd >>> Dagger(2*I) -2*I >>> Dagger(B(0)) CreateBoson(0) >>> Dagger(Bd(0)) AnnihilateBoson(0) The eval() method is called automatically. """ dagger = getattr(arg, '_dagger_', None) if dagger is not None: return dagger() if isinstance(arg, Basic): if arg.is_Add: return Add(*tuple(map(Dagger, arg.args))) if arg.is_Mul: return Mul(*tuple(map(Dagger, reversed(arg.args)))) if arg.is_Number: return arg if arg.is_Pow: return Pow(Dagger(arg.args[0]), arg.args[1]) if arg == I: return -arg else: return None def _dagger_(self): return self.args[0] class TensorSymbol(Expr): is_commutative = True class AntiSymmetricTensor(TensorSymbol): """Stores upper and lower indices in separate Tuple's. Each group of indices is assumed to be antisymmetric. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import AntiSymmetricTensor >>> i, j = symbols('i j', below_fermi=True) >>> a, b = symbols('a b', above_fermi=True) >>> AntiSymmetricTensor('v', (a, i), (b, j)) AntiSymmetricTensor(v, (a, i), (b, j)) >>> AntiSymmetricTensor('v', (i, a), (b, j)) -AntiSymmetricTensor(v, (a, i), (b, j)) As you can see, the indices are automatically sorted to a canonical form. 
""" def __new__(cls, symbol, upper, lower): try: upper, signu = _sort_anticommuting_fermions( upper, key=cls._sortkey) lower, signl = _sort_anticommuting_fermions( lower, key=cls._sortkey) except ViolationOfPauliPrinciple: return S.Zero symbol = sympify(symbol) upper = Tuple(*upper) lower = Tuple(*lower) if (signu + signl) % 2: return -TensorSymbol.__new__(cls, symbol, upper, lower) else: return TensorSymbol.__new__(cls, symbol, upper, lower) @classmethod def _sortkey(cls, index): """Key for sorting of indices. particle < hole < general FIXME: This is a bottle-neck, can we do it faster? """ h = hash(index) label = str(index) if isinstance(index, Dummy): if index.assumptions0.get('above_fermi'): return (20, label, h) elif index.assumptions0.get('below_fermi'): return (21, label, h) else: return (22, label, h) if index.assumptions0.get('above_fermi'): return (10, label, h) elif index.assumptions0.get('below_fermi'): return (11, label, h) else: return (12, label, h) def _latex(self, printer): return "%s^{%s}_{%s}" % ( self.symbol, "".join([ i.name for i in self.args[1]]), "".join([ i.name for i in self.args[2]]) ) @property def symbol(self): """ Returns the symbol of the tensor. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import AntiSymmetricTensor >>> i, j = symbols('i,j', below_fermi=True) >>> a, b = symbols('a,b', above_fermi=True) >>> AntiSymmetricTensor('v', (a, i), (b, j)) AntiSymmetricTensor(v, (a, i), (b, j)) >>> AntiSymmetricTensor('v', (a, i), (b, j)).symbol v """ return self.args[0] @property def upper(self): """ Returns the upper indices. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import AntiSymmetricTensor >>> i, j = symbols('i,j', below_fermi=True) >>> a, b = symbols('a,b', above_fermi=True) >>> AntiSymmetricTensor('v', (a, i), (b, j)) AntiSymmetricTensor(v, (a, i), (b, j)) >>> AntiSymmetricTensor('v', (a, i), (b, j)).upper (a, i) """ return self.args[1] @property def lower(self): """ Returns the lower indices. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import AntiSymmetricTensor >>> i, j = symbols('i,j', below_fermi=True) >>> a, b = symbols('a,b', above_fermi=True) >>> AntiSymmetricTensor('v', (a, i), (b, j)) AntiSymmetricTensor(v, (a, i), (b, j)) >>> AntiSymmetricTensor('v', (a, i), (b, j)).lower (b, j) """ return self.args[2] def __str__(self): return "%s(%s,%s)" % self.args def doit(self, **kw_args): """ Returns self. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import AntiSymmetricTensor >>> i, j = symbols('i,j', below_fermi=True) >>> a, b = symbols('a,b', above_fermi=True) >>> AntiSymmetricTensor('v', (a, i), (b, j)).doit() AntiSymmetricTensor(v, (a, i), (b, j)) """ return self class SqOperator(Expr): """ Base class for Second Quantization operators. """ op_symbol = 'sq' is_commutative = False def __new__(cls, k): obj = Basic.__new__(cls, sympify(k)) return obj @property def state(self): """ Returns the state index related to this operator. Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F, Fd, B, Bd >>> p = Symbol('p') >>> F(p).state p >>> Fd(p).state p >>> B(p).state p >>> Bd(p).state p """ return self.args[0] @property def is_symbolic(self): """ Returns True if the state is a symbol (as opposed to a number). 
Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> p = Symbol('p') >>> F(p).is_symbolic True >>> F(1).is_symbolic False """ if self.state.is_Integer: return False else: return True def doit(self, **kw_args): """ FIXME: hack to prevent crash further up... """ return self def __repr__(self): return NotImplemented def __str__(self): return "%s(%r)" % (self.op_symbol, self.state) def apply_operator(self, state): """ Applies an operator to itself. """ raise NotImplementedError('implement apply_operator in a subclass') class BosonicOperator(SqOperator): pass class Annihilator(SqOperator): pass class Creator(SqOperator): pass class AnnihilateBoson(BosonicOperator, Annihilator): """ Bosonic annihilation operator. Examples ======== >>> from sympy.physics.secondquant import B >>> from sympy.abc import x >>> B(x) AnnihilateBoson(x) """ op_symbol = 'b' def _dagger_(self): return CreateBoson(self.state) def apply_operator(self, state): """ Apply state to self if self is not symbolic and state is a FockStateKet, else multiply self by state. Examples ======== >>> from sympy.physics.secondquant import B, BKet >>> from sympy.abc import x, y, n >>> B(x).apply_operator(y) y*AnnihilateBoson(x) >>> B(0).apply_operator(BKet((n,))) sqrt(n)*FockStateBosonKet((n - 1,)) """ if not self.is_symbolic and isinstance(state, FockStateKet): element = self.state amp = sqrt(state[element]) return amp*state.down(element) else: return Mul(self, state) def __repr__(self): return "AnnihilateBoson(%s)" % self.state def _latex(self, printer): return "b_{%s}" % self.state.name class CreateBoson(BosonicOperator, Creator): """ Bosonic creation operator. """ op_symbol = 'b+' def _dagger_(self): return AnnihilateBoson(self.state) def apply_operator(self, state): """ Apply state to self if self is not symbolic and state is a FockStateKet, else multiply self by state. Examples ======== >>> from sympy.physics.secondquant import B, Dagger, BKet >>> from sympy.abc import x, y, n >>> Dagger(B(x)).apply_operator(y) y*CreateBoson(x) >>> B(0).apply_operator(BKet((n,))) sqrt(n)*FockStateBosonKet((n - 1,)) """ if not self.is_symbolic and isinstance(state, FockStateKet): element = self.state amp = sqrt(state[element] + 1) return amp*state.up(element) else: return Mul(self, state) def __repr__(self): return "CreateBoson(%s)" % self.state def _latex(self, printer): return "b^\\dagger_{%s}" % self.state.name B = AnnihilateBoson Bd = CreateBoson class FermionicOperator(SqOperator): @property def is_restricted(self): """ Is this FermionicOperator restricted with respect to fermi level? Returns ======= 1 : restricted to orbits above fermi 0 : no restriction -1 : restricted to orbits below fermi Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F, Fd >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_restricted 1 >>> Fd(a).is_restricted 1 >>> F(i).is_restricted -1 >>> Fd(i).is_restricted -1 >>> F(p).is_restricted 0 >>> Fd(p).is_restricted 0 """ ass = self.args[0].assumptions0 if ass.get("below_fermi"): return -1 if ass.get("above_fermi"): return 1 return 0 @property def is_above_fermi(self): """ Does the index of this FermionicOperator allow values above fermi? 
Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_above_fermi True >>> F(i).is_above_fermi False >>> F(p).is_above_fermi True Note ==== The same applies to creation operators Fd """ return not self.args[0].assumptions0.get("below_fermi") @property def is_below_fermi(self): """ Does the index of this FermionicOperator allow values below fermi? Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_below_fermi False >>> F(i).is_below_fermi True >>> F(p).is_below_fermi True The same applies to creation operators Fd """ return not self.args[0].assumptions0.get("above_fermi") @property def is_only_below_fermi(self): """ Is the index of this FermionicOperator restricted to values below fermi? Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_only_below_fermi False >>> F(i).is_only_below_fermi True >>> F(p).is_only_below_fermi False The same applies to creation operators Fd """ return self.is_below_fermi and not self.is_above_fermi @property def is_only_above_fermi(self): """ Is the index of this FermionicOperator restricted to values above fermi? Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_only_above_fermi True >>> F(i).is_only_above_fermi False >>> F(p).is_only_above_fermi False The same applies to creation operators Fd """ return self.is_above_fermi and not self.is_below_fermi def _sortkey(self): h = hash(self) label = str(self.args[0]) if self.is_only_q_creator: return 1, label, h if self.is_only_q_annihilator: return 4, label, h if isinstance(self, Annihilator): return 3, label, h if isinstance(self, Creator): return 2, label, h class AnnihilateFermion(FermionicOperator, Annihilator): """ Fermionic annihilation operator. """ op_symbol = 'f' def _dagger_(self): return CreateFermion(self.state) def apply_operator(self, state): """ Apply state to self if self is not symbolic and state is a FockStateKet, else multiply self by state. Examples ======== >>> from sympy.physics.secondquant import B, Dagger, BKet >>> from sympy.abc import x, y, n >>> Dagger(B(x)).apply_operator(y) y*CreateBoson(x) >>> B(0).apply_operator(BKet((n,))) sqrt(n)*FockStateBosonKet((n - 1,)) """ if isinstance(state, FockStateFermionKet): element = self.state return state.down(element) elif isinstance(state, Mul): c_part, nc_part = state.args_cnc() if isinstance(nc_part[0], FockStateFermionKet): element = self.state return Mul(*(c_part + [nc_part[0].down(element)] + nc_part[1:])) else: return Mul(self, state) else: return Mul(self, state) @property def is_q_creator(self): """ Can we create a quasi-particle? (create hole or create particle) If so, would that be above or below the fermi surface? 
Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_q_creator 0 >>> F(i).is_q_creator -1 >>> F(p).is_q_creator -1 """ if self.is_below_fermi: return -1 return 0 @property def is_q_annihilator(self): """ Can we destroy a quasi-particle? (annihilate hole or annihilate particle) If so, would that be above or below the fermi surface? Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=1) >>> i = Symbol('i', below_fermi=1) >>> p = Symbol('p') >>> F(a).is_q_annihilator 1 >>> F(i).is_q_annihilator 0 >>> F(p).is_q_annihilator 1 """ if self.is_above_fermi: return 1 return 0 @property def is_only_q_creator(self): """ Always create a quasi-particle? (create hole or create particle) Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_only_q_creator False >>> F(i).is_only_q_creator True >>> F(p).is_only_q_creator False """ return self.is_only_below_fermi @property def is_only_q_annihilator(self): """ Always destroy a quasi-particle? (annihilate hole or annihilate particle) Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import F >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> F(a).is_only_q_annihilator True >>> F(i).is_only_q_annihilator False >>> F(p).is_only_q_annihilator False """ return self.is_only_above_fermi def __repr__(self): return "AnnihilateFermion(%s)" % self.state def _latex(self, printer): return "a_{%s}" % self.state.name class CreateFermion(FermionicOperator, Creator): """ Fermionic creation operator. """ op_symbol = 'f+' def _dagger_(self): return AnnihilateFermion(self.state) def apply_operator(self, state): """ Apply state to self if self is not symbolic and state is a FockStateKet, else multiply self by state. Examples ======== >>> from sympy.physics.secondquant import B, Dagger, BKet >>> from sympy.abc import x, y, n >>> Dagger(B(x)).apply_operator(y) y*CreateBoson(x) >>> B(0).apply_operator(BKet((n,))) sqrt(n)*FockStateBosonKet((n - 1,)) """ if isinstance(state, FockStateFermionKet): element = self.state return state.up(element) elif isinstance(state, Mul): c_part, nc_part = state.args_cnc() if isinstance(nc_part[0], FockStateFermionKet): element = self.state return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:])) return Mul(self, state) @property def is_q_creator(self): """ Can we create a quasi-particle? (create hole or create particle) If so, would that be above or below the fermi surface? Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import Fd >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> Fd(a).is_q_creator 1 >>> Fd(i).is_q_creator 0 >>> Fd(p).is_q_creator 1 """ if self.is_above_fermi: return 1 return 0 @property def is_q_annihilator(self): """ Can we destroy a quasi-particle? (annihilate hole or annihilate particle) If so, would that be above or below the fermi surface? 
Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import Fd >>> a = Symbol('a', above_fermi=1) >>> i = Symbol('i', below_fermi=1) >>> p = Symbol('p') >>> Fd(a).is_q_annihilator 0 >>> Fd(i).is_q_annihilator -1 >>> Fd(p).is_q_annihilator -1 """ if self.is_below_fermi: return -1 return 0 @property def is_only_q_creator(self): """ Always create a quasi-particle? (create hole or create particle) Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import Fd >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> Fd(a).is_only_q_creator True >>> Fd(i).is_only_q_creator False >>> Fd(p).is_only_q_creator False """ return self.is_only_above_fermi @property def is_only_q_annihilator(self): """ Always destroy a quasi-particle? (annihilate hole or annihilate particle) Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import Fd >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> Fd(a).is_only_q_annihilator False >>> Fd(i).is_only_q_annihilator True >>> Fd(p).is_only_q_annihilator False """ return self.is_only_below_fermi def __repr__(self): return "CreateFermion(%s)" % self.state def _latex(self, printer): return "a^\\dagger_{%s}" % self.state.name Fd = CreateFermion F = AnnihilateFermion class FockState(Expr): """ Many particle Fock state with a sequence of occupation numbers. Anywhere you can have a FockState, you can also have S.Zero. All code must check for this! Base class to represent FockStates. """ is_commutative = False def __new__(cls, occupations): """ occupations is a list with two possible meanings: - For bosons it is a list of occupation numbers. Element i is the number of particles in state i. - For fermions it is a list of occupied orbits. Element 0 is the state that was occupied first, element i is the i'th occupied state. """ occupations = list(map(sympify, occupations)) obj = Basic.__new__(cls, Tuple(*occupations)) return obj def __getitem__(self, i): i = int(i) return self.args[0][i] def __repr__(self): return ("FockState(%r)") % (self.args) def __str__(self): return "%s%r%s" % (self.lbracket, self._labels(), self.rbracket) def _labels(self): return self.args[0] def __len__(self): return len(self.args[0]) class BosonState(FockState): """ Base class for FockStateBoson(Ket/Bra). """ def up(self, i): """ Performs the action of a creation operator. Examples ======== >>> from sympy.physics.secondquant import BBra >>> b = BBra([1, 2]) >>> b FockStateBosonBra((1, 2)) >>> b.up(1) FockStateBosonBra((1, 3)) """ i = int(i) new_occs = list(self.args[0]) new_occs[i] = new_occs[i] + S.One return self.__class__(new_occs) def down(self, i): """ Performs the action of an annihilation operator. Examples ======== >>> from sympy.physics.secondquant import BBra >>> b = BBra([1, 2]) >>> b FockStateBosonBra((1, 2)) >>> b.down(1) FockStateBosonBra((1, 1)) """ i = int(i) new_occs = list(self.args[0]) if new_occs[i] == S.Zero: return S.Zero else: new_occs[i] = new_occs[i] - S.One return self.__class__(new_occs) class FermionState(FockState): """ Base class for FockStateFermion(Ket/Bra). 
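
    Occupied orbits anticommute, so an explicit ket is brought to a canonical
    order (possibly picking up a sign), and a doubly occupied orbit makes the
    state vanish by the Pauli principle. An illustration with integer orbit
    labels:

    >>> from sympy.physics.secondquant import FKet
    >>> FKet([2, 1])
    -FockStateFermionKet((1, 2))
    >>> FKet([1, 1])
    0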
""" fermi_level = 0 def __new__(cls, occupations, fermi_level=0): occupations = list(map(sympify, occupations)) if len(occupations) > 1: try: (occupations, sign) = _sort_anticommuting_fermions( occupations, key=hash) except ViolationOfPauliPrinciple: return S.Zero else: sign = 0 cls.fermi_level = fermi_level if cls._count_holes(occupations) > fermi_level: return S.Zero if sign % 2: return S.NegativeOne*FockState.__new__(cls, occupations) else: return FockState.__new__(cls, occupations) def up(self, i): """ Performs the action of a creation operator. Explanation =========== If below fermi we try to remove a hole, if above fermi we try to create a particle. If general index p we return ``Kronecker(p,i)*self`` where ``i`` is a new symbol with restriction above or below. Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import FKet >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') >>> FKet([]).up(a) FockStateFermionKet((a,)) A creator acting on vacuum below fermi vanishes >>> FKet([]).up(i) 0 """ present = i in self.args[0] if self._only_above_fermi(i): if present: return S.Zero else: return self._add_orbit(i) elif self._only_below_fermi(i): if present: return self._remove_orbit(i) else: return S.Zero else: if present: hole = Dummy("i", below_fermi=True) return KroneckerDelta(i, hole)*self._remove_orbit(i) else: particle = Dummy("a", above_fermi=True) return KroneckerDelta(i, particle)*self._add_orbit(i) def down(self, i): """ Performs the action of an annihilation operator. Explanation =========== If below fermi we try to create a hole, If above fermi we try to remove a particle. If general index p we return ``Kronecker(p,i)*self`` where ``i`` is a new symbol with restriction above or below. Examples ======== >>> from sympy import Symbol >>> from sympy.physics.secondquant import FKet >>> a = Symbol('a', above_fermi=True) >>> i = Symbol('i', below_fermi=True) >>> p = Symbol('p') An annihilator acting on vacuum above fermi vanishes >>> FKet([]).down(a) 0 Also below fermi, it vanishes, unless we specify a fermi level > 0 >>> FKet([]).down(i) 0 >>> FKet([],4).down(i) FockStateFermionKet((i,)) """ present = i in self.args[0] if self._only_above_fermi(i): if present: return self._remove_orbit(i) else: return S.Zero elif self._only_below_fermi(i): if present: return S.Zero else: return self._add_orbit(i) else: if present: hole = Dummy("i", below_fermi=True) return KroneckerDelta(i, hole)*self._add_orbit(i) else: particle = Dummy("a", above_fermi=True) return KroneckerDelta(i, particle)*self._remove_orbit(i) @classmethod def _only_below_fermi(cls, i): """ Tests if given orbit is only below fermi surface. If nothing can be concluded we return a conservative False. """ if i.is_number: return i <= cls.fermi_level if i.assumptions0.get('below_fermi'): return True return False @classmethod def _only_above_fermi(cls, i): """ Tests if given orbit is only above fermi surface. If fermi level has not been set we return True. If nothing can be concluded we return a conservative False. """ if i.is_number: return i > cls.fermi_level if i.assumptions0.get('above_fermi'): return True return not cls.fermi_level def _remove_orbit(self, i): """ Removes particle/fills hole in orbit i. No input tests performed here. 
""" new_occs = list(self.args[0]) pos = new_occs.index(i) del new_occs[pos] if (pos) % 2: return S.NegativeOne*self.__class__(new_occs, self.fermi_level) else: return self.__class__(new_occs, self.fermi_level) def _add_orbit(self, i): """ Adds particle/creates hole in orbit i. No input tests performed here. """ return self.__class__((i,) + self.args[0], self.fermi_level) @classmethod def _count_holes(cls, list): """ Returns the number of identified hole states in list. """ return len([i for i in list if cls._only_below_fermi(i)]) def _negate_holes(self, list): return tuple([-i if i <= self.fermi_level else i for i in list]) def __repr__(self): if self.fermi_level: return "FockStateKet(%r, fermi_level=%s)" % (self.args[0], self.fermi_level) else: return "FockStateKet(%r)" % (self.args[0],) def _labels(self): return self._negate_holes(self.args[0]) class FockStateKet(FockState): """ Representation of a ket. """ lbracket = '|' rbracket = '>' class FockStateBra(FockState): """ Representation of a bra. """ lbracket = '<' rbracket = '|' def __mul__(self, other): if isinstance(other, FockStateKet): return InnerProduct(self, other) else: return Expr.__mul__(self, other) class FockStateBosonKet(BosonState, FockStateKet): """ Many particle Fock state with a sequence of occupation numbers. Occupation numbers can be any integer >= 0. Examples ======== >>> from sympy.physics.secondquant import BKet >>> BKet([1, 2]) FockStateBosonKet((1, 2)) """ def _dagger_(self): return FockStateBosonBra(*self.args) class FockStateBosonBra(BosonState, FockStateBra): """ Describes a collection of BosonBra particles. Examples ======== >>> from sympy.physics.secondquant import BBra >>> BBra([1, 2]) FockStateBosonBra((1, 2)) """ def _dagger_(self): return FockStateBosonKet(*self.args) class FockStateFermionKet(FermionState, FockStateKet): """ Many-particle Fock state with a sequence of occupied orbits. Explanation =========== Each state can only have one particle, so we choose to store a list of occupied orbits rather than a tuple with occupation numbers (zeros and ones). states below fermi level are holes, and are represented by negative labels in the occupation list. For symbolic state labels, the fermi_level caps the number of allowed hole- states. Examples ======== >>> from sympy.physics.secondquant import FKet >>> FKet([1, 2]) FockStateFermionKet((1, 2)) """ def _dagger_(self): return FockStateFermionBra(*self.args) class FockStateFermionBra(FermionState, FockStateBra): """ See Also ======== FockStateFermionKet Examples ======== >>> from sympy.physics.secondquant import FBra >>> FBra([1, 2]) FockStateFermionBra((1, 2)) """ def _dagger_(self): return FockStateFermionKet(*self.args) BBra = FockStateBosonBra BKet = FockStateBosonKet FBra = FockStateFermionBra FKet = FockStateFermionKet def _apply_Mul(m): """ Take a Mul instance with operators and apply them to states. Explanation =========== This method applies all operators with integer state labels to the actual states. For symbolic state labels, nothing is done. When inner products of FockStates are encountered (like <a|b>), they are converted to instances of InnerProduct. This does not currently work on double inner products like, <a|b><c|d>. If the argument is not a Mul, it is simply returned as is. 
""" if not isinstance(m, Mul): return m c_part, nc_part = m.args_cnc() n_nc = len(nc_part) if n_nc == 0 or n_nc == 1: return m else: last = nc_part[-1] next_to_last = nc_part[-2] if isinstance(last, FockStateKet): if isinstance(next_to_last, SqOperator): if next_to_last.is_symbolic: return m else: result = next_to_last.apply_operator(last) if result == 0: return S.Zero else: return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result]))) elif isinstance(next_to_last, Pow): if isinstance(next_to_last.base, SqOperator) and \ next_to_last.exp.is_Integer: if next_to_last.base.is_symbolic: return m else: result = last for i in range(next_to_last.exp): result = next_to_last.base.apply_operator(result) if result == 0: break if result == 0: return S.Zero else: return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result]))) else: return m elif isinstance(next_to_last, FockStateBra): result = InnerProduct(next_to_last, last) if result == 0: return S.Zero else: return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result]))) else: return m else: return m def apply_operators(e): """ Take a sympy expression with operators and states and apply the operators. Examples ======== >>> from sympy.physics.secondquant import apply_operators >>> from sympy import sympify >>> apply_operators(sympify(3)+4) 7 """ e = e.expand() muls = e.atoms(Mul) subs_list = [(m, _apply_Mul(m)) for m in iter(muls)] return e.subs(subs_list) class InnerProduct(Basic): """ An unevaluated inner product between a bra and ket. Explanation =========== Currently this class just reduces things to a product of Kronecker Deltas. In the future, we could introduce abstract states like ``|a>`` and ``|b>``, and leave the inner product unevaluated as ``<a|b>``. """ is_commutative = True def __new__(cls, bra, ket): if not isinstance(bra, FockStateBra): raise TypeError("must be a bra") if not isinstance(ket, FockStateKet): raise TypeError("must be a key") return cls.eval(bra, ket) @classmethod def eval(cls, bra, ket): result = S.One for i, j in zip(bra.args[0], ket.args[0]): result *= KroneckerDelta(i, j) if result == 0: break return result @property def bra(self): """Returns the bra part of the state""" return self.args[0] @property def ket(self): """Returns the ket part of the state""" return self.args[1] def __repr__(self): sbra = repr(self.bra) sket = repr(self.ket) return "%s|%s" % (sbra[:-1], sket[1:]) def __str__(self): return self.__repr__() def matrix_rep(op, basis): """ Find the representation of an operator in a basis. Examples ======== >>> from sympy.physics.secondquant import VarBosonicBasis, B, matrix_rep >>> b = VarBosonicBasis(5) >>> o = B(0) >>> matrix_rep(o, b) Matrix([ [0, 1, 0, 0, 0], [0, 0, sqrt(2), 0, 0], [0, 0, 0, sqrt(3), 0], [0, 0, 0, 0, 2], [0, 0, 0, 0, 0]]) """ a = zeros(len(basis)) for i in range(len(basis)): for j in range(len(basis)): a[i, j] = apply_operators(Dagger(basis[i])*op*basis[j]) return a class BosonicBasis: """ Base class for a basis set of bosonic Fock states. """ pass class VarBosonicBasis: """ A single state, variable particle number basis set. Examples ======== >>> from sympy.physics.secondquant import VarBosonicBasis >>> b = VarBosonicBasis(5) >>> b [FockState((0,)), FockState((1,)), FockState((2,)), FockState((3,)), FockState((4,))] """ def __init__(self, n_max): self.n_max = n_max self._build_states() def _build_states(self): self.basis = [] for i in range(self.n_max): self.basis.append(FockStateBosonKet([i])) self.n_basis = len(self.basis) def index(self, state): """ Returns the index of state in basis. 
Examples ======== >>> from sympy.physics.secondquant import VarBosonicBasis >>> b = VarBosonicBasis(3) >>> state = b.state(1) >>> b [FockState((0,)), FockState((1,)), FockState((2,))] >>> state FockStateBosonKet((1,)) >>> b.index(state) 1 """ return self.basis.index(state) def state(self, i): """ The state of a single basis. Examples ======== >>> from sympy.physics.secondquant import VarBosonicBasis >>> b = VarBosonicBasis(5) >>> b.state(3) FockStateBosonKet((3,)) """ return self.basis[i] def __getitem__(self, i): return self.state(i) def __len__(self): return len(self.basis) def __repr__(self): return repr(self.basis) class FixedBosonicBasis(BosonicBasis): """ Fixed particle number basis set. Examples ======== >>> from sympy.physics.secondquant import FixedBosonicBasis >>> b = FixedBosonicBasis(2, 2) >>> state = b.state(1) >>> b [FockState((2, 0)), FockState((1, 1)), FockState((0, 2))] >>> state FockStateBosonKet((1, 1)) >>> b.index(state) 1 """ def __init__(self, n_particles, n_levels): self.n_particles = n_particles self.n_levels = n_levels self._build_particle_locations() self._build_states() def _build_particle_locations(self): tup = ["i%i" % i for i in range(self.n_particles)] first_loop = "for i0 in range(%i)" % self.n_levels other_loops = '' for cur, prev in zip(tup[1:], tup): temp = "for %s in range(%s + 1) " % (cur, prev) other_loops = other_loops + temp tup_string = "(%s)" % ", ".join(tup) list_comp = "[%s %s %s]" % (tup_string, first_loop, other_loops) result = eval(list_comp) if self.n_particles == 1: result = [(item,) for item in result] self.particle_locations = result def _build_states(self): self.basis = [] for tuple_of_indices in self.particle_locations: occ_numbers = self.n_levels*[0] for level in tuple_of_indices: occ_numbers[level] += 1 self.basis.append(FockStateBosonKet(occ_numbers)) self.n_basis = len(self.basis) def index(self, state): """Returns the index of state in basis. 
Examples ======== >>> from sympy.physics.secondquant import FixedBosonicBasis >>> b = FixedBosonicBasis(2, 3) >>> b.index(b.state(3)) 3 """ return self.basis.index(state) def state(self, i): """Returns the state that lies at index i of the basis Examples ======== >>> from sympy.physics.secondquant import FixedBosonicBasis >>> b = FixedBosonicBasis(2, 3) >>> b.state(3) FockStateBosonKet((1, 0, 1)) """ return self.basis[i] def __getitem__(self, i): return self.state(i) def __len__(self): return len(self.basis) def __repr__(self): return repr(self.basis) class Commutator(Function): """ The Commutator: [A, B] = A*B - B*A The arguments are ordered according to .__cmp__() Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import Commutator >>> A, B = symbols('A,B', commutative=False) >>> Commutator(B, A) -Commutator(A, B) Evaluate the commutator with .doit() >>> comm = Commutator(A,B); comm Commutator(A, B) >>> comm.doit() A*B - B*A For two second quantization operators the commutator is evaluated immediately: >>> from sympy.physics.secondquant import Fd, F >>> a = symbols('a', above_fermi=True) >>> i = symbols('i', below_fermi=True) >>> p,q = symbols('p,q') >>> Commutator(Fd(a),Fd(i)) 2*NO(CreateFermion(a)*CreateFermion(i)) But for more complicated expressions, the evaluation is triggered by a call to .doit() >>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i)) >>> comm.doit(wicks=True) -KroneckerDelta(i, p)*CreateFermion(q) + KroneckerDelta(i, q)*CreateFermion(p) """ is_commutative = False @classmethod def eval(cls, a, b): """ The Commutator [A,B] is on canonical form if A < B. Examples ======== >>> from sympy.physics.secondquant import Commutator, F, Fd >>> from sympy.abc import x >>> c1 = Commutator(F(x), Fd(x)) >>> c2 = Commutator(Fd(x), F(x)) >>> Commutator.eval(c1, c2) 0 """ if not (a and b): return S.Zero if a == b: return S.Zero if a.is_commutative or b.is_commutative: return S.Zero # # [A+B,C] -> [A,C] + [B,C] # a = a.expand() if isinstance(a, Add): return Add(*[cls(term, b) for term in a.args]) b = b.expand() if isinstance(b, Add): return Add(*[cls(a, term) for term in b.args]) # # [xA,yB] -> xy*[A,B] # ca, nca = a.args_cnc() cb, ncb = b.args_cnc() c_part = list(ca) + list(cb) if c_part: return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb))) # # single second quantization operators # if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator): if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson): return KroneckerDelta(a.state, b.state) if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson): return S.NegativeOne*KroneckerDelta(a.state, b.state) else: return S.Zero if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator): return wicks(a*b) - wicks(b*a) # # Canonical ordering of arguments # if a.sort_key() > b.sort_key(): return S.NegativeOne*cls(b, a) def doit(self, **hints): """ Enables the computation of complex expressions. 
Examples ======== >>> from sympy.physics.secondquant import Commutator, F, Fd >>> from sympy import symbols >>> i, j = symbols('i,j', below_fermi=True) >>> a, b = symbols('a,b', above_fermi=True) >>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j)) >>> c.doit(wicks=True) 0 """ a = self.args[0] b = self.args[1] if hints.get("wicks"): a = a.doit(**hints) b = b.doit(**hints) try: return wicks(a*b) - wicks(b*a) except ContractionAppliesOnlyToFermions: pass except WicksTheoremDoesNotApply: pass return (a*b - b*a).doit(**hints) def __repr__(self): return "Commutator(%s,%s)" % (self.args[0], self.args[1]) def __str__(self): return "[%s,%s]" % (self.args[0], self.args[1]) def _latex(self, printer): return "\\left[%s,%s\\right]" % tuple([ printer._print(arg) for arg in self.args]) class NO(Expr): """ This Object is used to represent normal ordering brackets. i.e. {abcd} sometimes written :abcd: Explanation =========== Applying the function NO(arg) to an argument means that all operators in the argument will be assumed to anticommute, and have vanishing contractions. This allows an immediate reordering to canonical form upon object creation. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import NO, F, Fd >>> p,q = symbols('p,q') >>> NO(Fd(p)*F(q)) NO(CreateFermion(p)*AnnihilateFermion(q)) >>> NO(F(q)*Fd(p)) -NO(CreateFermion(p)*AnnihilateFermion(q)) Note ==== If you want to generate a normal ordered equivalent of an expression, you should use the function wicks(). This class only indicates that all operators inside the brackets anticommute, and have vanishing contractions. Nothing more, nothing less. """ is_commutative = False def __new__(cls, arg): """ Use anticommutation to get canonical form of operators. Explanation =========== Employ associativity of normal ordered product: {ab{cd}} = {abcd} but note that {ab}{cd} /= {abcd}. We also employ distributivity: {ab + cd} = {ab} + {cd}. Canonical form also implies expand() {ab(c+d)} = {abc} + {abd}. """ # {ab + cd} = {ab} + {cd} arg = sympify(arg) arg = arg.expand() if arg.is_Add: return Add(*[ cls(term) for term in arg.args]) if arg.is_Mul: # take coefficient outside of normal ordering brackets c_part, seq = arg.args_cnc() if c_part: coeff = Mul(*c_part) if not seq: return coeff else: coeff = S.One # {ab{cd}} = {abcd} newseq = [] foundit = False for fac in seq: if isinstance(fac, NO): newseq.extend(fac.args) foundit = True else: newseq.append(fac) if foundit: return coeff*cls(Mul(*newseq)) # We assume that the user don't mix B and F operators if isinstance(seq[0], BosonicOperator): raise NotImplementedError try: newseq, sign = _sort_anticommuting_fermions(seq) except ViolationOfPauliPrinciple: return S.Zero if sign % 2: return (S.NegativeOne*coeff)*cls(Mul(*newseq)) elif sign: return coeff*cls(Mul(*newseq)) else: pass # since sign==0, no permutations was necessary # if we couldn't do anything with Mul object, we just # mark it as normal ordered if coeff != S.One: return coeff*cls(Mul(*newseq)) return Expr.__new__(cls, Mul(*newseq)) if isinstance(arg, NO): return arg # if object was not Mul or Add, normal ordering does not apply return arg @property def has_q_creators(self): """ Return 0 if the leftmost argument of the first argument is a not a q_creator, else 1 if it is above fermi or -1 if it is below fermi. 
Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import NO, F, Fd >>> a = symbols('a', above_fermi=True) >>> i = symbols('i', below_fermi=True) >>> NO(Fd(a)*Fd(i)).has_q_creators 1 >>> NO(F(i)*F(a)).has_q_creators -1 >>> NO(Fd(i)*F(a)).has_q_creators #doctest: +SKIP 0 """ return self.args[0].args[0].is_q_creator @property def has_q_annihilators(self): """ Return 0 if the rightmost argument of the first argument is a not a q_annihilator, else 1 if it is above fermi or -1 if it is below fermi. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import NO, F, Fd >>> a = symbols('a', above_fermi=True) >>> i = symbols('i', below_fermi=True) >>> NO(Fd(a)*Fd(i)).has_q_annihilators -1 >>> NO(F(i)*F(a)).has_q_annihilators 1 >>> NO(Fd(a)*F(i)).has_q_annihilators 0 """ return self.args[0].args[-1].is_q_annihilator def doit(self, **kw_args): """ Either removes the brackets or enables complex computations in its arguments. Examples ======== >>> from sympy.physics.secondquant import NO, Fd, F >>> from textwrap import fill >>> from sympy import symbols, Dummy >>> p,q = symbols('p,q', cls=Dummy) >>> print(fill(str(NO(Fd(p)*F(q)).doit()))) KroneckerDelta(_a, _p)*KroneckerDelta(_a, _q)*CreateFermion(_a)*AnnihilateFermion(_a) + KroneckerDelta(_a, _p)*KroneckerDelta(_i, _q)*CreateFermion(_a)*AnnihilateFermion(_i) - KroneckerDelta(_a, _q)*KroneckerDelta(_i, _p)*AnnihilateFermion(_a)*CreateFermion(_i) - KroneckerDelta(_i, _p)*KroneckerDelta(_i, _q)*AnnihilateFermion(_i)*CreateFermion(_i) """ if kw_args.get("remove_brackets", True): return self._remove_brackets() else: return self.__new__(type(self), self.args[0].doit(**kw_args)) def _remove_brackets(self): """ Returns the sorted string without normal order brackets. The returned string have the property that no nonzero contractions exist. """ # check if any creator is also an annihilator subslist = [] for i in self.iter_q_creators(): if self[i].is_q_annihilator: assume = self[i].state.assumptions0 # only operators with a dummy index can be split in two terms if isinstance(self[i].state, Dummy): # create indices with fermi restriction assume.pop("above_fermi", None) assume["below_fermi"] = True below = Dummy('i', **assume) assume.pop("below_fermi", None) assume["above_fermi"] = True above = Dummy('a', **assume) cls = type(self[i]) split = ( self[i].__new__(cls, below) * KroneckerDelta(below, self[i].state) + self[i].__new__(cls, above) * KroneckerDelta(above, self[i].state) ) subslist.append((self[i], split)) else: raise SubstitutionOfAmbigousOperatorFailed(self[i]) if subslist: result = NO(self.subs(subslist)) if isinstance(result, Add): return Add(*[term.doit() for term in result.args]) else: return self.args[0] def _expand_operators(self): """ Returns a sum of NO objects that contain no ambiguous q-operators. Explanation =========== If an index q has range both above and below fermi, the operator F(q) is ambiguous in the sense that it can be both a q-creator and a q-annihilator. If q is dummy, it is assumed to be a summation variable and this method rewrites it into a sum of NO terms with unambiguous operators: {Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)} where a,b are above and i,j are below fermi level. 
""" return NO(self._remove_brackets) def __getitem__(self, i): if isinstance(i, slice): indices = i.indices(len(self)) return [self.args[0].args[i] for i in range(*indices)] else: return self.args[0].args[i] def __len__(self): return len(self.args[0].args) def iter_q_annihilators(self): """ Iterates over the annihilation operators. Examples ======== >>> from sympy import symbols >>> i, j = symbols('i j', below_fermi=True) >>> a, b = symbols('a b', above_fermi=True) >>> from sympy.physics.secondquant import NO, F, Fd >>> no = NO(Fd(a)*F(i)*F(b)*Fd(j)) >>> no.iter_q_creators() <generator object... at 0x...> >>> list(no.iter_q_creators()) [0, 1] >>> list(no.iter_q_annihilators()) [3, 2] """ ops = self.args[0].args iter = range(len(ops) - 1, -1, -1) for i in iter: if ops[i].is_q_annihilator: yield i else: break def iter_q_creators(self): """ Iterates over the creation operators. Examples ======== >>> from sympy import symbols >>> i, j = symbols('i j', below_fermi=True) >>> a, b = symbols('a b', above_fermi=True) >>> from sympy.physics.secondquant import NO, F, Fd >>> no = NO(Fd(a)*F(i)*F(b)*Fd(j)) >>> no.iter_q_creators() <generator object... at 0x...> >>> list(no.iter_q_creators()) [0, 1] >>> list(no.iter_q_annihilators()) [3, 2] """ ops = self.args[0].args iter = range(0, len(ops)) for i in iter: if ops[i].is_q_creator: yield i else: break def get_subNO(self, i): """ Returns a NO() without FermionicOperator at index i. Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import F, NO >>> p, q, r = symbols('p,q,r') >>> NO(F(p)*F(q)*F(r)).get_subNO(1) NO(AnnihilateFermion(p)*AnnihilateFermion(r)) """ arg0 = self.args[0] # it's a Mul by definition of how it's created mul = arg0._new_rawargs(*(arg0.args[:i] + arg0.args[i + 1:])) return NO(mul) def _latex(self, printer): return "\\left\\{%s\\right\\}" % printer._print(self.args[0]) def __repr__(self): return "NO(%s)" % self.args[0] def __str__(self): return ":%s:" % self.args[0] def contraction(a, b): """ Calculates contraction of Fermionic operators a and b. 
Examples ======== >>> from sympy import symbols >>> from sympy.physics.secondquant import F, Fd, contraction >>> p, q = symbols('p,q') >>> a, b = symbols('a,b', above_fermi=True) >>> i, j = symbols('i,j', below_fermi=True) A contraction is non-zero only if a quasi-creator is to the right of a quasi-annihilator: >>> contraction(F(a),Fd(b)) KroneckerDelta(a, b) >>> contraction(Fd(i),F(j)) KroneckerDelta(i, j) For general indices a non-zero result restricts the indices to below/above the fermi surface: >>> contraction(Fd(p),F(q)) KroneckerDelta(_i, q)*KroneckerDelta(p, q) >>> contraction(F(p),Fd(q)) KroneckerDelta(_a, q)*KroneckerDelta(p, q) Two creators or two annihilators always vanishes: >>> contraction(F(p),F(q)) 0 >>> contraction(Fd(p),Fd(q)) 0 """ if isinstance(b, FermionicOperator) and isinstance(a, FermionicOperator): if isinstance(a, AnnihilateFermion) and isinstance(b, CreateFermion): if b.state.assumptions0.get("below_fermi"): return S.Zero if a.state.assumptions0.get("below_fermi"): return S.Zero if b.state.assumptions0.get("above_fermi"): return KroneckerDelta(a.state, b.state) if a.state.assumptions0.get("above_fermi"): return KroneckerDelta(a.state, b.state) return (KroneckerDelta(a.state, b.state)* KroneckerDelta(b.state, Dummy('a', above_fermi=True))) if isinstance(b, AnnihilateFermion) and isinstance(a, CreateFermion): if b.state.assumptions0.get("above_fermi"): return S.Zero if a.state.assumptions0.get("above_fermi"): return S.Zero if b.state.assumptions0.get("below_fermi"): return KroneckerDelta(a.state, b.state) if a.state.assumptions0.get("below_fermi"): return KroneckerDelta(a.state, b.state) return (KroneckerDelta(a.state, b.state)* KroneckerDelta(b.state, Dummy('i', below_fermi=True))) # vanish if 2xAnnihilator or 2xCreator return S.Zero else: #not fermion operators t = ( isinstance(i, FermionicOperator) for i in (a, b) ) raise ContractionAppliesOnlyToFermions(*t) def _sqkey(sq_operator): """Generates key for canonical sorting of SQ operators.""" return sq_operator._sortkey() def _sort_anticommuting_fermions(string1, key=_sqkey): """Sort fermionic operators to canonical order, assuming all pairs anticommute. Explanation =========== Uses a bidirectional bubble sort. Items in string1 are not referenced so in principle they may be any comparable objects. The sorting depends on the operators '>' and '=='. If the Pauli principle is violated, an exception is raised. Returns ======= tuple (sorted_str, sign) sorted_str: list containing the sorted operators sign: int telling how many times the sign should be changed (if sign==0 the string was already sorted) """ verified = False sign = 0 rng = list(range(len(string1) - 1)) rev = list(range(len(string1) - 3, -1, -1)) keys = list(map(key, string1)) key_val = dict(list(zip(keys, string1))) while not verified: verified = True for i in rng: left = keys[i] right = keys[i + 1] if left == right: raise ViolationOfPauliPrinciple([left, right]) if left > right: verified = False keys[i:i + 2] = [right, left] sign = sign + 1 if verified: break for i in rev: left = keys[i] right = keys[i + 1] if left == right: raise ViolationOfPauliPrinciple([left, right]) if left > right: verified = False keys[i:i + 2] = [right, left] sign = sign + 1 string1 = [ key_val[k] for k in keys ] return (string1, sign) def evaluate_deltas(e): """ We evaluate KroneckerDelta symbols in the expression assuming Einstein summation. Explanation =========== If one index is repeated it is summed over and in effect substituted with the other one. 
If both indices are repeated we substitute according to what is the preferred index. this is determined by KroneckerDelta.preferred_index and KroneckerDelta.killable_index. In case there are no possible substitutions or if a substitution would imply a loss of information, nothing is done. In case an index appears in more than one KroneckerDelta, the resulting substitution depends on the order of the factors. Since the ordering is platform dependent, the literal expression resulting from this function may be hard to predict. Examples ======== We assume the following: >>> from sympy import symbols, Function, Dummy, KroneckerDelta >>> from sympy.physics.secondquant import evaluate_deltas >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) >>> a,b = symbols('a b', above_fermi=True, cls=Dummy) >>> p,q = symbols('p q', cls=Dummy) >>> f = Function('f') >>> t = Function('t') The order of preference for these indices according to KroneckerDelta is (a, b, i, j, p, q). Trivial cases: >>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j) f(_j) >>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i) f(_i) >>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i) f(_i) >>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q) f(_q) >>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p) f(_p) More interesting cases: >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q)) f(_i, _q)*t(_a, _i) >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q)) f(_a, _q)*t(_a, _i) >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q)) f(_p, _p) Finally, here are some cases where nothing is done, because that would imply a loss of information: >>> evaluate_deltas(KroneckerDelta(i,p)*f(q)) f(_q)*KroneckerDelta(_i, _p) >>> evaluate_deltas(KroneckerDelta(i,p)*f(i)) f(_i)*KroneckerDelta(_i, _p) """ # We treat Deltas only in mul objects # for general function objects we don't evaluate KroneckerDeltas in arguments, # but here we hard code exceptions to this rule accepted_functions = ( Add, ) if isinstance(e, accepted_functions): return e.func(*[evaluate_deltas(arg) for arg in e.args]) elif isinstance(e, Mul): # find all occurrences of delta function and count each index present in # expression. deltas = [] indices = {} for i in e.args: for s in i.free_symbols: if s in indices: indices[s] += 1 else: indices[s] = 0 # geek counting simplifies logic below if isinstance(i, KroneckerDelta): deltas.append(i) for d in deltas: # If we do something, and there are more deltas, we should recurse # to treat the resulting expression properly if d.killable_index.is_Symbol and indices[d.killable_index]: e = e.subs(d.killable_index, d.preferred_index) if len(deltas) > 1: return evaluate_deltas(e) elif (d.preferred_index.is_Symbol and indices[d.preferred_index] and d.indices_contain_equal_information): e = e.subs(d.preferred_index, d.killable_index) if len(deltas) > 1: return evaluate_deltas(e) else: pass return e # nothing to do, maybe we hit a Symbol or a number else: return e def substitute_dummies(expr, new_indices=False, pretty_indices={}): """ Collect terms by substitution of dummy variables. Explanation =========== This routine allows simplification of Add expressions containing terms which differ only due to dummy variables. The idea is to substitute all dummy variables consistently depending on the structure of the term. For each term, we obtain a sequence of all dummy variables, where the order is determined by the index range, what factors the index belongs to and its position in each factor. 
See _get_ordered_dummies() for more information about the sorting of dummies. The index sequence is then substituted consistently in each term. Examples ======== >>> from sympy import symbols, Function, Dummy >>> from sympy.physics.secondquant import substitute_dummies >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy) >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) >>> f = Function('f') >>> expr = f(a,b) + f(c,d); expr f(_a, _b) + f(_c, _d) Since a, b, c and d are equivalent summation indices, the expression can be simplified to a single term (for which the dummy indices are still summed over) >>> substitute_dummies(expr) 2*f(_a, _b) Controlling output: By default the dummy symbols that are already present in the expression will be reused in a different permutation. However, if new_indices=True, new dummies will be generated and inserted. The keyword 'pretty_indices' can be used to control this generation of new symbols. By default the new dummies will be generated on the form i_1, i_2, a_1, etc. If you supply a dictionary with key:value pairs in the form: { index_group: string_of_letters } The letters will be used as labels for the new dummy symbols. The index_groups must be one of 'above', 'below' or 'general'. >>> expr = f(a,b,i,j) >>> my_dummies = { 'above':'st', 'below':'uv' } >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) f(_s, _t, _u, _v) If we run out of letters, or if there is no keyword for some index_group the default dummy generator will be used as a fallback: >>> p,q = symbols('p q', cls=Dummy) # general indices >>> expr = f(p,q) >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) f(_p_0, _p_1) """ # setup the replacing dummies if new_indices: letters_above = pretty_indices.get('above', "") letters_below = pretty_indices.get('below', "") letters_general = pretty_indices.get('general', "") len_above = len(letters_above) len_below = len(letters_below) len_general = len(letters_general) def _i(number): try: return letters_below[number] except IndexError: return 'i_' + str(number - len_below) def _a(number): try: return letters_above[number] except IndexError: return 'a_' + str(number - len_above) def _p(number): try: return letters_general[number] except IndexError: return 'p_' + str(number - len_general) aboves = [] belows = [] generals = [] dummies = expr.atoms(Dummy) if not new_indices: dummies = sorted(dummies, key=default_sort_key) # generate lists with the dummies we will insert a = i = p = 0 for d in dummies: assum = d.assumptions0 if assum.get("above_fermi"): if new_indices: sym = _a(a) a += 1 l1 = aboves elif assum.get("below_fermi"): if new_indices: sym = _i(i) i += 1 l1 = belows else: if new_indices: sym = _p(p) p += 1 l1 = generals if new_indices: l1.append(Dummy(sym, **assum)) else: l1.append(d) expr = expr.expand() terms = Add.make_args(expr) new_terms = [] for term in terms: i = iter(belows) a = iter(aboves) p = iter(generals) ordered = _get_ordered_dummies(term) subsdict = {} for d in ordered: if d.assumptions0.get('below_fermi'): subsdict[d] = next(i) elif d.assumptions0.get('above_fermi'): subsdict[d] = next(a) else: subsdict[d] = next(p) subslist = [] final_subs = [] for k, v in subsdict.items(): if k == v: continue if v in subsdict: # We check if the sequence of substitutions end quickly. In # that case, we can avoid temporary symbols if we ensure the # correct substitution order. 
if subsdict[v] in subsdict: # (x, y) -> (y, x), we need a temporary variable x = Dummy('x') subslist.append((k, x)) final_subs.append((x, v)) else: # (x, y) -> (y, a), x->y must be done last # but before temporary variables are resolved final_subs.insert(0, (k, v)) else: subslist.append((k, v)) subslist.extend(final_subs) new_terms.append(term.subs(subslist)) return Add(*new_terms) class KeyPrinter(StrPrinter): """Printer for which only equal objects are equal in print""" def _print_Dummy(self, expr): return "(%s_%i)" % (expr.name, expr.dummy_index) def __kprint(expr): p = KeyPrinter() return p.doprint(expr) def _get_ordered_dummies(mul, verbose=False): """Returns all dummies in the mul sorted in canonical order. Explanation =========== The purpose of the canonical ordering is that dummies can be substituted consistently across terms with the result that equivalent terms can be simplified. It is not possible to determine if two terms are equivalent based solely on the dummy order. However, a consistent substitution guided by the ordered dummies should lead to trivially (non-)equivalent terms, thereby revealing the equivalence. This also means that if two terms have identical sequences of dummies, the (non-)equivalence should already be apparent. Strategy -------- The canoncial order is given by an arbitrary sorting rule. A sort key is determined for each dummy as a tuple that depends on all factors where the index is present. The dummies are thereby sorted according to the contraction structure of the term, instead of sorting based solely on the dummy symbol itself. After all dummies in the term has been assigned a key, we check for identical keys, i.e. unorderable dummies. If any are found, we call a specialized method, _determine_ambiguous(), that will determine a unique order based on recursive calls to _get_ordered_dummies(). Key description --------------- A high level description of the sort key: 1. Range of the dummy index 2. Relation to external (non-dummy) indices 3. Position of the index in the first factor 4. Position of the index in the second factor The sort key is a tuple with the following components: 1. A single character indicating the range of the dummy (above, below or general.) 2. A list of strings with fully masked string representations of all factors where the dummy is present. By masked, we mean that dummies are represented by a symbol to indicate either below fermi, above or general. No other information is displayed about the dummies at this point. The list is sorted stringwise. 3. An integer number indicating the position of the index, in the first factor as sorted in 2. 4. An integer number indicating the position of the index, in the second factor as sorted in 2. If a factor is either of type AntiSymmetricTensor or SqOperator, the index position in items 3 and 4 is indicated as 'upper' or 'lower' only. (Creation operators are considered upper and annihilation operators lower.) If the masked factors are identical, the two factors cannot be ordered unambiguously in item 2. In this case, items 3, 4 are left out. 
If several indices are contracted between the unorderable factors, it will be handled by _determine_ambiguous() """ # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' dum_repr = {d: __kprint(d) for d in all_dums} def _key(d): dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ] other_dums = set().union(*[fac_dum[fac] for fac in dumstruct]) fac = dumstruct[-1] if other_dums is fac_dum[fac]: other_dums = fac_dum[fac].copy() other_dums.remove(d) masked_facs = [ fac_repr[fac] for fac in dumstruct ] for d2 in other_dums: masked_facs = [ fac.replace(dum_repr[d2], mask[d2]) for fac in masked_facs ] all_masked = [ fac.replace(dum_repr[d], mask[d]) for fac in masked_facs ] masked_facs = dict(list(zip(dumstruct, masked_facs))) # dummies for which the ordering cannot be determined if has_dups(all_masked): all_masked.sort() return mask[d], tuple(all_masked) # positions are ambiguous # sort factors according to fully masked strings keydict = dict(list(zip(dumstruct, all_masked))) dumstruct.sort(key=lambda x: keydict[x]) all_masked.sort() pos_val = [] for fac in dumstruct: if isinstance(fac, AntiSymmetricTensor): if d in fac.upper: pos_val.append('u') if d in fac.lower: pos_val.append('l') elif isinstance(fac, Creator): pos_val.append('u') elif isinstance(fac, Annihilator): pos_val.append('l') elif isinstance(fac, NO): ops = [ op for op in fac if op.has(d) ] for op in ops: if isinstance(op, Creator): pos_val.append('u') else: pos_val.append('l') else: # fallback to position in string representation facpos = -1 while 1: facpos = masked_facs[fac].find(dum_repr[d], facpos + 1) if facpos == -1: break pos_val.append(facpos) return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1]) dumkey = dict(list(zip(all_dums, list(map(_key, all_dums))))) result = sorted(all_dums, key=lambda x: dumkey[x]) if has_dups(iter(dumkey.values())): # We have ambiguities unordered = defaultdict(set) for d, k in dumkey.items(): unordered[k].add(d) for k in [ k for k in unordered if len(unordered[k]) < 2 ]: del unordered[k] unordered = [ unordered[k] for k in sorted(unordered) ] result = _determine_ambiguous(mul, result, unordered) return result def _determine_ambiguous(term, ordered, ambiguous_groups): # We encountered a term for which the dummy substitution is ambiguous. # This happens for terms with 2 or more contractions between factors that # cannot be uniquely ordered independent of summation indices. For # example: # # Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .} # # Assuming that the indices represented by . are dummies with the # same range, the factors cannot be ordered, and there is no # way to determine a consistent ordering of p and q. # # The strategy employed here, is to relabel all unambiguous dummies with # non-dummy symbols and call _get_ordered_dummies again. This procedure is # applied to the entire term so there is a possibility that # _determine_ambiguous() is called again from a deeper recursion level. # break recursion if there are no ordered dummies all_ambiguous = set() for dummies in ambiguous_groups: all_ambiguous |= dummies all_ordered = set(ordered) - all_ambiguous if not all_ordered: # FIXME: If we arrive here, there are no ordered dummies. 
A method to # handle this needs to be implemented. In order to return something # useful nevertheless, we choose arbitrarily the first dummy and # determine the rest from this one. This method is dependent on the # actual dummy labels which violates an assumption for the # canonicalization procedure. A better implementation is needed. group = [ d for d in ordered if d in ambiguous_groups[0] ] d = group[0] all_ordered.add(d) ambiguous_groups[0].remove(d) stored_counter = _symbol_factory._counter subslist = [] for d in [ d for d in ordered if d in all_ordered ]: nondum = _symbol_factory._next() subslist.append((d, nondum)) newterm = term.subs(subslist) neworder = _get_ordered_dummies(newterm) _symbol_factory._set_counter(stored_counter) # update ordered list with new information for group in ambiguous_groups: ordered_group = [ d for d in neworder if d in group ] ordered_group.reverse() result = [] for d in ordered: if d in group: result.append(ordered_group.pop()) else: result.append(d) ordered = result return ordered class _SymbolFactory: def __init__(self, label): self._counterVar = 0 self._label = label def _set_counter(self, value): """ Sets counter to value. """ self._counterVar = value @property def _counter(self): """ What counter is currently at. """ return self._counterVar def _next(self): """ Generates the next symbols and increments counter by 1. """ s = Symbol("%s%i" % (self._label, self._counterVar)) self._counterVar += 1 return s _symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label @cacheit def _get_contractions(string1, keep_only_fully_contracted=False): """ Returns Add-object with contracted terms. Uses recursion to find all contractions. -- Internal helper function -- Will find nonzero contractions in string1 between indices given in leftrange and rightrange. """ # Should we store current level of contraction? if keep_only_fully_contracted and string1: result = [] else: result = [NO(Mul(*string1))] for i in range(len(string1) - 1): for j in range(i + 1, len(string1)): c = contraction(string1[i], string1[j]) if c: sign = (j - i + 1) % 2 if sign: coeff = S.NegativeOne*c else: coeff = c # # Call next level of recursion # ============================ # # We now need to find more contractions among operators # # oplist = string1[:i]+ string1[i+1:j] + string1[j+1:] # # To prevent overcounting, we don't allow contractions # we have already encountered. i.e. contractions between # string1[:i] <---> string1[i+1:j] # and string1[:i] <---> string1[j+1:]. # # This leaves the case: oplist = string1[i + 1:j] + string1[j + 1:] if oplist: result.append(coeff*NO( Mul(*string1[:i])*_get_contractions( oplist, keep_only_fully_contracted=keep_only_fully_contracted))) else: result.append(coeff*NO( Mul(*string1[:i]))) if keep_only_fully_contracted: break # next iteration over i leaves leftmost operator string1[0] uncontracted return Add(*result) def wicks(e, **kw_args): """ Returns the normal ordered equivalent of an expression using Wicks Theorem. Examples ======== >>> from sympy import symbols, Dummy >>> from sympy.physics.secondquant import wicks, F, Fd >>> p, q, r = symbols('p,q,r') >>> wicks(Fd(p)*F(q)) KroneckerDelta(_i, q)*KroneckerDelta(p, q) + NO(CreateFermion(p)*AnnihilateFermion(q)) By default, the expression is expanded: >>> wicks(F(p)*(F(q)+F(r))) NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r)) With the keyword 'keep_only_fully_contracted=True', only fully contracted terms are returned. 
By request, the result can be simplified in the following order: -- KroneckerDelta functions are evaluated -- Dummy variables are substituted consistently across terms >>> p, q, r = symbols('p q r', cls=Dummy) >>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True) KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r) """ if not e: return S.Zero opts = { 'simplify_kronecker_deltas': False, 'expand': True, 'simplify_dummies': False, 'keep_only_fully_contracted': False } opts.update(kw_args) # check if we are already normally ordered if isinstance(e, NO): if opts['keep_only_fully_contracted']: return S.Zero else: return e elif isinstance(e, FermionicOperator): if opts['keep_only_fully_contracted']: return S.Zero else: return e # break up any NO-objects, and evaluate commutators e = e.doit(wicks=True) # make sure we have only one term to consider e = e.expand() if isinstance(e, Add): if opts['simplify_dummies']: return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args])) else: return Add(*[ wicks(term, **kw_args) for term in e.args]) # For Mul-objects we can actually do something if isinstance(e, Mul): # we don't want to mess around with commuting part of Mul # so we factorize it out before starting recursion c_part = [] string1 = [] for factor in e.args: if factor.is_commutative: c_part.append(factor) else: string1.append(factor) n = len(string1) # catch trivial cases if n == 0: result = e elif n == 1: if opts['keep_only_fully_contracted']: return S.Zero else: result = e else: # non-trivial if isinstance(string1[0], BosonicOperator): raise NotImplementedError string1 = tuple(string1) # recursion over higher order contractions result = _get_contractions(string1, keep_only_fully_contracted=opts['keep_only_fully_contracted'] ) result = Mul(*c_part)*result if opts['expand']: result = result.expand() if opts['simplify_kronecker_deltas']: result = evaluate_deltas(result) return result # there was nothing to do return e class PermutationOperator(Expr): """ Represents the index permutation operator P(ij). P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i) """ is_commutative = True def __new__(cls, i, j): i, j = sorted(map(sympify, (i, j)), key=default_sort_key) obj = Basic.__new__(cls, i, j) return obj def get_permuted(self, expr): """ Returns -expr with permuted indices. Explanation =========== >>> from sympy import symbols, Function >>> from sympy.physics.secondquant import PermutationOperator >>> p,q = symbols('p,q') >>> f = Function('f') >>> PermutationOperator(p,q).get_permuted(f(p,q)) -f(q, p) """ i = self.args[0] j = self.args[1] if expr.has(i) and expr.has(j): tmp = Dummy() expr = expr.subs(i, tmp) expr = expr.subs(j, i) expr = expr.subs(tmp, j) return S.NegativeOne*expr else: return expr def _latex(self, printer): return "P(%s%s)" % self.args def simplify_index_permutations(expr, permutation_operators): """ Performs simplification by introducing PermutationOperators where appropriate. Explanation =========== Schematically: [abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij] permutation_operators is a list of PermutationOperators to consider. If permutation_operators=[P(ab),P(ij)] we will try to introduce the permutation operators P(ij) and P(ab) in the expression. If there are other possible simplifications, we ignore them. 
>>> from sympy import symbols, Function >>> from sympy.physics.secondquant import simplify_index_permutations >>> from sympy.physics.secondquant import PermutationOperator >>> p,q,r,s = symbols('p,q,r,s') >>> f = Function('f') >>> g = Function('g') >>> expr = f(p)*g(q) - f(q)*g(p); expr f(p)*g(q) - f(q)*g(p) >>> simplify_index_permutations(expr,[PermutationOperator(p,q)]) f(p)*g(q)*PermutationOperator(p, q) >>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)] >>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r) >>> simplify_index_permutations(expr,PermutList) f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s) """ def _get_indices(expr, ind): """ Collects indices recursively in predictable order. """ result = [] for arg in expr.args: if arg in ind: result.append(arg) else: if arg.args: result.extend(_get_indices(arg, ind)) return result def _choose_one_to_keep(a, b, ind): # we keep the one where indices in ind are in order ind[0] < ind[1] return min(a, b, key=lambda x: default_sort_key(_get_indices(x, ind))) expr = expr.expand() if isinstance(expr, Add): terms = set(expr.args) for P in permutation_operators: new_terms = set() on_hold = set() while terms: term = terms.pop() permuted = P.get_permuted(term) if permuted in terms | on_hold: try: terms.remove(permuted) except KeyError: on_hold.remove(permuted) keep = _choose_one_to_keep(term, permuted, P.args) new_terms.add(P*keep) else: # Some terms must get a second chance because the permuted # term may already have canonical dummy ordering. Then # substitute_dummies() does nothing. However, the other # term, if it exists, will be able to match with us. permuted1 = permuted permuted = substitute_dummies(permuted) if permuted1 == permuted: on_hold.add(term) elif permuted in terms | on_hold: try: terms.remove(permuted) except KeyError: on_hold.remove(permuted) keep = _choose_one_to_keep(term, permuted, P.args) new_terms.add(P*keep) else: new_terms.add(term) terms = new_terms | on_hold return Add(*terms) return expr
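# --- Illustrative usage sketch (not part of the original module; the dummy
# symbols and pretty_indices letters below are assumptions chosen for the
# example) ---
# The routines above are typically chained: wicks() expands a product of
# operators, simplify_kronecker_deltas removes the resulting KroneckerDelta
# factors, substitute_dummies() relabels summation indices consistently so
# equivalent terms can merge, and simplify_index_permutations() compresses
# antisymmetrized pairs into PermutationOperator factors.
#
# from sympy import symbols, Dummy
# from sympy.physics.secondquant import (wicks, substitute_dummies, F, Fd)
#
# p, q, r, s = symbols('p q r s', cls=Dummy)
# expr = wicks(Fd(p)*Fd(q)*F(s)*F(r),
#              keep_only_fully_contracted=True,
#              simplify_kronecker_deltas=True)
# expr = substitute_dummies(expr, new_indices=True,
#                           pretty_indices={'above': 'abcd', 'below': 'ijkl'})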
17db0c1067f412b52c9461f3847d005040e84771dc3e10e1a2777a15db15ad49
from sympy import sqrt, exp, S, pi, I from sympy.physics.quantum.constants import hbar def wavefunction(n, x): """ Returns the wavefunction for particle on ring. Parameters ========== n : The quantum number. Here ``n`` can be positive as well as negative which can be used to describe the direction of motion of particle. x : The angle. Examples ======== >>> from sympy.physics.pring import wavefunction >>> from sympy import Symbol, integrate, pi >>> x=Symbol("x") >>> wavefunction(1, x) sqrt(2)*exp(I*x)/(2*sqrt(pi)) >>> wavefunction(2, x) sqrt(2)*exp(2*I*x)/(2*sqrt(pi)) >>> wavefunction(3, x) sqrt(2)*exp(3*I*x)/(2*sqrt(pi)) The normalization of the wavefunction is: >>> integrate(wavefunction(2, x)*wavefunction(-2, x), (x, 0, 2*pi)) 1 >>> integrate(wavefunction(4, x)*wavefunction(-4, x), (x, 0, 2*pi)) 1 References ========== .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum Mechanics (4th ed.). Pages 71-73. """ # sympify arguments n, x = S(n), S(x) return exp(n * I * x) / sqrt(2 * pi) def energy(n, m, r): """ Returns the energy of the state corresponding to quantum number ``n``. E=(n**2 * (hcross)**2) / (2 * m * r**2) Parameters ========== n : The quantum number. m : Mass of the particle. r : Radius of circle. Examples ======== >>> from sympy.physics.pring import energy >>> from sympy import Symbol >>> m=Symbol("m") >>> r=Symbol("r") >>> energy(1, m, r) hbar**2/(2*m*r**2) >>> energy(2, m, r) 2*hbar**2/(m*r**2) >>> energy(-2, 2.0, 3.0) 0.111111111111111*hbar**2 References ========== .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum Mechanics (4th ed.). Pages 71-73. """ n, m, r = S(n), S(m), S(r) if n.is_integer: return (n**2 * hbar**2) / (2 * m * r**2) else: raise ValueError("'n' must be integer")
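# --- Illustrative sketch (not part of the original module; the symbol name x
# is an assumption) ---
# States with different quantum numbers are orthogonal under the same inner
# product used for the normalization checks in the docstring above, since
# exp(I*n*x)*exp(-I*m*x) integrates to zero over [0, 2*pi] for n != m:
#
# from sympy import Symbol, integrate, pi
# from sympy.physics.pring import wavefunction
# x = Symbol("x")
# integrate(wavefunction(1, x)*wavefunction(-3, x), (x, 0, 2*pi))   # -> 0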
352194aebb36a72a55b5f1de3ba03e5a869f29d9f12f3d0b6b9d96849e6b84c6
"""A module to manipulate symbolic objects with indices including tensors """ from .indexed import IndexedBase, Idx, Indexed from .index_methods import get_contraction_structure, get_indices from .array import (MutableDenseNDimArray, ImmutableDenseNDimArray, MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct, tensorcontraction, tensordiagonal, derive_by_array, permutedims, Array, DenseNDimArray, SparseNDimArray,) __all__ = [ 'IndexedBase', 'Idx', 'Indexed', 'get_contraction_structure', 'get_indices', 'MutableDenseNDimArray', 'ImmutableDenseNDimArray', 'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray', 'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array', 'permutedims', 'Array', 'DenseNDimArray', 'SparseNDimArray', ]
611317aa8f615a79562cf8a1772ee7fc0c66e9eefc3844f3362eaad5e31c6798
""" This module defines tensors with abstract index notation. The abstract index notation has been first formalized by Penrose. Tensor indices are formal objects, with a tensor type; there is no notion of index range, it is only possible to assign the dimension, used to trace the Kronecker delta; the dimension can be a Symbol. The Einstein summation convention is used. The covariant indices are indicated with a minus sign in front of the index. For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c`` contracted. A tensor expression ``t`` can be called; called with its indices in sorted order it is equal to itself: in the above example ``t(a, b) == t``; one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``. The contracted indices are dummy indices, internally they have no name, the indices being represented by a graph-like structure. Tensors are put in canonical form using ``canon_bp``, which uses the Butler-Portugal algorithm for canonicalization using the monoterm symmetries of the tensors. If there is a (anti)symmetric metric, the indices can be raised and lowered when the tensor is put in canonical form. """ from typing import Any, Dict as tDict, List, Set from functools import reduce from abc import abstractmethod, ABCMeta from collections import defaultdict import operator import itertools from sympy import Rational, prod, Integer, default_sort_key from sympy.combinatorics import Permutation from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \ bsgs_direct_product, canonicalize, riemann_bsgs from sympy.core import Basic, Expr, sympify, Add, Mul, S from sympy.core.assumptions import ManagedProperties from sympy.core.compatibility import SYMPY_INTS from sympy.core.containers import Tuple, Dict from sympy.core.decorators import deprecated from sympy.core.symbol import Symbol, symbols from sympy.core.sympify import CantSympify, _sympify from sympy.core.operations import AssocOp from sympy.matrices import eye from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.utilities.decorator import memoize_property import warnings @deprecated(useinstead=".replace_with_arrays", issue=15276, deprecated_since_version="1.4") def deprecate_data(): pass @deprecated(useinstead=".substitute_indices()", issue=17515, deprecated_since_version="1.5") def deprecate_fun_eval(): pass @deprecated(useinstead="tensor_heads()", issue=17108, deprecated_since_version="1.5") def deprecate_TensorType(): pass class _IndexStructure(CantSympify): """ This class handles the indices (free and dummy ones). It contains the algorithms to manage the dummy indices replacements and contractions of free indices under multiplications of tensor expressions, as well as stuff related to canonicalization sorting, getting the permutation of the expression and so on. It also includes tools to get the ``TensorIndex`` objects corresponding to the given index structure. """ def __init__(self, free, dum, index_types, indices, canon_bp=False): self.free = free self.dum = dum self.index_types = index_types self.indices = indices self._ext_rank = len(self.free) + 2*len(self.dum) self.dum.sort(key=lambda x: x[0]) @staticmethod def from_indices(*indices): """ Create a new ``_IndexStructure`` object from a list of ``indices`` ``indices`` ``TensorIndex`` objects, the indices. Contractions are detected upon construction. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) >>> _IndexStructure.from_indices(m0, m1, -m1, m3) _IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz]) """ free, dum = _IndexStructure._free_dum_from_indices(*indices) index_types = [i.tensor_index_type for i in indices] indices = _IndexStructure._replace_dummy_names(indices, free, dum) return _IndexStructure(free, dum, index_types, indices) @staticmethod def from_components_free_dum(components, free, dum): index_types = [] for component in components: index_types.extend(component.index_types) indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types) return _IndexStructure(free, dum, index_types, indices) @staticmethod def _free_dum_from_indices(*indices): """ Convert ``indices`` into ``free``, ``dum`` for single component tensor ``free`` list of tuples ``(index, pos, 0)``, where ``pos`` is the position of index in the list of indices formed by the component tensors ``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)`` Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \ _IndexStructure >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) >>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3) ([(m0, 0), (m3, 3)], [(1, 2)]) """ n = len(indices) if n == 1: return [(indices[0], 0)], [] # find the positions of the free indices and of the dummy indices free = [True]*len(indices) index_dict = {} dum = [] for i, index in enumerate(indices): name = index.name typ = index.tensor_index_type contr = index.is_up if (name, typ) in index_dict: # found a pair of dummy indices is_contr, pos = index_dict[(name, typ)] # check consistency and update free if is_contr: if contr: raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i)) else: free[pos] = False free[i] = False else: if contr: free[pos] = False free[i] = False else: raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i)) if contr: dum.append((i, pos)) else: dum.append((pos, i)) else: index_dict[(name, typ)] = index.is_up, i free = [(index, i) for i, index in enumerate(indices) if free[i]] free.sort() return free, dum def get_indices(self): """ Get a list of indices, creating new tensor indices to complete dummy indices. 
""" return self.indices[:] @staticmethod def generate_indices_from_free_dum_index_types(free, dum, index_types): indices = [None]*(len(free)+2*len(dum)) for idx, pos in free: indices[pos] = idx generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) for pos1, pos2 in dum: typ1 = index_types[pos1] indname = generate_dummy_name(typ1) indices[pos1] = TensorIndex(indname, typ1, True) indices[pos2] = TensorIndex(indname, typ1, False) return _IndexStructure._replace_dummy_names(indices, free, dum) @staticmethod def _get_generator_for_dummy_indices(free): cdt = defaultdict(int) # if the free indices have names with dummy_name, start with an # index higher than those for the dummy indices # to avoid name collisions for indx, ipos in free: if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name: cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1) def dummy_name_gen(tensor_index_type): nd = str(cdt[tensor_index_type]) cdt[tensor_index_type] += 1 return tensor_index_type.dummy_name + '_' + nd return dummy_name_gen @staticmethod def _replace_dummy_names(indices, free, dum): dum.sort(key=lambda x: x[0]) new_indices = [ind for ind in indices] assert len(indices) == len(free) + 2*len(dum) generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) for ipos1, ipos2 in dum: typ1 = new_indices[ipos1].tensor_index_type indname = generate_dummy_name(typ1) new_indices[ipos1] = TensorIndex(indname, typ1, True) new_indices[ipos2] = TensorIndex(indname, typ1, False) return new_indices def get_free_indices(self): # type: () -> List[TensorIndex] """ Get a list of free indices. """ # get sorted indices according to their position: free = sorted(self.free, key=lambda x: x[1]) return [i[0] for i in free] def __str__(self): return "_IndexStructure({}, {}, {})".format(self.free, self.dum, self.index_types) def __repr__(self): return self.__str__() def _get_sorted_free_indices_for_canon(self): sorted_free = self.free[:] sorted_free.sort(key=lambda x: x[0]) return sorted_free def _get_sorted_dum_indices_for_canon(self): return sorted(self.dum, key=lambda x: x[0]) def _get_lexicographically_sorted_index_types(self): permutation = self.indices_canon_args()[0] index_types = [None]*self._ext_rank for i, it in enumerate(self.index_types): index_types[permutation(i)] = it return index_types def _get_lexicographically_sorted_indices(self): permutation = self.indices_canon_args()[0] indices = [None]*self._ext_rank for i, it in enumerate(self.indices): indices[permutation(i)] = it return indices def perm2tensor(self, g, is_canon_bp=False): """ Returns a ``_IndexStructure`` instance corresponding to the permutation ``g`` ``g`` permutation corresponding to the tensor in the representation used in canonicalization ``is_canon_bp`` if True, then ``g`` is the permutation corresponding to the canonical form of the tensor """ sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()] lex_index_types = self._get_lexicographically_sorted_index_types() lex_indices = self._get_lexicographically_sorted_indices() nfree = len(sorted_free) rank = self._ext_rank dum = [[None]*2 for i in range((rank - nfree)//2)] free = [] index_types = [None]*rank indices = [None]*rank for i in range(rank): gi = g[i] index_types[i] = lex_index_types[gi] indices[i] = lex_indices[gi] if gi < nfree: ind = sorted_free[gi] assert index_types[i] == sorted_free[gi].tensor_index_type free.append((ind, i)) else: j = gi - nfree idum, cov = divmod(j, 2) if cov: 
dum[idum][1] = i else: dum[idum][0] = i dum = [tuple(x) for x in dum] return _IndexStructure(free, dum, index_types, indices) def indices_canon_args(self): """ Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize`` see ``canonicalize`` in ``tensor_can.py`` in combinatorics module """ # to be called after sorted_components from sympy.combinatorics.permutations import _af_new n = self._ext_rank g = [None]*n + [n, n+1] # Converts the symmetry of the metric into msym from .canonicalize() # method in the combinatorics module def metric_symmetry_to_msym(metric): if metric is None: return None sym = metric.symmetry if sym == TensorSymmetry.fully_symmetric(2): return 0 if sym == TensorSymmetry.fully_symmetric(-2): return 1 return None # ordered indices: first the free indices, ordered by types # then the dummy indices, ordered by types and contravariant before # covariant # g[position in tensor] = position in ordered indices for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()): g[ipos] = i pos = len(self.free) j = len(self.free) dummies = [] prev = None a = [] msym = [] for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon(): g[ipos1] = j g[ipos2] = j + 1 j += 2 typ = self.index_types[ipos1] if typ != prev: if a: dummies.append(a) a = [pos, pos + 1] prev = typ msym.append(metric_symmetry_to_msym(typ.metric)) else: a.extend([pos, pos + 1]) pos += 2 if a: dummies.append(a) return _af_new(g), dummies, msym def components_canon_args(components): numtyp = [] prev = None for t in components: if t == prev: numtyp[-1][1] += 1 else: prev = t numtyp.append([prev, 1]) v = [] for h, n in numtyp: if h.comm == 0 or h.comm == 1: comm = h.comm else: comm = TensorManager.get_comm(h.comm, h.comm) v.append((h.symmetry.base, h.symmetry.generators, n, comm)) return v class _TensorDataLazyEvaluator(CantSympify): """ EXPERIMENTAL: do not rely on this class, it may change without deprecation warnings in future versions of SymPy. This object contains the logic to associate components data to a tensor expression. Components data are set via the ``.data`` property of tensor expressions, is stored inside this class as a mapping between the tensor expression and the ``ndarray``. Computations are executed lazily: whereas the tensor expressions can have contractions, tensor products, and additions, components data are not computed until they are accessed by reading the ``.data`` property associated to the tensor expression. """ _substitutions_dict = dict() # type: tDict[Any, Any] _substitutions_dict_tensmul = dict() # type: tDict[Any, Any] def __getitem__(self, key): dat = self._get(key) if dat is None: return None from .array import NDimArray if not isinstance(dat, NDimArray): return dat if dat.rank() == 0: return dat[()] elif dat.rank() == 1 and len(dat) == 1: return dat[0] return dat def _get(self, key): """ Retrieve ``data`` associated with ``key``. This algorithm looks into ``self._substitutions_dict`` for all ``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a TensorHead instance). It reconstructs the components data that the tensor expression should have by performing on components data the operations that correspond to the abstract tensor operations applied. Metric tensor is handled in a different manner: it is pre-computed in ``self._substitutions_dict_tensmul``. """ if key in self._substitutions_dict: return self._substitutions_dict[key] if isinstance(key, TensorHead): return None if isinstance(key, Tensor): # special case to handle metrics. 
Metric tensors cannot be # constructed through contraction by the metric, their # components show if they are a matrix or its inverse. signature = tuple([i.is_up for i in key.get_indices()]) srch = (key.component,) + signature if srch in self._substitutions_dict_tensmul: return self._substitutions_dict_tensmul[srch] array_list = [self.data_from_tensor(key)] return self.data_contract_dum(array_list, key.dum, key.ext_rank) if isinstance(key, TensMul): tensmul_args = key.args if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1: # special case to handle metrics. Metric tensors cannot be # constructed through contraction by the metric, their # components show if they are a matrix or its inverse. signature = tuple([i.is_up for i in tensmul_args[0].get_indices()]) srch = (tensmul_args[0].components[0],) + signature if srch in self._substitutions_dict_tensmul: return self._substitutions_dict_tensmul[srch] #data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)] data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)] coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)]) if all([i is None for i in data_list]): return None if any([i is None for i in data_list]): raise ValueError("Mixing tensors with associated components "\ "data with tensors without components data") data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank) return coeff*data_result if isinstance(key, TensAdd): data_list = [] free_args_list = [] for arg in key.args: if isinstance(arg, TensExpr): data_list.append(arg.data) free_args_list.append([x[0] for x in arg.free]) else: data_list.append(arg) free_args_list.append([]) if all([i is None for i in data_list]): return None if any([i is None for i in data_list]): raise ValueError("Mixing tensors with associated components "\ "data with tensors without components data") sum_list = [] from .array import permutedims for data, free_args in zip(data_list, free_args_list): if len(free_args) < 2: sum_list.append(data) else: free_args_pos = {y: x for x, y in enumerate(free_args)} axes = [free_args_pos[arg] for arg in key.free_args] sum_list.append(permutedims(data, axes)) return reduce(lambda x, y: x+y, sum_list) return None @staticmethod def data_contract_dum(ndarray_list, dum, ext_rank): from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray arrays = list(map(MutableDenseNDimArray, ndarray_list)) prodarr = tensorproduct(*arrays) return tensorcontraction(prodarr, *dum) def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead): """ This method is used when assigning components data to a ``TensMul`` object, it converts components data to a fully contravariant ndarray, which is then stored according to the ``TensorHead`` key. """ if data is None: return None return self._correct_signature_from_indices( data, tensmul.get_indices(), tensmul.free, tensmul.dum, True) def data_from_tensor(self, tensor): """ This method corrects the components data to the right signature (covariant/contravariant) using the metric associated with each ``TensorIndexType``. """ tensorhead = tensor.component if tensorhead.data is None: return None return self._correct_signature_from_indices( tensorhead.data, tensor.get_indices(), tensor.free, tensor.dum) def _assign_data_to_tensor_expr(self, key, data): if isinstance(key, TensAdd): raise ValueError('cannot assign data to TensAdd') # here it is assumed that `key` is a `TensMul` instance. 
if len(key.components) != 1: raise ValueError('cannot assign data to TensMul with multiple components') tensorhead = key.components[0] newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead) return tensorhead, newdata def _check_permutations_on_data(self, tens, data): from .array import permutedims from .array.arrayop import Flatten if isinstance(tens, TensorHead): rank = tens.rank generators = tens.symmetry.generators elif isinstance(tens, Tensor): rank = tens.rank generators = tens.components[0].symmetry.generators elif isinstance(tens, TensorIndexType): rank = tens.metric.rank generators = tens.metric.symmetry.generators # Every generator is a permutation, check that by permuting the array # by that permutation, the array will be the same, except for a # possible sign change if the permutation admits it. for gener in generators: sign_change = +1 if (gener(rank) == rank) else -1 data_swapped = data last_data = data permute_axes = list(map(gener, list(range(rank)))) # the order of a permutation is the number of times to get the # identity by applying that permutation. for i in range(gener.order()-1): data_swapped = permutedims(data_swapped, permute_axes) # if any value in the difference array is non-zero, raise an error: if any(Flatten(last_data - sign_change*data_swapped)): raise ValueError("Component data symmetry structure error") last_data = data_swapped def __setitem__(self, key, value): """ Set the components data of a tensor object/expression. Components data are transformed to the all-contravariant form and stored with the corresponding ``TensorHead`` object. If a ``TensorHead`` object cannot be uniquely identified, it will raise an error. """ data = _TensorDataLazyEvaluator.parse_data(value) self._check_permutations_on_data(key, data) # TensorHead and TensorIndexType can be assigned data directly, while # TensMul must first convert data to a fully contravariant form, and # assign it to its corresponding TensorHead single component. if not isinstance(key, (TensorHead, TensorIndexType)): key, data = self._assign_data_to_tensor_expr(key, data) if isinstance(key, TensorHead): for dim, indextype in zip(data.shape, key.index_types): if indextype.data is None: raise ValueError("index type {} has no components data"\ " associated (needed to raise/lower index)".format(indextype)) if not indextype.dim.is_number: continue if dim != indextype.dim: raise ValueError("wrong dimension of ndarray") self._substitutions_dict[key] = data def __delitem__(self, key): del self._substitutions_dict[key] def __contains__(self, key): return key in self._substitutions_dict def add_metric_data(self, metric, data): """ Assign data to the ``metric`` tensor. The metric tensor behaves in an anomalous way when raising and lowering indices. A fully covariant metric is the inverse transpose of the fully contravariant metric (it is meant matrix inverse). If the metric is symmetric, the transpose is not necessary and mixed covariant/contravariant metrics are Kronecker deltas. """ # hard assignment, data should not be added to `TensorHead` for metric: # the problem with `TensorHead` is that the metric is anomalous, i.e. # raising and lowering the index means considering the metric or its # inverse, this is not the case for other tensors. 
self._substitutions_dict_tensmul[metric, True, True] = data inverse_transpose = self.inverse_transpose_matrix(data) # in symmetric spaces, the transpose is the same as the original matrix, # the full covariant metric tensor is the inverse transpose, so this # code will be able to handle non-symmetric metrics. self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose # now mixed cases, these are identical to the unit matrix if the metric # is symmetric. m = data.tomatrix() invt = inverse_transpose.tomatrix() self._substitutions_dict_tensmul[metric, True, False] = m * invt self._substitutions_dict_tensmul[metric, False, True] = invt * m @staticmethod def _flip_index_by_metric(data, metric, pos): from .array import tensorproduct, tensorcontraction mdim = metric.rank() ddim = data.rank() if pos == 0: data = tensorcontraction( tensorproduct( metric, data ), (1, mdim+pos) ) else: data = tensorcontraction( tensorproduct( data, metric ), (pos, ddim) ) return data @staticmethod def inverse_matrix(ndarray): m = ndarray.tomatrix().inv() return _TensorDataLazyEvaluator.parse_data(m) @staticmethod def inverse_transpose_matrix(ndarray): m = ndarray.tomatrix().inv().T return _TensorDataLazyEvaluator.parse_data(m) @staticmethod def _correct_signature_from_indices(data, indices, free, dum, inverse=False): """ Utility function to correct the values inside the components data ndarray according to whether indices are covariant or contravariant. It uses the metric matrix to lower values of covariant indices. """ # change the ndarray values according covariantness/contravariantness of the indices # use the metric for i, indx in enumerate(indices): if not indx.is_up and not inverse: data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i) elif not indx.is_up and inverse: data = _TensorDataLazyEvaluator._flip_index_by_metric( data, _TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data), i ) return data @staticmethod def _sort_data_axes(old, new): from .array import permutedims new_data = old.data.copy() old_free = [i[0] for i in old.free] new_free = [i[0] for i in new.free] for i in range(len(new_free)): for j in range(i, len(old_free)): if old_free[j] == new_free[i]: old_free[i], old_free[j] = old_free[j], old_free[i] new_data = permutedims(new_data, (i, j)) break return new_data @staticmethod def add_rearrange_tensmul_parts(new_tensmul, old_tensmul): def sorted_compo(): return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul) _TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo() @staticmethod def parse_data(data): """ Transform ``data`` to array. The parameter ``data`` may contain data in various formats, e.g. nested lists, sympy ``Matrix``, and so on. Examples ======== >>> from sympy.tensor.tensor import _TensorDataLazyEvaluator >>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12]) [1, 3, -6, 12] >>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]]) [[1, 2], [4, 7]] """ from .array import MutableDenseNDimArray if not isinstance(data, MutableDenseNDimArray): if len(data) == 2 and hasattr(data[0], '__call__'): data = MutableDenseNDimArray(data[0], data[1]) else: data = MutableDenseNDimArray(data) return data _tensor_data_substitution_dict = _TensorDataLazyEvaluator() class _TensorManager: """ Class to manage tensor properties. 
Notes ===== Tensors belong to tensor commutation groups; each group has a label ``comm``; there are predefined labels: ``0`` tensors commuting with any other tensor ``1`` tensors anticommuting among themselves ``2`` tensors not commuting, apart with those with ``comm=0`` Other groups can be defined using ``set_comm``; tensors in those groups commute with those with ``comm=0``; by default they do not commute with any other group. """ def __init__(self): self._comm_init() def _comm_init(self): self._comm = [{} for i in range(3)] for i in range(3): self._comm[0][i] = 0 self._comm[i][0] = 0 self._comm[1][1] = 1 self._comm[2][1] = None self._comm[1][2] = None self._comm_symbols2i = {0:0, 1:1, 2:2} self._comm_i2symbol = {0:0, 1:1, 2:2} @property def comm(self): return self._comm def comm_symbols2i(self, i): """ get the commutation group number corresponding to ``i`` ``i`` can be a symbol or a number or a string If ``i`` is not already defined its commutation group number is set. """ if i not in self._comm_symbols2i: n = len(self._comm) self._comm.append({}) self._comm[n][0] = 0 self._comm[0][n] = 0 self._comm_symbols2i[i] = n self._comm_i2symbol[n] = i return n return self._comm_symbols2i[i] def comm_i2symbol(self, i): """ Returns the symbol corresponding to the commutation group number. """ return self._comm_i2symbol[i] def set_comm(self, i, j, c): """ set the commutation parameter ``c`` for commutation groups ``i, j`` Parameters ========== i, j : symbols representing commutation groups c : group commutation number Notes ===== ``i, j`` can be symbols, strings or numbers, apart from ``0, 1`` and ``2`` which are reserved respectively for commuting, anticommuting tensors and tensors not commuting with any other group apart with the commuting tensors. For the remaining cases, use this method to set the commutation rules; by default ``c=None``. The group commutation number ``c`` is assigned in correspondence to the group commutation symbols; it can be 0 commuting 1 anticommuting None no commutation property Examples ======== ``G`` and ``GH`` do not commute with themselves and commute with each other; A is commuting. 
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz') >>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz) >>> A = TensorHead('A', [Lorentz]) >>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm') >>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm') >>> TensorManager.set_comm('Gcomm', 'GHcomm', 0) >>> (GH(i1)*G(i0)).canon_bp() G(i0)*GH(i1) >>> (G(i1)*G(i0)).canon_bp() G(i1)*G(i0) >>> (G(i1)*A(i0)).canon_bp() A(i0)*G(i1) """ if c not in (0, 1, None): raise ValueError('`c` can assume only the values 0, 1 or None') if i not in self._comm_symbols2i: n = len(self._comm) self._comm.append({}) self._comm[n][0] = 0 self._comm[0][n] = 0 self._comm_symbols2i[i] = n self._comm_i2symbol[n] = i if j not in self._comm_symbols2i: n = len(self._comm) self._comm.append({}) self._comm[0][n] = 0 self._comm[n][0] = 0 self._comm_symbols2i[j] = n self._comm_i2symbol[n] = j ni = self._comm_symbols2i[i] nj = self._comm_symbols2i[j] self._comm[ni][nj] = c self._comm[nj][ni] = c def set_comms(self, *args): """ set the commutation group numbers ``c`` for symbols ``i, j`` Parameters ========== args : sequence of ``(i, j, c)`` """ for i, j, c in args: self.set_comm(i, j, c) def get_comm(self, i, j): """ Return the commutation parameter for commutation group numbers ``i, j`` see ``_TensorManager.set_comm`` """ return self._comm[i].get(j, 0 if i == 0 or j == 0 else None) def clear(self): """ Clear the TensorManager. """ self._comm_init() TensorManager = _TensorManager() class TensorIndexType(Basic): """ A TensorIndexType is characterized by its name and its metric. Parameters ========== name : name of the tensor type dummy_name : name of the head of dummy indices dim : dimension, it can be a symbol or an integer or ``None`` eps_dim : dimension of the epsilon tensor metric_symmetry : integer that denotes metric symmetry or ``None`` for no metirc metric_name : string with the name of the metric tensor Attributes ========== ``metric`` : the metric tensor ``delta`` : ``Kronecker delta`` ``epsilon`` : the ``Levi-Civita epsilon`` tensor ``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis. Notes ===== The possible values of the ``metric_symmetry`` parameter are: ``1`` : metric tensor is fully symmetric ``0`` : metric tensor possesses no index symmetry ``-1`` : metric tensor is fully antisymmetric ``None``: there is no metric tensor (metric equals to ``None``) The metric is assumed to be symmetric by default. It can also be set to a custom tensor by the ``.set_metric()`` method. If there is a metric the metric is used to raise and lower indices. In the case of non-symmetric metric, the following raising and lowering conventions will be adopted: ``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)`` From these it is easy to find: ``g(-a, b) = delta(-a, b)`` where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta`` (see ``TensorIndex`` for the conventions on indices). For antisymmetric metrics there is also the following equality: ``g(a, -b) = -delta(a, -b)`` If there is no metric it is not possible to raise or lower indices; e.g. the index of the defining representation of ``SU(N)`` is 'covariant' and the conjugate representation is 'contravariant'; for ``N > 2`` they are linearly independent. 
``eps_dim`` is by default equal to ``dim``, if the latter is an integer; else it can be assigned (for use in naive dimensional regularization); if ``eps_dim`` is not an integer ``epsilon`` is ``None``. Examples ======== >>> from sympy.tensor.tensor import TensorIndexType >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> Lorentz.metric metric(Lorentz,Lorentz) """ def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None, metric_symmetry=1, metric_name='metric', **kwargs): if 'dummy_fmt' in kwargs: SymPyDeprecationWarning(useinstead="dummy_name", feature="dummy_fmt", issue=17517, deprecated_since_version="1.5").warn() dummy_name = kwargs.get('dummy_fmt') if isinstance(name, str): name = Symbol(name) if dummy_name is None: dummy_name = str(name)[0] if isinstance(dummy_name, str): dummy_name = Symbol(dummy_name) if dim is None: dim = Symbol("dim_" + dummy_name.name) else: dim = sympify(dim) if eps_dim is None: eps_dim = dim else: eps_dim = sympify(eps_dim) metric_symmetry = sympify(metric_symmetry) if isinstance(metric_name, str): metric_name = Symbol(metric_name) if 'metric' in kwargs: SymPyDeprecationWarning(useinstead="metric_symmetry or .set_metric()", feature="metric argument", issue=17517, deprecated_since_version="1.5").warn() metric = kwargs.get('metric') if metric is not None: if metric in (True, False, 0, 1): metric_name = 'metric' #metric_antisym = metric else: metric_name = metric.name #metric_antisym = metric.antisym if metric: metric_symmetry = -1 else: metric_symmetry = 1 obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim, metric_symmetry, metric_name) obj._autogenerated = [] return obj @property def name(self): return self.args[0].name @property def dummy_name(self): return self.args[1].name @property def dim(self): return self.args[2] @property def eps_dim(self): return self.args[3] @memoize_property def metric(self): metric_symmetry = self.args[4] metric_name = self.args[5] if metric_symmetry is None: return None if metric_symmetry == 0: symmetry = TensorSymmetry.no_symmetry(2) elif metric_symmetry == 1: symmetry = TensorSymmetry.fully_symmetric(2) elif metric_symmetry == -1: symmetry = TensorSymmetry.fully_symmetric(-2) return TensorHead(metric_name, [self]*2, symmetry) @memoize_property def delta(self): return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2)) @memoize_property def epsilon(self): if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)): return None symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim) return TensorHead('Eps', [self]*self.eps_dim, symmetry) def set_metric(self, tensor): self._metric = tensor def __lt__(self, other): return self.name < other.name def __str__(self): return self.name __repr__ = __str__ # Everything below this line is deprecated @property def data(self): deprecate_data() return _tensor_data_substitution_dict[self] @data.setter def data(self, data): deprecate_data() # This assignment is a bit controversial, should metric components be assigned # to the metric only or also to the TensorIndexType object? The advantage here # is the ability to assign a 1D array and transform it to a 2D diagonal array. 
from .array import MutableDenseNDimArray data = _TensorDataLazyEvaluator.parse_data(data) if data.rank() > 2: raise ValueError("data have to be of rank 1 (diagonal metric) or 2.") if data.rank() == 1: if self.dim.is_number: nda_dim = data.shape[0] if nda_dim != self.dim: raise ValueError("Dimension mismatch") dim = data.shape[0] newndarray = MutableDenseNDimArray.zeros(dim, dim) for i, val in enumerate(data): newndarray[i, i] = val data = newndarray dim1, dim2 = data.shape if dim1 != dim2: raise ValueError("Non-square matrix tensor.") if self.dim.is_number: if self.dim != dim1: raise ValueError("Dimension mismatch") _tensor_data_substitution_dict[self] = data _tensor_data_substitution_dict.add_metric_data(self.metric, data) delta = self.get_kronecker_delta() i1 = TensorIndex('i1', self) i2 = TensorIndex('i2', self) delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1)) @data.deleter def data(self): deprecate_data() if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] if self.metric in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self.metric] @deprecated(useinstead=".delta", issue=17517, deprecated_since_version="1.5") def get_kronecker_delta(self): sym2 = TensorSymmetry(get_symmetric_group_sgs(2)) delta = TensorHead('KD', [self]*2, sym2) return delta @deprecated(useinstead=".delta", issue=17517, deprecated_since_version="1.5") def get_epsilon(self): if not isinstance(self._eps_dim, (SYMPY_INTS, Integer)): return None sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1)) epsilon = TensorHead('Eps', [self]*self._eps_dim, sym) return epsilon def _components_data_full_destroy(self): """ EXPERIMENTAL: do not rely on this API method. This destroys components data associated to the ``TensorIndexType``, if any, specifically: * metric tensor data * Kronecker tensor data """ if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] def delete_tensmul_data(key): if key in _tensor_data_substitution_dict._substitutions_dict_tensmul: del _tensor_data_substitution_dict._substitutions_dict_tensmul[key] # delete metric data: delete_tensmul_data((self.metric, True, True)) delete_tensmul_data((self.metric, True, False)) delete_tensmul_data((self.metric, False, True)) delete_tensmul_data((self.metric, False, False)) # delete delta tensor data: delta = self.get_kronecker_delta() if delta in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[delta] class TensorIndex(Basic): """ Represents a tensor index Parameters ========== name : name of the index, or ``True`` if you want it to be automatically assigned tensor_index_type : ``TensorIndexType`` of the index is_up : flag for contravariant index (is_up=True by default) Attributes ========== ``name`` ``tensor_index_type`` ``is_up`` Notes ===== Tensor indices are contracted with the Einstein summation convention. An index can be in contravariant or in covariant form; in the latter case it is represented prepending a ``-`` to the index name. Adding ``-`` to a covariant (is_up=False) index makes it contravariant. Dummy indices have a name with head given by ``tensor_inde_type.dummy_name`` with underscore and a number. Similar to ``symbols`` multiple contravariant indices can be created at once using ``tensor_indices(s, typ)``, where ``s`` is a string of names. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> mu = TensorIndex('mu', Lorentz, is_up=False) >>> nu, rho = tensor_indices('nu, rho', Lorentz) >>> A = TensorHead('A', [Lorentz, Lorentz]) >>> A(mu, nu) A(-mu, nu) >>> A(-mu, -rho) A(mu, -rho) >>> A(mu, -mu) A(-L_0, L_0) """ def __new__(cls, name, tensor_index_type, is_up=True): if isinstance(name, str): name_symbol = Symbol(name) elif isinstance(name, Symbol): name_symbol = name elif name is True: name = "_i{}".format(len(tensor_index_type._autogenerated)) name_symbol = Symbol(name) tensor_index_type._autogenerated.append(name_symbol) else: raise ValueError("invalid name") is_up = sympify(is_up) return Basic.__new__(cls, name_symbol, tensor_index_type, is_up) @property def name(self): return self.args[0].name @property def tensor_index_type(self): return self.args[1] @property def is_up(self): return self.args[2] def _print(self): s = self.name if not self.is_up: s = '-%s' % s return s def __lt__(self, other): return ((self.tensor_index_type, self.name) < (other.tensor_index_type, other.name)) def __neg__(self): t1 = TensorIndex(self.name, self.tensor_index_type, (not self.is_up)) return t1 def tensor_indices(s, typ): """ Returns list of tensor indices given their names and their types Parameters ========== s : string of comma separated names of indices typ : ``TensorIndexType`` of the indices Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) """ if isinstance(s, str): a = [x.name for x in symbols(s, seq=True)] else: raise ValueError('expecting a string') tilist = [TensorIndex(i, typ) for i in a] if len(tilist) == 1: return tilist[0] return tilist class TensorSymmetry(Basic): """ Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric index permutation). For the relevant terminology see ``tensor_can.py`` section of the combinatorics module. Parameters ========== bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor Attributes ========== ``base`` : base of the BSGS ``generators`` : generators of the BSGS ``rank`` : rank of the tensor Notes ===== A tensor can have an arbitrary monoterm symmetry provided by its BSGS. Multiterm symmetries, like the cyclic symmetry of the Riemann tensor (i.e., Bianchi identity), are not covered. See combinatorics module for information on how to generate BSGS for a general index permutation group. Simple symmetries can be generated using built-in methods. 
See Also ======== sympy.combinatorics.tensor_can.get_symmetric_group_sgs Examples ======== Define a symmetric tensor of rank 2 >>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> sym = TensorSymmetry(get_symmetric_group_sgs(2)) >>> T = TensorHead('T', [Lorentz]*2, sym) Note, that the same can also be done using built-in TensorSymmetry methods >>> sym2 = TensorSymmetry.fully_symmetric(2) >>> sym == sym2 True """ def __new__(cls, *args, **kw_args): if len(args) == 1: base, generators = args[0] elif len(args) == 2: base, generators = args else: raise TypeError("bsgs required, either two separate parameters or one tuple") if not isinstance(base, Tuple): base = Tuple(*base) if not isinstance(generators, Tuple): generators = Tuple(*generators) return Basic.__new__(cls, base, generators, **kw_args) @property def base(self): return self.args[0] @property def generators(self): return self.args[1] @property def rank(self): return self.generators[0].size - 2 @classmethod def fully_symmetric(cls, rank): """ Returns a fully symmetric (antisymmetric if ``rank``<0) TensorSymmetry object for ``abs(rank)`` indices. """ if rank > 0: bsgs = get_symmetric_group_sgs(rank, False) elif rank < 0: bsgs = get_symmetric_group_sgs(-rank, True) elif rank == 0: bsgs = ([], [Permutation(1)]) return TensorSymmetry(bsgs) @classmethod def direct_product(cls, *args): """ Returns a TensorSymmetry object that is being a direct product of fully (anti-)symmetric index permutation groups. Notes ===== Some examples for different values of ``(*args)``: ``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)`` ``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)`` ``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)`` ``(2, -2)`` tensor with the first 2 indices commuting and the last 2 anticommuting ``(1, 1, 1)`` tensor with 3 indices without any symmetry """ base, sgs = [], [Permutation(1)] for arg in args: if arg > 0: bsgs2 = get_symmetric_group_sgs(arg, False) elif arg < 0: bsgs2 = get_symmetric_group_sgs(-arg, True) else: continue base, sgs = bsgs_direct_product(base, sgs, *bsgs2) return TensorSymmetry(base, sgs) @classmethod def riemann(cls): """ Returns a monotorem symmetry of the Riemann tensor """ return TensorSymmetry(riemann_bsgs) @classmethod def no_symmetry(cls, rank): """ TensorSymmetry object for ``rank`` indices with no symmetry """ return TensorSymmetry([], [Permutation(rank+1)]) @deprecated(useinstead="TensorSymmetry class constructor and methods", issue=17108, deprecated_since_version="1.5") def tensorsymmetry(*args): """ Returns a ``TensorSymmetry`` object. This method is deprecated, use ``TensorSymmetry.direct_product()`` or ``.riemann()`` instead. One can represent a tensor with any monoterm slot symmetry group using a BSGS. 
``args`` can be a BSGS ``args[0]`` base ``args[1]`` sgs Usually tensors are in (direct products of) representations of the symmetric group; ``args`` can be a list of lists representing the shapes of Young tableaux Notes ===== For instance: ``[[1]]`` vector ``[[1]*n]`` symmetric tensor of rank ``n`` ``[[n]]`` antisymmetric tensor of rank ``n`` ``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor ``[[1],[1]]`` vector*vector ``[[2],[1],[1]`` (antisymmetric tensor)*vector*vector Notice that with the shape ``[2, 2]`` we associate only the monoterm symmetries of the Riemann tensor; this is an abuse of notation, since the shape ``[2, 2]`` corresponds usually to the irreducible representation characterized by the monoterm symmetries and by the cyclic symmetry. """ from sympy.combinatorics import Permutation def tableau2bsgs(a): if len(a) == 1: # antisymmetric vector n = a[0] bsgs = get_symmetric_group_sgs(n, 1) else: if all(x == 1 for x in a): # symmetric vector n = len(a) bsgs = get_symmetric_group_sgs(n) elif a == [2, 2]: bsgs = riemann_bsgs else: raise NotImplementedError return bsgs if not args: return TensorSymmetry(Tuple(), Tuple(Permutation(1))) if len(args) == 2 and isinstance(args[1][0], Permutation): return TensorSymmetry(args) base, sgs = tableau2bsgs(args[0]) for a in args[1:]: basex, sgsx = tableau2bsgs(a) base, sgs = bsgs_direct_product(base, sgs, basex, sgsx) return TensorSymmetry(Tuple(base, sgs)) class TensorType(Basic): """ Class of tensor types. Deprecated, use tensor_heads() instead. Parameters ========== index_types : list of ``TensorIndexType`` of the tensor indices symmetry : ``TensorSymmetry`` of the tensor Attributes ========== ``index_types`` ``symmetry`` ``types`` : list of ``TensorIndexType`` without repetitions """ is_commutative = False def __new__(cls, index_types, symmetry, **kw_args): deprecate_TensorType() assert symmetry.rank == len(index_types) obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args) return obj @property def index_types(self): return self.args[0] @property def symmetry(self): return self.args[1] @property def types(self): return sorted(set(self.index_types), key=lambda x: x.name) def __str__(self): return 'TensorType(%s)' % ([str(x) for x in self.index_types]) def __call__(self, s, comm=0): """ Return a TensorHead object or a list of TensorHead objects. ``s`` name or string of names ``comm``: commutation group number see ``_TensorManager.set_comm`` """ if isinstance(s, str): names = [x.name for x in symbols(s, seq=True)] else: raise ValueError('expecting a string') if len(names) == 1: return TensorHead(names[0], self.index_types, self.symmetry, comm) else: return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names] @deprecated(useinstead="TensorHead class constructor or tensor_heads()", issue=17108, deprecated_since_version="1.5") def tensorhead(name, typ, sym=None, comm=0): """ Function generating tensorhead(s). This method is deprecated, use TensorHead constructor or tensor_heads() instead. 
Parameters ========== name : name or sequence of names (as in ``symbols``) typ : index types sym : same as ``*args`` in ``tensorsymmetry`` comm : commutation group number see ``_TensorManager.set_comm`` """ if sym is None: sym = [[1] for i in range(len(typ))] sym = tensorsymmetry(*sym) return TensorHead(name, typ, sym, comm) class TensorHead(Basic): """ Tensor head of the tensor Parameters ========== name : name of the tensor index_types : list of TensorIndexType symmetry : TensorSymmetry of the tensor comm : commutation group number Attributes ========== ``name`` ``index_types`` ``rank`` : total number of indices ``symmetry`` ``comm`` : commutation group Notes ===== Similar to ``symbols`` multiple TensorHeads can be created using ``tensorhead(s, typ, sym=None, comm=0)`` function, where ``s`` is the string of names and ``sym`` is the monoterm tensor symmetry (see ``tensorsymmetry``). A ``TensorHead`` belongs to a commutation group, defined by a symbol on number ``comm`` (see ``_TensorManager.set_comm``); tensors in a commutation group have the same commutation properties; by default ``comm`` is ``0``, the group of the commuting tensors. Examples ======== Define a fully antisymmetric tensor of rank 2: >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> asym2 = TensorSymmetry.fully_symmetric(-2) >>> A = TensorHead('A', [Lorentz, Lorentz], asym2) Examples with ndarray values, the components data assigned to the ``TensorHead`` object are assumed to be in a fully-contravariant representation. In case it is necessary to assign components data which represents the values of a non-fully covariant tensor, see the other examples. >>> from sympy.tensor.tensor import tensor_indices >>> from sympy import diag >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> i0, i1 = tensor_indices('i0:2', Lorentz) Specify a replacement dictionary to keep track of the arrays to use for replacements in the tensorial expression. The ``TensorIndexType`` is associated to the metric used for contractions (in fully covariant form): >>> repl = {Lorentz: diag(1, -1, -1, -1)} Let's see some examples of working with components with the electromagnetic tensor: >>> from sympy import symbols >>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z') >>> c = symbols('c', positive=True) Let's define `F`, an antisymmetric tensor: >>> F = TensorHead('F', [Lorentz, Lorentz], asym2) Let's update the dictionary to contain the matrix to use in the replacements: >>> repl.update({F(-i0, -i1): [ ... [0, Ex/c, Ey/c, Ez/c], ... [-Ex/c, 0, -Bz, By], ... [-Ey/c, Bz, 0, -Bx], ... 
[-Ez/c, -By, Bx, 0]]}) Now it is possible to retrieve the contravariant form of the Electromagnetic tensor: >>> F(i0, i1).replace_with_arrays(repl, [i0, i1]) [[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]] and the mixed contravariant-covariant form: >>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1]) [[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]] Energy-momentum of a particle may be represented as: >>> from sympy import symbols >>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1)) >>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True) >>> repl.update({P(i0): [E, px, py, pz]}) The contravariant and covariant components are, respectively: >>> P(i0).replace_with_arrays(repl, [i0]) [E, p_x, p_y, p_z] >>> P(-i0).replace_with_arrays(repl, [-i0]) [E, -p_x, -p_y, -p_z] The contraction of a 1-index tensor by itself: >>> expr = P(i0)*P(-i0) >>> expr.replace_with_arrays(repl, []) E**2 - p_x**2 - p_y**2 - p_z**2 """ is_commutative = False def __new__(cls, name, index_types, symmetry=None, comm=0): if isinstance(name, str): name_symbol = Symbol(name) elif isinstance(name, Symbol): name_symbol = name else: raise ValueError("invalid name") if symmetry is None: symmetry = TensorSymmetry.no_symmetry(len(index_types)) else: assert symmetry.rank == len(index_types) obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry) obj.comm = TensorManager.comm_symbols2i(comm) return obj @property def name(self): return self.args[0].name @property def index_types(self): return list(self.args[1]) @property def symmetry(self): return self.args[2] @property def rank(self): return len(self.index_types) def __lt__(self, other): return (self.name, self.index_types) < (other.name, other.index_types) def commutes_with(self, other): """ Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute. Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute. """ r = TensorManager.get_comm(self.comm, other.comm) return r def _print(self): return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types])) def __call__(self, *indices, **kw_args): """ Returns a tensor with indices. There is a special behavior in case of indices denoted by ``True``, they are considered auto-matrix indices, their slots are automatically filled, and confer to the tensor the behavior of a matrix or vector upon multiplication with another tensor containing auto-matrix indices of the same ``TensorIndexType``. This means indices get summed over the same way as in matrix multiplication. For matrix behavior, define two auto-matrix indices, for vector behavior define just one. Indices can also be strings, in which case the attribute ``index_types`` is used to convert them to proper ``TensorIndex``. 
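For instance, string indices are resolved against ``index_types``, so passing strings is equivalent to building the corresponding ``TensorIndex`` objects first (a small sketch of this behaviour; the Examples section below uses explicit ``TensorIndex`` objects):

>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
>>> A('a', '-b')
A(a, -b)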
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> a, b = tensor_indices('a,b', Lorentz) >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) >>> t = A(a, -b) >>> t A(a, -b) """ updated_indices = [] for idx, typ in zip(indices, self.index_types): if isinstance(idx, str): idx = idx.strip().replace(" ", "") if idx.startswith('-'): updated_indices.append(TensorIndex(idx[1:], typ, is_up=False)) else: updated_indices.append(TensorIndex(idx, typ)) else: updated_indices.append(idx) updated_indices += indices[len(updated_indices):] tensor = Tensor(self, updated_indices, **kw_args) return tensor.doit() # Everything below this line is deprecated def __pow__(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=SymPyDeprecationWarning) if self.data is None: raise ValueError("No power on abstract tensors.") deprecate_data() from .array import tensorproduct, tensorcontraction metrics = [_.data for _ in self.index_types] marray = self.data marraydim = marray.rank() for metric in metrics: marray = tensorproduct(marray, metric, marray) marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2)) return marray ** (other * S.Half) @property def data(self): deprecate_data() return _tensor_data_substitution_dict[self] @data.setter def data(self, data): deprecate_data() _tensor_data_substitution_dict[self] = data @data.deleter def data(self): deprecate_data() if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] def __iter__(self): deprecate_data() return self.data.__iter__() def _components_data_full_destroy(self): """ EXPERIMENTAL: do not rely on this API method. Destroy components data associated to the ``TensorHead`` object, this checks for attached components data, and destroys components data too. """ # do not garbage collect Kronecker tensor (it should be done by # ``TensorIndexType`` garbage collection) deprecate_data() if self.name == "KD": return # the data attached to a tensor must be deleted only by the TensorHead # destructor. If the TensorHead is deleted, it means that there are no # more instances of that tensor anywhere. if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] def tensor_heads(s, index_types, symmetry=None, comm=0): """ Returns a sequence of TensorHeads from a string `s` """ if isinstance(s, str): names = [x.name for x in symbols(s, seq=True)] else: raise ValueError('expecting a string') thlist = [TensorHead(name, index_types, symmetry, comm) for name in names] if len(thlist) == 1: return thlist[0] return thlist class _TensorMetaclass(ManagedProperties, ABCMeta): pass class TensExpr(Expr, metaclass=_TensorMetaclass): """ Abstract base class for tensor expressions Notes ===== A tensor expression is an expression formed by tensors; currently the sums of tensors are distributed. A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``. ``TensMul`` objects are formed by products of component tensors, and include a coefficient, which is a SymPy expression. In the internal representation contracted indices are represented by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position of the component tensor with contravariant index, ``ipos1`` is the slot which the index occupies in that component tensor. Contracted indices are therefore nameless in the internal representation. 
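For instance, in ``p(m0)*q(-m0)`` the single contraction is stored roughly as ``(0, 0, 0, 1)``: slot 0 of component 0 (``p``) is contracted with slot 0 of component 1 (``q``). A sketch of how this can be inspected through the ``dum_in_args`` property:

>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1 = tensor_indices('m0,m1', Lorentz)
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> (p(m0)*q(-m0)).dum_in_args
[(0, 0, 0, 1)]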
""" _op_priority = 12.0 is_commutative = False def __neg__(self): return self*S.NegativeOne def __abs__(self): raise NotImplementedError def __add__(self, other): return TensAdd(self, other).doit() def __radd__(self, other): return TensAdd(other, self).doit() def __sub__(self, other): return TensAdd(self, -other).doit() def __rsub__(self, other): return TensAdd(other, -self).doit() def __mul__(self, other): """ Multiply two tensors using Einstein summation convention. If the two tensors have an index in common, one contravariant and the other covariant, in their product the indices are summed Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) >>> g = Lorentz.metric >>> p, q = tensor_heads('p,q', [Lorentz]) >>> t1 = p(m0) >>> t2 = q(-m0) >>> t1*t2 p(L_0)*q(-L_0) """ return TensMul(self, other).doit() def __rmul__(self, other): return TensMul(other, self).doit() def __truediv__(self, other): other = _sympify(other) if isinstance(other, TensExpr): raise ValueError('cannot divide by a tensor') return TensMul(self, S.One/other).doit() def __rtruediv__(self, other): raise ValueError('cannot divide by a tensor') def __pow__(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=SymPyDeprecationWarning) if self.data is None: raise ValueError("No power without ndarray data.") deprecate_data() from .array import tensorproduct, tensorcontraction free = self.free marray = self.data mdim = marray.rank() for metric in free: marray = tensorcontraction( tensorproduct( marray, metric[0].tensor_index_type.data, marray), (0, mdim), (mdim+1, mdim+2) ) return marray ** (other * S.Half) def __rpow__(self, other): raise NotImplementedError @property @abstractmethod def nocoeff(self): raise NotImplementedError("abstract method") @property @abstractmethod def coeff(self): raise NotImplementedError("abstract method") @abstractmethod def get_indices(self): raise NotImplementedError("abstract method") @abstractmethod def get_free_indices(self): # type: () -> List[TensorIndex] raise NotImplementedError("abstract method") @abstractmethod def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr raise NotImplementedError("abstract method") def fun_eval(self, *index_tuples): deprecate_fun_eval() return self.substitute_indices(*index_tuples) def get_matrix(self): """ DEPRECATED: do not use. Returns ndarray components data as a matrix, if components data are available and ndarray dimension does not exceed 2. 
""" from sympy import Matrix deprecate_data() if 0 < self.rank <= 2: rows = self.data.shape[0] columns = self.data.shape[1] if self.rank == 2 else 1 if self.rank == 2: mat_list = [] * rows for i in range(rows): mat_list.append([]) for j in range(columns): mat_list[i].append(self[i, j]) else: mat_list = [None] * rows for i in range(rows): mat_list[i] = self[i] return Matrix(mat_list) else: raise NotImplementedError( "missing multidimensional reduction to matrix.") @staticmethod def _get_indices_permutation(indices1, indices2): return [indices1.index(i) for i in indices2] def expand(self, **hints): return _expand(self, **hints).doit() def _expand(self, **kwargs): return self def _get_free_indices_set(self): indset = set() for arg in self.args: if isinstance(arg, TensExpr): indset.update(arg._get_free_indices_set()) return indset def _get_dummy_indices_set(self): indset = set() for arg in self.args: if isinstance(arg, TensExpr): indset.update(arg._get_dummy_indices_set()) return indset def _get_indices_set(self): indset = set() for arg in self.args: if isinstance(arg, TensExpr): indset.update(arg._get_indices_set()) return indset @property def _iterate_dummy_indices(self): dummy_set = self._get_dummy_indices_set() def recursor(expr, pos): if isinstance(expr, TensorIndex): if expr in dummy_set: yield (expr, pos) elif isinstance(expr, (Tuple, TensExpr)): for p, arg in enumerate(expr.args): yield from recursor(arg, pos+(p,)) return recursor(self, ()) @property def _iterate_free_indices(self): free_set = self._get_free_indices_set() def recursor(expr, pos): if isinstance(expr, TensorIndex): if expr in free_set: yield (expr, pos) elif isinstance(expr, (Tuple, TensExpr)): for p, arg in enumerate(expr.args): yield from recursor(arg, pos+(p,)) return recursor(self, ()) @property def _iterate_indices(self): def recursor(expr, pos): if isinstance(expr, TensorIndex): yield (expr, pos) elif isinstance(expr, (Tuple, TensExpr)): for p, arg in enumerate(expr.args): yield from recursor(arg, pos+(p,)) return recursor(self, ()) @staticmethod def _contract_and_permute_with_metric(metric, array, pos, dim): # TODO: add possibility of metric after (spinors) from .array import tensorcontraction, tensorproduct, permutedims array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos)) permu = list(range(dim)) permu[0], permu[pos] = permu[pos], permu[0] return permutedims(array, permu) @staticmethod def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict): from .array import permutedims index_types1 = [i.tensor_index_type for i in free_ind1] # Check if variance of indices needs to be fixed: pos2up = [] pos2down = [] free2remaining = free_ind2[:] for pos1, index1 in enumerate(free_ind1): if index1 in free2remaining: pos2 = free2remaining.index(index1) free2remaining[pos2] = None continue if -index1 in free2remaining: pos2 = free2remaining.index(-index1) free2remaining[pos2] = None free_ind2[pos2] = index1 if index1.is_up: pos2up.append(pos2) else: pos2down.append(pos2) else: index2 = free2remaining[pos1] if index2 is None: raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) free2remaining[pos1] = None free_ind2[pos1] = index1 if index1.is_up ^ index2.is_up: if index1.is_up: pos2up.append(pos1) else: pos2down.append(pos1) if len(set(free_ind1) & set(free_ind2)) < len(free_ind1): raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) # Raise indices: for pos in pos2up: index_type_pos = index_types1[pos] # type: TensorIndexType if 
index_type_pos not in replacement_dict: raise ValueError("No metric provided to lower index") metric = replacement_dict[index_type_pos] metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric) array = TensExpr._contract_and_permute_with_metric(metric_inverse, array, pos, len(free_ind1)) # Lower indices: for pos in pos2down: index_type_pos = index_types1[pos] # type: TensorIndexType if index_type_pos not in replacement_dict: raise ValueError("No metric provided to lower index") metric = replacement_dict[index_type_pos] array = TensExpr._contract_and_permute_with_metric(metric, array, pos, len(free_ind1)) if free_ind1: permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1) array = permutedims(array, permutation) if hasattr(array, "rank") and array.rank() == 0: array = array[()] return free_ind2, array def replace_with_arrays(self, replacement_dict, indices=None): """ Replace the tensorial expressions with arrays. The final array will correspond to the N-dimensional array with indices arranged according to ``indices``. Parameters ========== replacement_dict dictionary containing the replacement rules for tensors. indices the index order with respect to which the array is read. The original index order will be used if no value is passed. Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices >>> from sympy.tensor.tensor import TensorHead >>> from sympy import symbols, diag >>> L = TensorIndexType("L") >>> i, j = tensor_indices("i j", L) >>> A = TensorHead("A", [L]) >>> A(i).replace_with_arrays({A(i): [1, 2]}, [i]) [1, 2] Since 'indices' is optional, we can also call replace_with_arrays by this way if no specific index order is needed: >>> A(i).replace_with_arrays({A(i): [1, 2]}) [1, 2] >>> expr = A(i)*A(j) >>> expr.replace_with_arrays({A(i): [1, 2]}) [[1, 2], [2, 4]] For contractions, specify the metric of the ``TensorIndexType``, which in this case is ``L``, in its covariant form: >>> expr = A(i)*A(-i) >>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)}) -3 Symmetrization of an array: >>> H = TensorHead("H", [L, L]) >>> a, b, c, d = symbols("a b c d") >>> expr = H(i, j)/2 + H(j, i)/2 >>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]}) [[a, b/2 + c/2], [b/2 + c/2, d]] Anti-symmetrization of an array: >>> expr = H(i, j)/2 - H(j, i)/2 >>> repl = {H(i, j): [[a, b], [c, d]]} >>> expr.replace_with_arrays(repl) [[0, b/2 - c/2], [-b/2 + c/2, 0]] The same expression can be read as the transpose by inverting ``i`` and ``j``: >>> expr.replace_with_arrays(repl, [j, i]) [[0, -b/2 + c/2], [b/2 - c/2, 0]] """ from .array import Array indices = indices or [] replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()} # Check dimensions of replaced arrays: for tensor, array in replacement_dict.items(): if isinstance(tensor, TensorIndexType): expected_shape = [tensor.dim for i in range(2)] else: expected_shape = [index_type.dim for index_type in tensor.index_types] if len(expected_shape) != array.rank() or (not all([dim1 == dim2 if dim1.is_number else True for dim1, dim2 in zip(expected_shape, array.shape)])): raise ValueError("shapes for tensor %s expected to be %s, "\ "replacement array shape is %s" % (tensor, expected_shape, array.shape)) ret_indices, array = self._extract_data(replacement_dict) last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict) return array def _check_add_Sum(self, expr, index_symbols): from sympy import Sum indices = self.get_indices() dum 
= self.dum sum_indices = [ (index_symbols[i], 0, indices[i].tensor_index_type.dim-1) for i, j in dum] if sum_indices: expr = Sum(expr, *sum_indices) return expr def _expand_partial_derivative(self): # simply delegate the _expand_partial_derivative() to # its arguments to expand a possibly found PartialDerivative return self.func(*[ a._expand_partial_derivative() if isinstance(a, TensExpr) else a for a in self.args]) class TensAdd(TensExpr, AssocOp): """ Sum of tensors Parameters ========== free_args : list of the free indices Attributes ========== ``args`` : tuple of addends ``rank`` : rank of the tensor ``free_args`` : list of the free indices in sorted order Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> a, b = tensor_indices('a,b', Lorentz) >>> p, q = tensor_heads('p,q', [Lorentz]) >>> t = p(a) + q(a); t p(a) + q(a) Examples with components data added to the tensor expression: >>> from sympy import symbols, diag >>> x, y, z, t = symbols("x y z t") >>> repl = {} >>> repl[Lorentz] = diag(1, -1, -1, -1) >>> repl[p(a)] = [1, 2, 3, 4] >>> repl[q(a)] = [x, y, z, t] The following are: 2**2 - 3**2 - 2**2 - 7**2 ==> -58 >>> expr = p(a) + q(a) >>> expr.replace_with_arrays(repl, [a]) [x + 1, y + 2, z + 3, t + 4] """ def __new__(cls, *args, **kw_args): args = [_sympify(x) for x in args if x] args = TensAdd._tensAdd_flatten(args) args.sort(key=default_sort_key) if not args: return S.Zero if len(args) == 1: return args[0] return Basic.__new__(cls, *args, **kw_args) @property def coeff(self): return S.One @property def nocoeff(self): return self def get_free_indices(self): # type: () -> List[TensorIndex] return self.free_indices def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr newargs = [arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args] return self.func(*newargs) @memoize_property def rank(self): if isinstance(self.args[0], TensExpr): return self.args[0].rank else: return 0 @memoize_property def free_args(self): if isinstance(self.args[0], TensExpr): return self.args[0].free_args else: return [] @memoize_property def free_indices(self): if isinstance(self.args[0], TensExpr): return self.args[0].get_free_indices() else: return set() def doit(self, **kwargs): deep = kwargs.get('deep', True) if deep: args = [arg.doit(**kwargs) for arg in self.args] else: args = self.args if not args: return S.Zero if len(args) == 1 and not isinstance(args[0], TensExpr): return args[0] # now check that all addends have the same indices: TensAdd._tensAdd_check(args) # if TensAdd has only 1 element in its `args`: if len(args) == 1: # and isinstance(args[0], TensMul): return args[0] # Remove zeros: args = [x for x in args if x] # if there are no more args (i.e. 
have cancelled out), # just return zero: if not args: return S.Zero if len(args) == 1: return args[0] # Collect terms appearing more than once, differing by their coefficients: args = TensAdd._tensAdd_collect_terms(args) # collect canonicalized terms def sort_key(t): if not isinstance(t, TensExpr): return [], [], [] if hasattr(t, "_index_structure") and hasattr(t, "components"): x = get_index_structure(t) return t.components, x.free, x.dum return [], [], [] args.sort(key=sort_key) if not args: return S.Zero # it there is only a component tensor return it if len(args) == 1: return args[0] obj = self.func(*args) return obj @staticmethod def _tensAdd_flatten(args): # flatten TensAdd, coerce terms which are not tensors to tensors a = [] for x in args: if isinstance(x, (Add, TensAdd)): a.extend(list(x.args)) else: a.append(x) args = [x for x in a if x.coeff] return args @staticmethod def _tensAdd_check(args): # check that all addends have the same free indices def get_indices_set(x): # type: (Expr) -> Set[TensorIndex] if isinstance(x, TensExpr): return set(x.get_free_indices()) return set() indices0 = get_indices_set(args[0]) # type: Set[TensorIndex] list_indices = [get_indices_set(arg) for arg in args[1:]] # type: List[Set[TensorIndex]] if not all(x == indices0 for x in list_indices): raise ValueError('all tensors must have the same indices') @staticmethod def _tensAdd_collect_terms(args): # collect TensMul terms differing at most by their coefficient terms_dict = defaultdict(list) scalars = S.Zero if isinstance(args[0], TensExpr): free_indices = set(args[0].get_free_indices()) else: free_indices = set() for arg in args: if not isinstance(arg, TensExpr): if free_indices != set(): raise ValueError("wrong valence") scalars += arg continue if free_indices != set(arg.get_free_indices()): raise ValueError("wrong valence") # TODO: what is the part which is not a coeff? # needs an implementation similar to .as_coeff_Mul() terms_dict[arg.nocoeff].append(arg.coeff) new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0] if isinstance(scalars, Add): new_args = list(scalars.args) + new_args elif scalars != 0: new_args = [scalars] + new_args return new_args def get_indices(self): indices = [] for arg in self.args: indices.extend([i for i in get_indices(arg) if i not in indices]) return indices def _expand(self, **hints): return TensAdd(*[_expand(i, **hints) for i in self.args]) def __call__(self, *indices): deprecate_fun_eval() free_args = self.free_args indices = list(indices) if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: raise ValueError('incompatible types') if indices == free_args: return self index_tuples = list(zip(free_args, indices)) a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args] res = TensAdd(*a).doit() return res def canon_bp(self): """ Canonicalize using the Butler-Portugal algorithm for canonicalization under monoterm symmetries. 
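A sketch of the expected behaviour with a fully antisymmetric head (the two addends differ only by an index swap, so they cancel after canonicalization):

>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
>>> (A(a, b) + A(b, a)).canon_bp()
0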
""" expr = self.expand() args = [canon_bp(x) for x in expr.args] res = TensAdd(*args).doit() return res def equals(self, other): other = _sympify(other) if isinstance(other, TensMul) and other.coeff == 0: return all(x.coeff == 0 for x in self.args) if isinstance(other, TensExpr): if self.rank != other.rank: return False if isinstance(other, TensAdd): if set(self.args) != set(other.args): return False else: return True t = self - other if not isinstance(t, TensExpr): return t == 0 else: if isinstance(t, TensMul): return t.coeff == 0 else: return all(x.coeff == 0 for x in t.args) def __getitem__(self, item): deprecate_data() return self.data[item] def contract_delta(self, delta): args = [x.contract_delta(delta) for x in self.args] t = TensAdd(*args).doit() return canon_bp(t) def contract_metric(self, g): """ Raise or lower indices with the metric ``g`` Parameters ========== g : metric contract_all : if True, eliminate all ``g`` which are contracted Notes ===== see the ``TensorIndexType`` docstring for the contraction conventions """ args = [contract_metric(x, g) for x in self.args] t = TensAdd(*args).doit() return canon_bp(t) def substitute_indices(self, *index_tuples): new_args = [] for arg in self.args: if isinstance(arg, TensExpr): arg = arg.substitute_indices(*index_tuples) new_args.append(arg) return TensAdd(*new_args).doit() def _print(self): a = [] args = self.args for x in args: a.append(str(x)) s = ' + '.join(a) s = s.replace('+ -', '- ') return s def _extract_data(self, replacement_dict): from sympy.tensor.array import Array, permutedims args_indices, arrays = zip(*[ arg._extract_data(replacement_dict) if isinstance(arg, TensExpr) else ([], arg) for arg in self.args ]) arrays = [Array(i) for i in arrays] ref_indices = args_indices[0] for i in range(1, len(args_indices)): indices = args_indices[i] array = arrays[i] permutation = TensMul._get_indices_permutation(indices, ref_indices) arrays[i] = permutedims(array, permutation) return ref_indices, sum(arrays, Array.zeros(*array.shape)) @property def data(self): deprecate_data() return _tensor_data_substitution_dict[self.expand()] @data.setter def data(self, data): deprecate_data() _tensor_data_substitution_dict[self] = data @data.deleter def data(self): deprecate_data() if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] def __iter__(self): deprecate_data() if not self.data: raise ValueError("No iteration on abstract tensors") return self.data.flatten().__iter__() def _eval_rewrite_as_Indexed(self, *args): return Add.fromiter(args) def _eval_partial_derivative(self, s): # Evaluation like Add list_addends = [] for a in self.args: if isinstance(a, TensExpr): list_addends.append(a._eval_partial_derivative(s)) # do not call diff if s is no symbol elif s._diff_wrt: list_addends.append(a._eval_derivative(s)) return self.func(*list_addends) class Tensor(TensExpr): """ Base tensor class, i.e. this represents a tensor, the single unit to be put into an expression. This object is usually created from a ``TensorHead``, by attaching indices to it. Indices preceded by a minus sign are considered contravariant, otherwise covariant. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead >>> Lorentz = TensorIndexType("Lorentz", dummy_name="L") >>> mu, nu = tensor_indices('mu nu', Lorentz) >>> A = TensorHead("A", [Lorentz, Lorentz]) >>> A(mu, -nu) A(mu, -nu) >>> A(mu, -mu) A(L_0, -L_0) It is also possible to use symbols instead of inidices (appropriate indices are then generated automatically). >>> from sympy import Symbol >>> x = Symbol('x') >>> A(x, mu) A(x, mu) >>> A(x, -x) A(L_0, -L_0) """ is_commutative = False _index_structure = None # type: _IndexStructure def __new__(cls, tensor_head, indices, *, is_canon_bp=False, **kw_args): indices = cls._parse_indices(tensor_head, indices) obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args) obj._index_structure = _IndexStructure.from_indices(*indices) obj._free = obj._index_structure.free[:] obj._dum = obj._index_structure.dum[:] obj._ext_rank = obj._index_structure._ext_rank obj._coeff = S.One obj._nocoeff = obj obj._component = tensor_head obj._components = [tensor_head] if tensor_head.rank != len(indices): raise ValueError("wrong number of indices") obj.is_canon_bp = is_canon_bp obj._index_map = Tensor._build_index_map(indices, obj._index_structure) return obj @property def free(self): return self._free @property def dum(self): return self._dum @property def ext_rank(self): return self._ext_rank @property def coeff(self): return self._coeff @property def nocoeff(self): return self._nocoeff @property def component(self): return self._component @property def components(self): return self._components @property def head(self): return self.args[0] @property def indices(self): return self.args[1] @property def free_indices(self): return set(self._index_structure.get_free_indices()) @property def index_types(self): return self.head.index_types @property def rank(self): return len(self.free_indices) @staticmethod def _build_index_map(indices, index_structure): index_map = {} for idx in indices: index_map[idx] = (indices.index(idx),) return index_map def doit(self, **kwargs): args, indices, free, dum = TensMul._tensMul_contract_indices([self]) return args[0] @staticmethod def _parse_indices(tensor_head, indices): if not isinstance(indices, (tuple, list, Tuple)): raise TypeError("indices should be an array, got %s" % type(indices)) indices = list(indices) for i, index in enumerate(indices): if isinstance(index, Symbol): indices[i] = TensorIndex(index, tensor_head.index_types[i], True) elif isinstance(index, Mul): c, e = index.as_coeff_Mul() if c == -1 and isinstance(e, Symbol): indices[i] = TensorIndex(e, tensor_head.index_types[i], False) else: raise ValueError("index not understood: %s" % index) elif not isinstance(index, TensorIndex): raise TypeError("wrong type for index: %s is %s" % (index, type(index))) return indices def _set_new_index_structure(self, im, is_canon_bp=False): indices = im.get_indices() return self._set_indices(*indices, is_canon_bp=is_canon_bp) def _set_indices(self, *indices, is_canon_bp=False, **kw_args): if len(indices) != self.ext_rank: raise ValueError("indices length mismatch") return self.func(self.args[0], indices, is_canon_bp=is_canon_bp).doit() def _get_free_indices_set(self): return {i[0] for i in self._index_structure.free} def _get_dummy_indices_set(self): dummy_pos = set(itertools.chain(*self._index_structure.dum)) return {idx for i, idx in enumerate(self.args[1]) if i in dummy_pos} def _get_indices_set(self): return set(self.args[1].args) @property def free_in_args(self): return [(ind, 
pos, 0) for ind, pos in self.free] @property def dum_in_args(self): return [(p1, p2, 0, 0) for p1, p2 in self.dum] @property def free_args(self): return sorted([x[0] for x in self.free]) def commutes_with(self, other): """ :param other: :return: 0 commute 1 anticommute None neither commute nor anticommute """ if not isinstance(other, TensExpr): return 0 elif isinstance(other, Tensor): return self.component.commutes_with(other.component) return NotImplementedError def perm2tensor(self, g, is_canon_bp=False): """ Returns the tensor corresponding to the permutation ``g`` For further details, see the method in ``TIDS`` with the same name. """ return perm2tensor(self, g, is_canon_bp) def canon_bp(self): if self.is_canon_bp: return self expr = self.expand() g, dummies, msym = expr._index_structure.indices_canon_args() v = components_canon_args([expr.component]) can = canonicalize(g, dummies, msym, *v) if can == 0: return S.Zero tensor = self.perm2tensor(can, True) return tensor def split(self): return [self] def _expand(self, **kwargs): return self def sorted_components(self): return self def get_indices(self): # type: () -> List[TensorIndex] """ Get a list of indices, corresponding to those of the tensor. """ return list(self.args[1]) def get_free_indices(self): # type: () -> List[TensorIndex] """ Get a list of free indices, corresponding to those of the tensor. """ return self._index_structure.get_free_indices() def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> Tensor # TODO: this could be optimized by only swapping the indices # instead of visiting the whole expression tree: return self.xreplace(repl) def as_base_exp(self): return self, S.One def substitute_indices(self, *index_tuples): """ Return a tensor with free indices substituted according to ``index_tuples`` ``index_types`` list of tuples ``(old_index, new_index)`` Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) >>> t = A(i, k)*B(-k, -j); t A(i, L_0)*B(-L_0, -j) >>> t.substitute_indices((i, k),(-j, l)) A(k, L_0)*B(-L_0, l) """ indices = [] for index in self.indices: for ind_old, ind_new in index_tuples: if (index.name == ind_old.name and index.tensor_index_type == ind_old.tensor_index_type): if index.is_up == ind_old.is_up: indices.append(ind_new) else: indices.append(-ind_new) break else: indices.append(index) return self.head(*indices) def __call__(self, *indices): deprecate_fun_eval() free_args = self.free_args indices = list(indices) if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: raise ValueError('incompatible types') if indices == free_args: return self t = self.substitute_indices(*list(zip(free_args, indices))) # object is rebuilt in order to make sure that all contracted indices # get recognized as dummies, but only if there are contracted indices. if len({i if i.is_up else -i for i in indices}) != len(indices): return t.func(*t.args) return t # TODO: put this into TensExpr? def __iter__(self): deprecate_data() return self.data.__iter__() # TODO: put this into TensExpr? 
def __getitem__(self, item): deprecate_data() return self.data[item] def _extract_data(self, replacement_dict): from .array import Array for k, v in replacement_dict.items(): if isinstance(k, Tensor) and k.args[0] == self.args[0]: other = k array = v break else: raise ValueError("%s not found in %s" % (self, replacement_dict)) # TODO: inefficient, this should be done at root level only: replacement_dict = {k: Array(v) for k, v in replacement_dict.items()} array = Array(array) dum1 = self.dum dum2 = other.dum if len(dum2) > 0: for pair in dum2: # allow `dum2` if the contained values are also in `dum1`. if pair not in dum1: raise NotImplementedError("%s with contractions is not implemented" % other) # Remove elements in `dum2` from `dum1`: dum1 = [pair for pair in dum1 if pair not in dum2] if len(dum1) > 0: indices1 = self.get_indices() indices2 = other.get_indices() repl = {} for p1, p2 in dum1: repl[indices2[p2]] = -indices2[p1] for pos in (p1, p2): if indices1[pos].is_up ^ indices2[pos].is_up: metric = replacement_dict[indices1[pos].tensor_index_type] if indices1[pos].is_up: metric = _TensorDataLazyEvaluator.inverse_matrix(metric) array = self._contract_and_permute_with_metric(metric, array, pos, len(indices2)) other = other.xreplace(repl).doit() array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2)) free_ind1 = self.get_free_indices() free_ind2 = other.get_free_indices() return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict) @property def data(self): deprecate_data() return _tensor_data_substitution_dict[self] @data.setter def data(self, data): deprecate_data() # TODO: check data compatibility with properties of tensor. _tensor_data_substitution_dict[self] = data @data.deleter def data(self): deprecate_data() if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] if self.metric in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self.metric] def _print(self): indices = [str(ind) for ind in self.indices] component = self.component if component.rank > 0: return ('%s(%s)' % (component.name, ', '.join(indices))) else: return ('%s' % component.name) def equals(self, other): if other == 0: return self.coeff == 0 other = _sympify(other) if not isinstance(other, TensExpr): assert not self.components return S.One == other def _get_compar_comp(self): t = self.canon_bp() r = (t.coeff, tuple(t.components), \ tuple(sorted(t.free)), tuple(sorted(t.dum))) return r return _get_compar_comp(self) == _get_compar_comp(other) def contract_metric(self, g): # if metric is not the same, ignore this step: if self.component != g: return self # in case there are free components, do not perform anything: if len(self.free) != 0: return self #antisym = g.index_types[0].metric_antisym if g.symmetry == TensorSymmetry.fully_symmetric(-2): antisym = 1 elif g.symmetry == TensorSymmetry.fully_symmetric(2): antisym = 0 elif g.symmetry == TensorSymmetry.no_symmetry(2): antisym = None else: raise NotImplementedError sign = S.One typ = g.index_types[0] if not antisym: # g(i, -i) sign = sign*typ.dim else: # g(i, -i) sign = sign*typ.dim dp0, dp1 = self.dum[0] if dp0 < dp1: # g(i, -i) = -D with antisymmetric metric sign = -sign return sign def contract_delta(self, metric): return self.contract_metric(metric) def _eval_rewrite_as_Indexed(self, tens, indices): from sympy import Indexed # TODO: replace .args[0] with .name: index_symbols = [i.args[0] for i in self.get_indices()] expr = Indexed(tens.args[0], *index_symbols) 
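# _check_add_Sum wraps the Indexed expression in a Sum over every contracted
# (dummy) index pair found in self.dum, so contractions become explicit summations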
return self._check_add_Sum(expr, index_symbols) def _eval_partial_derivative(self, s): # type: (Tensor) -> Expr if not isinstance(s, Tensor): return S.Zero else: # @a_i/@a_k = delta_i^k # @a_i/@a^k = g_ij delta^j_k # @a^i/@a^k = delta^i_k # @a^i/@a_k = g^ij delta_j^k # TODO: if there is no metric present, the derivative should be zero? if self.head != s.head: return S.Zero # if heads are the same, provide delta and/or metric products # for every free index pair in the appropriate tensor # assumed that the free indices are in proper order # A contravariant index in the derivative becomes covariant # after performing the derivative and vice versa kronecker_delta_list = [1] # this does not guarantee a correct index order for (count, (iself, iother)) in enumerate(zip(self.get_free_indices(), s.get_free_indices())): if iself.tensor_index_type != iother.tensor_index_type: raise ValueError("index types not compatible") else: tensor_index_type = iself.tensor_index_type tensor_metric = tensor_index_type.metric dummy = TensorIndex("d_" + str(count), tensor_index_type, is_up=iself.is_up) if iself.is_up == iother.is_up: kroneckerdelta = tensor_index_type.delta(iself, -iother) else: kroneckerdelta = ( TensMul(tensor_metric(iself, dummy), tensor_index_type.delta(-dummy, -iother)) ) kronecker_delta_list.append(kroneckerdelta) return TensMul.fromiter(kronecker_delta_list).doit() # doit necessary to rename dummy indices accordingly class TensMul(TensExpr, AssocOp): """ Product of tensors Parameters ========== coeff : SymPy coefficient of the tensor args Attributes ========== ``components`` : list of ``TensorHead`` of the component tensors ``types`` : list of nonrepeated ``TensorIndexType`` ``free`` : list of ``(ind, ipos, icomp)``, see Notes ``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes ``ext_rank`` : rank of the tensor counting the dummy indices ``rank`` : rank of the tensor ``coeff`` : SymPy coefficient of the tensor ``free_args`` : list of the free indices in sorted order ``is_canon_bp`` : ``True`` if the tensor is in canonical form Notes ===== ``args[0]`` list of ``TensorHead`` of the component tensors. ``args[1]`` list of ``(ind, ipos, icomp)`` where ``ind`` is a free index, ``ipos`` is the slot position of ``ind`` in the ``icomp``-th component tensor. ``args[2]`` list of tuples representing dummy indices. ``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant dummy index is the ``ipos1``-th slot position in the ``icomp1``-th component tensor; the corresponding covariant index is in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
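The corresponding data can be inspected through the ``free_in_args`` and ``dum_in_args`` properties (a rough illustration; each free index is listed with its slot and component position, each contraction with its two slot/component pairs):

>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1 = tensor_indices('m0,m1', Lorentz)
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m0)*q(-m0)*q(m1)
>>> t.free_in_args
[(m1, 0, 2)]
>>> t.dum_in_args
[(0, 0, 0, 1)]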
""" identity = S.One _index_structure = None # type: _IndexStructure def __new__(cls, *args, **kw_args): is_canon_bp = kw_args.get('is_canon_bp', False) args = list(map(_sympify, args)) # Flatten: args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) # Data for indices: index_types = [i.tensor_index_type for i in indices] index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) obj = TensExpr.__new__(cls, *args) obj._indices = indices obj._index_types = index_types[:] obj._index_structure = index_structure obj._free = index_structure.free[:] obj._dum = index_structure.dum[:] obj._free_indices = {x[0] for x in obj.free} obj._rank = len(obj.free) obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) obj._coeff = S.One obj._is_canon_bp = is_canon_bp return obj index_types = property(lambda self: self._index_types) free = property(lambda self: self._free) dum = property(lambda self: self._dum) free_indices = property(lambda self: self._free_indices) rank = property(lambda self: self._rank) ext_rank = property(lambda self: self._ext_rank) @staticmethod def _indices_to_free_dum(args_indices): free2pos1 = {} free2pos2 = {} dummy_data = [] indices = [] # Notation for positions (to better understand the code): # `pos1`: position in the `args`. # `pos2`: position in the indices. # Example: # A(i, j)*B(k, m, n)*C(p) # `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul). # `pos2` of `n` is 4 because it's the fifth overall index. # Counter for the index position wrt the whole expression: pos2 = 0 for pos1, arg_indices in enumerate(args_indices): for index_pos, index in enumerate(arg_indices): if not isinstance(index, TensorIndex): raise TypeError("expected TensorIndex") if -index in free2pos1: # Dummy index detected: other_pos1 = free2pos1.pop(-index) other_pos2 = free2pos2.pop(-index) if index.is_up: dummy_data.append((index, pos1, other_pos1, pos2, other_pos2)) else: dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2)) indices.append(index) elif index in free2pos1: raise ValueError("Repeated index: %s" % index) else: free2pos1[index] = pos1 free2pos2[index] = pos2 indices.append(index) pos2 += 1 free = [(i, p) for (i, p) in free2pos2.items()] free_names = [i.name for i in free2pos2.keys()] dummy_data.sort(key=lambda x: x[3]) return indices, free, free_names, dummy_data @staticmethod def _dummy_data_to_dum(dummy_data): return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data] @staticmethod def _tensMul_contract_indices(args, replace_indices=True): replacements = [{} for _ in args] #_index_order = all([_has_index_order(arg) for arg in args]) args_indices = [get_indices(arg) for arg in args] indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) cdt = defaultdict(int) def dummy_name_gen(tensor_index_type): nd = str(cdt[tensor_index_type]) cdt[tensor_index_type] += 1 return tensor_index_type.dummy_name + '_' + nd if replace_indices: for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data: index_type = old_index.tensor_index_type while True: dummy_name = dummy_name_gen(index_type) if dummy_name not in free_names: break dummy = TensorIndex(dummy_name, index_type, True) replacements[pos1cov][old_index] = dummy replacements[pos1contra][-old_index] = -dummy indices[pos2cov] = dummy indices[pos2contra] = -dummy args = [ arg._replace_indices(repl) if 
isinstance(arg, TensExpr) else arg for arg, repl in zip(args, replacements)] dum = TensMul._dummy_data_to_dum(dummy_data) return args, indices, free, dum @staticmethod def _get_components_from_args(args): """ Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied by one another. """ components = [] for arg in args: if not isinstance(arg, TensExpr): continue if isinstance(arg, TensAdd): continue components.extend(arg.components) return components @staticmethod def _rebuild_tensors_list(args, index_structure): indices = index_structure.get_indices() #tensors = [None for i in components] # pre-allocate list ind_pos = 0 for i, arg in enumerate(args): if not isinstance(arg, TensExpr): continue prev_pos = ind_pos ind_pos += arg.ext_rank args[i] = Tensor(arg.component, indices[prev_pos:ind_pos]) def doit(self, **kwargs): is_canon_bp = self._is_canon_bp deep = kwargs.get('deep', True) if deep: args = [arg.doit(**kwargs) for arg in self.args] else: args = self.args args = [arg for arg in args if arg != self.identity] # Extract non-tensor coefficients: coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One) args = [arg for arg in args if isinstance(arg, TensExpr)] if len(args) == 0: return coeff if coeff != self.identity: args = [coeff] + args if coeff == 0: return S.Zero if len(args) == 1: return args[0] args, indices, free, dum = TensMul._tensMul_contract_indices(args) # Data for indices: index_types = [i.tensor_index_type for i in indices] index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) obj = self.func(*args) obj._index_types = index_types obj._index_structure = index_structure obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) obj._coeff = coeff obj._is_canon_bp = is_canon_bp return obj # TODO: this method should be private # TODO: should this method be renamed _from_components_free_dum ? @staticmethod def from_data(coeff, components, free, dum, **kw_args): return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit() @staticmethod def _get_tensors_from_components_free_dum(components, free, dum): """ Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``. 
""" index_structure = _IndexStructure.from_components_free_dum(components, free, dum) indices = index_structure.get_indices() tensors = [None for i in components] # pre-allocate list # distribute indices on components to build a list of tensors: ind_pos = 0 for i, component in enumerate(components): prev_pos = ind_pos ind_pos += component.rank tensors[i] = Tensor(component, indices[prev_pos:ind_pos]) return tensors def _get_free_indices_set(self): return {i[0] for i in self.free} def _get_dummy_indices_set(self): dummy_pos = set(itertools.chain(*self.dum)) return {idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos} def _get_position_offset_for_indices(self): arg_offset = [None for i in range(self.ext_rank)] counter = 0 for i, arg in enumerate(self.args): if not isinstance(arg, TensExpr): continue for j in range(arg.ext_rank): arg_offset[j + counter] = counter counter += arg.ext_rank return arg_offset @property def free_args(self): return sorted([x[0] for x in self.free]) @property def components(self): return self._get_components_from_args(self.args) @property def free_in_args(self): arg_offset = self._get_position_offset_for_indices() argpos = self._get_indices_to_args_pos() return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free] @property def coeff(self): # return Mul.fromiter([c for c in self.args if not isinstance(c, TensExpr)]) return self._coeff @property def nocoeff(self): return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit() @property def dum_in_args(self): arg_offset = self._get_position_offset_for_indices() argpos = self._get_indices_to_args_pos() return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum] def equals(self, other): if other == 0: return self.coeff == 0 other = _sympify(other) if not isinstance(other, TensExpr): assert not self.components return self.coeff == other return self.canon_bp() == other.canon_bp() def get_indices(self): """ Returns the list of indices of the tensor The indices are listed in the order in which they appear in the component tensors. The dummy indices are given a name which does not collide with the names of the free indices. Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) >>> g = Lorentz.metric >>> p, q = tensor_heads('p,q', [Lorentz]) >>> t = p(m1)*g(m0,m2) >>> t.get_indices() [m1, m0, m2] >>> t2 = p(m1)*g(-m1, m2) >>> t2.get_indices() [L_0, -L_0, m2] """ return self._indices def get_free_indices(self): # type: () -> List[TensorIndex] """ Returns the list of free indices of the tensor The indices are listed in the order in which they appear in the component tensors. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) >>> g = Lorentz.metric >>> p, q = tensor_heads('p,q', [Lorentz]) >>> t = p(m1)*g(m0,m2) >>> t.get_free_indices() [m1, m0, m2] >>> t2 = p(m1)*g(-m1, m2) >>> t2.get_free_indices() [m2] """ return self._index_structure.get_free_indices() def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr return self.func(*[arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args]) def split(self): """ Returns a list of tensors, whose product is ``self`` Dummy indices contracted among different tensor components become free indices with the same name as the one used to represent the dummy indices. Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) >>> t = A(a,b)*B(-b,c) >>> t A(a, L_0)*B(-L_0, c) >>> t.split() [A(a, L_0), B(-L_0, c)] """ if self.args == (): return [self] splitp = [] res = 1 for arg in self.args: if isinstance(arg, Tensor): splitp.append(res*arg) res = 1 else: res *= arg return splitp def _expand(self, **hints): # TODO: temporary solution, in the future this should be linked to # `Expr.expand`. args = [_expand(arg, **hints) for arg in self.args] args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args] return TensAdd(*[ TensMul(*i) for i in itertools.product(*args1)] ) def __neg__(self): return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit() def __getitem__(self, item): deprecate_data() return self.data[item] def _get_args_for_traditional_printer(self): args = list(self.args) if (self.coeff < 0) == True: # expressions like "-A(a)" sign = "-" if self.coeff == S.NegativeOne: args = args[1:] else: args[0] = -args[0] else: sign = "" return sign, args def _sort_args_for_sorted_components(self): """ Returns the ``args`` sorted according to the components commutation properties. The sorting is done taking into account the commutation group of the component tensors. """ cv = [arg for arg in self.args if isinstance(arg, TensExpr)] sign = 1 n = len(cv) - 1 for i in range(n): for j in range(n, i, -1): c = cv[j-1].commutes_with(cv[j]) # if `c` is `None`, it does neither commute nor anticommute, skip: if c not in [0, 1]: continue typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name) typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name) if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name): cv[j-1], cv[j] = cv[j], cv[j-1] # if `c` is 1, the anticommute, so change sign: if c: sign = -sign coeff = sign * self.coeff if coeff != 1: return [coeff] + cv return cv def sorted_components(self): """ Returns a tensor product with sorted components. """ return TensMul(*self._sort_args_for_sorted_components()).doit() def perm2tensor(self, g, is_canon_bp=False): """ Returns the tensor corresponding to the permutation ``g`` For further details, see the method in ``TIDS`` with the same name. """ return perm2tensor(self, g, is_canon_bp=is_canon_bp) def canon_bp(self): """ Canonicalize using the Butler-Portugal algorithm for canonicalization under monoterm symmetries. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) >>> t = A(m0,-m1)*A(m1,-m0) >>> t.canon_bp() -A(L_0, L_1)*A(-L_0, -L_1) >>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0) >>> t.canon_bp() 0 """ if self._is_canon_bp: return self expr = self.expand() if isinstance(expr, TensAdd): return expr.canon_bp() if not expr.components: return expr t = expr.sorted_components() g, dummies, msym = t._index_structure.indices_canon_args() v = components_canon_args(t.components) can = canonicalize(g, dummies, msym, *v) if can == 0: return S.Zero tmul = t.perm2tensor(can, True) return tmul def contract_delta(self, delta): t = self.contract_metric(delta) return t def _get_indices_to_args_pos(self): """ Get a dict mapping the index position to TensMul's argument number. """ pos_map = dict() pos_counter = 0 for arg_i, arg in enumerate(self.args): if not isinstance(arg, TensExpr): continue assert isinstance(arg, Tensor) for i in range(arg.ext_rank): pos_map[pos_counter] = arg_i pos_counter += 1 return pos_map def contract_metric(self, g): """ Raise or lower indices with the metric ``g`` Parameters ========== g : metric Notes ===== see the ``TensorIndexType`` docstring for the contraction conventions Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) >>> g = Lorentz.metric >>> p, q = tensor_heads('p,q', [Lorentz]) >>> t = p(m0)*q(m1)*g(-m0, -m1) >>> t.canon_bp() metric(L_0, L_1)*p(-L_0)*q(-L_1) >>> t.contract_metric(g).canon_bp() p(L_0)*q(-L_0) """ expr = self.expand() if self != expr: expr = expr.canon_bp() return expr.contract_metric(g) pos_map = self._get_indices_to_args_pos() args = list(self.args) #antisym = g.index_types[0].metric_antisym if g.symmetry == TensorSymmetry.fully_symmetric(-2): antisym = 1 elif g.symmetry == TensorSymmetry.fully_symmetric(2): antisym = 0 elif g.symmetry == TensorSymmetry.no_symmetry(2): antisym = None else: raise NotImplementedError # list of positions of the metric ``g`` inside ``args`` gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g] if not gpos: return self # Sign is either 1 or -1, to correct the sign after metric contraction # (for spinor indices). 
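# Strategy of the loop below: for each occurrence of the metric g, merge the dummy
# pair(s) (or the dummy pair and the free index) that it connects, record the
# eliminated argument position, and finally shift the remaining free/dummy
# positions to account for the removed metric slots.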
sign = 1 dum = self.dum[:] free = self.free[:] elim = set() for gposx in gpos: if gposx in elim: continue free1 = [x for x in free if pos_map[x[1]] == gposx] dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx] if not dum1: continue elim.add(gposx) # subs with the multiplication neutral element, that is, remove it: args[gposx] = 1 if len(dum1) == 2: if not antisym: dum10, dum11 = dum1 if pos_map[dum10[1]] == gposx: # the index with pos p0 contravariant p0 = dum10[0] else: # the index with pos p0 is covariant p0 = dum10[1] if pos_map[dum11[1]] == gposx: # the index with pos p1 is contravariant p1 = dum11[0] else: # the index with pos p1 is covariant p1 = dum11[1] dum.append((p0, p1)) else: dum10, dum11 = dum1 # change the sign to bring the indices of the metric to contravariant # form; change the sign if dum10 has the metric index in position 0 if pos_map[dum10[1]] == gposx: # the index with pos p0 is contravariant p0 = dum10[0] if dum10[1] == 1: sign = -sign else: # the index with pos p0 is covariant p0 = dum10[1] if dum10[0] == 0: sign = -sign if pos_map[dum11[1]] == gposx: # the index with pos p1 is contravariant p1 = dum11[0] sign = -sign else: # the index with pos p1 is covariant p1 = dum11[1] dum.append((p0, p1)) elif len(dum1) == 1: if not antisym: dp0, dp1 = dum1[0] if pos_map[dp0] == pos_map[dp1]: # g(i, -i) typ = g.index_types[0] sign = sign*typ.dim else: # g(i0, i1)*p(-i1) if pos_map[dp0] == gposx: p1 = dp1 else: p1 = dp0 ind, p = free1[0] free.append((ind, p1)) else: dp0, dp1 = dum1[0] if pos_map[dp0] == pos_map[dp1]: # g(i, -i) typ = g.index_types[0] sign = sign*typ.dim if dp0 < dp1: # g(i, -i) = -D with antisymmetric metric sign = -sign else: # g(i0, i1)*p(-i1) if pos_map[dp0] == gposx: p1 = dp1 if dp0 == 0: sign = -sign else: p1 = dp0 ind, p = free1[0] free.append((ind, p1)) dum = [x for x in dum if x not in dum1] free = [x for x in free if x not in free1] # shift positions: shift = 0 shifts = [0]*len(args) for i in range(len(args)): if i in elim: shift += 2 continue shifts[i] = shift free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim] dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim] res = sign*TensMul(*args).doit() if not isinstance(res, TensExpr): return res im = _IndexStructure.from_components_free_dum(res.components, free, dum) return res._set_new_index_structure(im) def _set_new_index_structure(self, im, is_canon_bp=False): indices = im.get_indices() return self._set_indices(*indices, is_canon_bp=is_canon_bp) def _set_indices(self, *indices, is_canon_bp=False, **kw_args): if len(indices) != self.ext_rank: raise ValueError("indices length mismatch") args = list(self.args)[:] pos = 0 for i, arg in enumerate(args): if not isinstance(arg, TensExpr): continue assert isinstance(arg, Tensor) ext_rank = arg.ext_rank args[i] = arg._set_indices(*indices[pos:pos+ext_rank]) pos += ext_rank return TensMul(*args, is_canon_bp=is_canon_bp).doit() @staticmethod def _index_replacement_for_contract_metric(args, free, dum): for arg in args: if not isinstance(arg, TensExpr): continue assert isinstance(arg, Tensor) def substitute_indices(self, *index_tuples): new_args = [] for arg in self.args: if isinstance(arg, TensExpr): arg = arg.substitute_indices(*index_tuples) new_args.append(arg) return TensMul(*new_args).doit() def __call__(self, *indices): deprecate_fun_eval() free_args = self.free_args indices = list(indices) if 
[x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: raise ValueError('incompatible types') if indices == free_args: return self t = self.substitute_indices(*list(zip(free_args, indices))) # object is rebuilt in order to make sure that all contracted indices # get recognized as dummies, but only if there are contracted indices. if len({i if i.is_up else -i for i in indices}) != len(indices): return t.func(*t.args) return t def _extract_data(self, replacement_dict): args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)]) coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One) indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) dum = TensMul._dummy_data_to_dum(dummy_data) ext_rank = self.ext_rank free.sort(key=lambda x: x[1]) free_indices = [i[0] for i in free] return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank) @property def data(self): deprecate_data() dat = _tensor_data_substitution_dict[self.expand()] return dat @data.setter def data(self, data): deprecate_data() raise ValueError("Not possible to set component data to a tensor expression") @data.deleter def data(self): deprecate_data() raise ValueError("Not possible to delete component data to a tensor expression") def __iter__(self): deprecate_data() if self.data is None: raise ValueError("No iteration on abstract tensors") return self.data.__iter__() def _eval_rewrite_as_Indexed(self, *args): from sympy import Sum index_symbols = [i.args[0] for i in self.get_indices()] args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args] expr = Mul.fromiter(args) return self._check_add_Sum(expr, index_symbols) def _eval_partial_derivative(self, s): # Evaluation like Mul terms = [] for i, arg in enumerate(self.args): # checking whether some tensor instance is differentiated # or some other thing is necessary, but ugly if isinstance(arg, TensExpr): d = arg._eval_partial_derivative(s) else: # do not call diff is s is no symbol if s._diff_wrt: d = arg._eval_derivative(s) else: d = S.Zero if d: terms.append(TensMul.fromiter(self.args[:i] + (d,) + self.args[i + 1:])) return TensAdd.fromiter(terms) class TensorElement(TensExpr): """ Tensor with evaluated components. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry >>> from sympy import symbols >>> L = TensorIndexType("L") >>> i, j, k = symbols("i j k") >>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2)) >>> A(i, j).get_free_indices() [i, j] If we want to set component ``i`` to a specific value, use the ``TensorElement`` class: >>> from sympy.tensor.tensor import TensorElement >>> te = TensorElement(A(i, j), {i: 2}) As index ``i`` has been accessed (``{i: 2}`` is the evaluation of its 3rd element), the free indices will only contain ``j``: >>> te.get_free_indices() [j] """ def __new__(cls, expr, index_map): if not isinstance(expr, Tensor): # remap if not isinstance(expr, TensExpr): raise TypeError("%s is not a tensor expression" % expr) return expr.func(*[TensorElement(arg, index_map) for arg in expr.args]) expr_free_indices = expr.get_free_indices() name_translation = {i.args[0]: i for i in expr_free_indices} index_map = {name_translation.get(index, index): value for index, value in index_map.items()} index_map = {index: value for index, value in index_map.items() if index in expr_free_indices} if len(index_map) == 0: return expr free_indices = [i for i in expr_free_indices if i not in index_map.keys()] index_map = Dict(index_map) obj = TensExpr.__new__(cls, expr, index_map) obj._free_indices = free_indices return obj @property def free(self): return [(index, i) for i, index in enumerate(self.get_free_indices())] @property def dum(self): # TODO: inherit dummies from expr return [] @property def expr(self): return self._args[0] @property def index_map(self): return self._args[1] @property def coeff(self): return S.One @property def nocoeff(self): return self def get_free_indices(self): return self._free_indices def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr # TODO: can be improved: return self.xreplace(repl) def get_indices(self): return self.get_free_indices() def _extract_data(self, replacement_dict): ret_indices, array = self.expr._extract_data(replacement_dict) index_map = self.index_map slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices) ret_indices = [i for i in ret_indices if i not in index_map] array = array.__getitem__(slice_tuple) return ret_indices, array def canon_bp(p): """ Butler-Portugal canonicalization. See ``tensor_can.py`` from the combinatorics module for the details. """ if isinstance(p, TensExpr): return p.canon_bp() return p def tensor_mul(*a): """ product of tensors """ if not a: return TensMul.from_data(S.One, [], [], []) t = a[0] for tx in a[1:]: t = t*tx return t def riemann_cyclic_replace(t_r): """ replace Riemann tensor with an equivalent expression ``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)`` """ free = sorted(t_r.free, key=lambda x: x[1]) m, n, p, q = [x[0] for x in free] t0 = t_r*Rational(2, 3) t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3) t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3) t3 = t0 + t1 + t2 return t3 def riemann_cyclic(t2): """ replace each Riemann tensor with an equivalent expression satisfying the cyclic identity. This trick is discussed in the reference guide to Cadabra. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) >>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann()) >>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l)) >>> riemann_cyclic(t) 0 """ t2 = t2.expand() if isinstance(t2, (TensMul, Tensor)): args = [t2] else: args = t2.args a1 = [x.split() for x in args] a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1] a3 = [tensor_mul(*v) for v in a2] t3 = TensAdd(*a3).doit() if not t3: return t3 else: return canon_bp(t3) def get_lines(ex, index_type): """ returns ``(lines, traces, rest)`` for an index type, where ``lines`` is the list of list of positions of a matrix line, ``traces`` is the list of list of traced matrix lines, ``rest`` is the rest of the elements ot the tensor. """ def _join_lines(a): i = 0 while i < len(a): x = a[i] xend = x[-1] xstart = x[0] hit = True while hit: hit = False for j in range(i + 1, len(a)): if j >= len(a): break if a[j][0] == xend: hit = True x.extend(a[j][1:]) xend = x[-1] a.pop(j) continue if a[j][0] == xstart: hit = True a[i] = reversed(a[j][1:]) + x x = a[i] xstart = a[i][0] a.pop(j) continue if a[j][-1] == xend: hit = True x.extend(reversed(a[j][:-1])) xend = x[-1] a.pop(j) continue if a[j][-1] == xstart: hit = True a[i] = a[j][:-1] + x x = a[i] xstart = x[0] a.pop(j) continue i += 1 return a arguments = ex.args dt = {} for c in ex.args: if not isinstance(c, TensExpr): continue if c in dt: continue index_types = c.index_types a = [] for i in range(len(index_types)): if index_types[i] is index_type: a.append(i) if len(a) > 2: raise ValueError('at most two indices of type %s allowed' % index_type) if len(a) == 2: dt[c] = a #dum = ex.dum lines = [] traces = [] traces1 = [] #indices_to_args_pos = ex._get_indices_to_args_pos() # TODO: add a dum_to_components_map ? 
for p0, p1, c0, c1 in ex.dum_in_args: if arguments[c0] not in dt: continue if c0 == c1: traces.append([c0]) continue ta0 = dt[arguments[c0]] ta1 = dt[arguments[c1]] if p0 not in ta0: continue if ta0.index(p0) == ta1.index(p1): # case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1; # to deal with this case one could add to the position # a flag for transposition; # one could write [(c0, False), (c1, True)] raise NotImplementedError # if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1 # if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0 ta0 = dt[arguments[c0]] b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0) lines1 = lines[:] for line in lines: if line[-1] == b0: if line[0] == b1: n = line.index(min(line)) traces1.append(line) traces.append(line[n:] + line[:n]) else: line.append(b1) break elif line[0] == b1: line.insert(0, b0) break else: lines1.append([b0, b1]) lines = [x for x in lines1 if x not in traces1] lines = _join_lines(lines) rest = [] for line in lines: for y in line: rest.append(y) for line in traces: for y in line: rest.append(y) rest = [x for x in range(len(arguments)) if x not in rest] return lines, traces, rest def get_free_indices(t): if not isinstance(t, TensExpr): return () return t.get_free_indices() def get_indices(t): if not isinstance(t, TensExpr): return () return t.get_indices() def get_index_structure(t): if isinstance(t, TensExpr): return t._index_structure return _IndexStructure([], [], [], []) def get_coeff(t): if isinstance(t, Tensor): return S.One if isinstance(t, TensMul): return t.coeff if isinstance(t, TensExpr): raise ValueError("no coefficient associated to this tensor expression") return t def contract_metric(t, g): if isinstance(t, TensExpr): return t.contract_metric(g) return t def perm2tensor(t, g, is_canon_bp=False): """ Returns the tensor corresponding to the permutation ``g`` For further details, see the method in ``TIDS`` with the same name. """ if not isinstance(t, TensExpr): return t elif isinstance(t, (Tensor, TensMul)): nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp) res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp) if g[-1] != len(g) - 1: return -res return res raise NotImplementedError() def substitute_indices(t, *index_tuples): if not isinstance(t, TensExpr): return t return t.substitute_indices(*index_tuples) def _expand(expr, **kwargs): if isinstance(expr, TensExpr): return expr._expand(**kwargs) else: return expr.expand(**kwargs)
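# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above; a hedged usage example
# assuming a standard SymPy installation where these names are importable
# from sympy.tensor.tensor).  It ties together canon_bp(), the
# TensMul.contract_metric() method and the module-level contract_metric()
# wrapper defined at the end of this file.
from sympy.tensor.tensor import (TensorIndexType, tensor_indices,
                                 tensor_heads, contract_metric)

Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m0, m1 = tensor_indices('m0,m1', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p,q', [Lorentz])

t = p(m0)*q(m1)*g(-m0, -m1)
print(t.canon_bp())                      # metric(L_0, L_1)*p(-L_0)*q(-L_1)
print(t.contract_metric(g).canon_bp())   # p(L_0)*q(-L_0)

# The module-level wrapper simply delegates for TensExpr arguments and
# returns non-tensor expressions unchanged:
print(contract_metric(t, g).canon_bp())  # p(L_0)*q(-L_0)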
"""Module with functions operating on IndexedBase, Indexed and Idx objects - Check shape conformance - Determine indices in resulting expression etc. Methods in this module could be implemented by calling methods on Expr objects instead. When things stabilize this could be a useful refactoring. """ from functools import reduce from sympy.core.function import Function from sympy.functions import exp, Piecewise from sympy.tensor.indexed import Idx, Indexed from sympy.utilities import sift from collections import OrderedDict class IndexConformanceException(Exception): pass def _unique_and_repeated(inds): """ Returns the unique and repeated indices. Also note, from the examples given below that the order of indices is maintained as given in the input. Examples ======== >>> from sympy.tensor.index_methods import _unique_and_repeated >>> _unique_and_repeated([2, 3, 1, 3, 0, 4, 0]) ([2, 1, 4], [3, 0]) """ uniq = OrderedDict() for i in inds: if i in uniq: uniq[i] = 0 else: uniq[i] = 1 return sift(uniq, lambda x: uniq[x], binary=True) def _remove_repeated(inds): """ Removes repeated objects from sequences Returns a set of the unique objects and a tuple of all that have been removed. Examples ======== >>> from sympy.tensor.index_methods import _remove_repeated >>> l1 = [1, 2, 3, 2] >>> _remove_repeated(l1) ({1, 3}, (2,)) """ u, r = _unique_and_repeated(inds) return set(u), tuple(r) def _get_indices_Mul(expr, return_dummies=False): """Determine the outer indices of a Mul object. Examples ======== >>> from sympy.tensor.index_methods import _get_indices_Mul >>> from sympy.tensor.indexed import IndexedBase, Idx >>> i, j, k = map(Idx, ['i', 'j', 'k']) >>> x = IndexedBase('x') >>> y = IndexedBase('y') >>> _get_indices_Mul(x[i, k]*y[j, k]) ({i, j}, {}) >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True) ({i, j}, {}, (k,)) """ inds = list(map(get_indices, expr.args)) inds, syms = list(zip(*inds)) inds = list(map(list, inds)) inds = list(reduce(lambda x, y: x + y, inds)) inds, dummies = _remove_repeated(inds) symmetry = {} for s in syms: for pair in s: if pair in symmetry: symmetry[pair] *= s[pair] else: symmetry[pair] = s[pair] if return_dummies: return inds, symmetry, dummies else: return inds, symmetry def _get_indices_Pow(expr): """Determine outer indices of a power or an exponential. A power is considered a universal function, so that the indices of a Pow is just the collection of indices present in the expression. This may be viewed as a bit inconsistent in the special case: x[i]**2 = x[i]*x[i] (1) The above expression could have been interpreted as the contraction of x[i] with itself, but we choose instead to interpret it as a function lambda y: y**2 applied to each element of x (a universal function in numpy terms). In order to allow an interpretation of (1) as a contraction, we need contravariant and covariant Idx subclasses. (FIXME: this is not yet implemented) Expressions in the base or exponent are subject to contraction as usual, but an index that is present in the exponent, will not be considered contractable with its own base. Note however, that indices in the same exponent can be contracted with each other. 
Examples ======== >>> from sympy.tensor.index_methods import _get_indices_Pow >>> from sympy import Pow, exp, IndexedBase, Idx >>> A = IndexedBase('A') >>> x = IndexedBase('x') >>> i, j, k = map(Idx, ['i', 'j', 'k']) >>> _get_indices_Pow(exp(A[i, j]*x[j])) ({i}, {}) >>> _get_indices_Pow(Pow(x[i], x[i])) ({i}, {}) >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i])) ({i}, {}) """ base, exp = expr.as_base_exp() binds, bsyms = get_indices(base) einds, esyms = get_indices(exp) inds = binds | einds # FIXME: symmetries from power needs to check special cases, else nothing symmetries = {} return inds, symmetries def _get_indices_Add(expr): """Determine outer indices of an Add object. In a sum, each term must have the same set of outer indices. A valid expression could be x(i)*y(j) - x(j)*y(i) But we do not allow expressions like: x(i)*y(j) - z(j)*z(j) FIXME: Add support for Numpy broadcasting Examples ======== >>> from sympy.tensor.index_methods import _get_indices_Add >>> from sympy.tensor.indexed import IndexedBase, Idx >>> i, j, k = map(Idx, ['i', 'j', 'k']) >>> x = IndexedBase('x') >>> y = IndexedBase('y') >>> _get_indices_Add(x[i] + x[k]*y[i, k]) ({i}, {}) """ inds = list(map(get_indices, expr.args)) inds, syms = list(zip(*inds)) # allow broadcast of scalars non_scalars = [x for x in inds if x != set()] if not non_scalars: return set(), {} if not all([x == non_scalars[0] for x in non_scalars[1:]]): raise IndexConformanceException("Indices are not consistent: %s" % expr) if not reduce(lambda x, y: x != y or y, syms): symmetries = syms[0] else: # FIXME: search for symmetries symmetries = {} return non_scalars[0], symmetries def get_indices(expr): """Determine the outer indices of expression ``expr`` By *outer* we mean indices that are not summation indices. Returns a set and a dict. The set contains outer indices and the dict contains information about index symmetries. Examples ======== >>> from sympy.tensor.index_methods import get_indices >>> from sympy import symbols >>> from sympy.tensor import IndexedBase >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, a, z = symbols('i j a z', integer=True) The indices of the total expression is determined, Repeated indices imply a summation, for instance the trace of a matrix A: >>> get_indices(A[i, i]) (set(), {}) In the case of many terms, the terms are required to have identical outer indices. Else an IndexConformanceException is raised. >>> get_indices(x[i] + A[i, j]*y[j]) ({i}, {}) :Exceptions: An IndexConformanceException means that the terms ar not compatible, e.g. >>> get_indices(x[i] + y[j]) #doctest: +SKIP (...) IndexConformanceException: Indices are not consistent: x(i) + y(j) .. warning:: The concept of *outer* indices applies recursively, starting on the deepest level. This implies that dummies inside parenthesis are assumed to be summed first, so that the following expression is handled gracefully: >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) ({i, j}, {}) This is correct and may appear convenient, but you need to be careful with this as SymPy will happily .expand() the product, if requested. The resulting expression would mix the outer ``j`` with the dummies inside the parenthesis, which makes it a different expression. To be on the safe side, it is best to avoid such ambiguities by using unique indices for all contractions that should be held separate. """ # We call ourself recursively to determine indices of sub expressions. 
# break recursion if isinstance(expr, Indexed): c = expr.indices inds, dummies = _remove_repeated(c) return inds, {} elif expr is None: return set(), {} elif isinstance(expr, Idx): return {expr}, {} elif expr.is_Atom: return set(), {} # recurse via specialized functions else: if expr.is_Mul: return _get_indices_Mul(expr) elif expr.is_Add: return _get_indices_Add(expr) elif expr.is_Pow or isinstance(expr, exp): return _get_indices_Pow(expr) elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return set(), {} elif isinstance(expr, Function): # Support ufunc like behaviour by returning indices from arguments. # Functions do not interpret repeated indices across argumnts # as summation ind0 = set() for arg in expr.args: ind, sym = get_indices(arg) ind0 |= ind return ind0, sym # this test is expensive, so it should be at the end elif not expr.has(Indexed): return set(), {} raise NotImplementedError( "FIXME: No specialized handling of type %s" % type(expr)) def get_contraction_structure(expr): """Determine dummy indices of ``expr`` and describe its structure By *dummy* we mean indices that are summation indices. The structure of the expression is determined and described as follows: 1) A conforming summation of Indexed objects is described with a dict where the keys are summation indices and the corresponding values are sets containing all terms for which the summation applies. All Add objects in the SymPy expression tree are described like this. 2) For all nodes in the SymPy expression tree that are *not* of type Add, the following applies: If a node discovers contractions in one of its arguments, the node itself will be stored as a key in the dict. For that key, the corresponding value is a list of dicts, each of which is the result of a recursive call to get_contraction_structure(). The list contains only dicts for the non-trivial deeper contractions, omitting dicts with None as the one and only key. .. Note:: The presence of expressions among the dictionary keys indicates multiple levels of index contractions. A nested dict displays nested contractions and may itself contain dicts from a deeper level. In practical calculations the summation in the deepest nested level must be calculated first so that the outer expression can access the resulting indexed object. Examples ======== >>> from sympy.tensor.index_methods import get_contraction_structure >>> from sympy import default_sort_key >>> from sympy.tensor import IndexedBase, Idx >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) >>> get_contraction_structure(x[i]*y[i] + A[j, j]) {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} >>> get_contraction_structure(x[i]*y[j]) {None: {x[i]*y[j]}} A multiplication of contracted factors results in nested dicts representing the internal contractions. >>> d = get_contraction_structure(x[i, i]*y[j, j]) >>> sorted(d.keys(), key=default_sort_key) [None, x[i, i]*y[j, j]] In this case, the product has no contractions: >>> d[None] {x[i, i]*y[j, j]} Factors are contracted "first": >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] A parenthesized Add object is also returned as a nested dictionary. The term containing the parenthesis is a Mul with a contraction among the arguments, so it will be found as a key in the result. It stores the dictionary resulting from a recursive call on the Add expression. 
>>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) >>> sorted(d.keys(), key=default_sort_key) [(A[i, j]*x[j] + y[i])*x[i], (i,)] >>> d[(i,)] {(A[i, j]*x[j] + y[i])*x[i]} >>> d[x[i]*(A[i, j]*x[j] + y[i])] [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] Powers with contractions in either base or exponent will also be found as keys in the dictionary, mapping to a list of results from recursive calls: >>> d = get_contraction_structure(A[j, j]**A[i, i]) >>> d[None] {A[j, j]**A[i, i]} >>> nested_contractions = d[A[j, j]**A[i, i]] >>> nested_contractions[0] {(j,): {A[j, j]}} >>> nested_contractions[1] {(i,): {A[i, i]}} The description of the contraction structure may appear complicated when represented with a string in the above examples, but it is easy to iterate over: >>> from sympy import Expr >>> for key in d: ... if isinstance(key, Expr): ... continue ... for term in d[key]: ... if term in d: ... # treat deepest contraction first ... pass ... # treat outermost contactions here """ # We call ourself recursively to inspect sub expressions. if isinstance(expr, Indexed): junk, key = _remove_repeated(expr.indices) return {key or None: {expr}} elif expr.is_Atom: return {None: {expr}} elif expr.is_Mul: junk, junk, key = _get_indices_Mul(expr, return_dummies=True) result = {key or None: {expr}} # recurse on every factor nested = [] for fac in expr.args: facd = get_contraction_structure(fac) if not (None in facd and len(facd) == 1): nested.append(facd) if nested: result[expr] = nested return result elif expr.is_Pow or isinstance(expr, exp): # recurse in base and exp separately. If either has internal # contractions we must include ourselves as a key in the returned dict b, e = expr.as_base_exp() dbase = get_contraction_structure(b) dexp = get_contraction_structure(e) dicts = [] for d in dbase, dexp: if not (None in d and len(d) == 1): dicts.append(d) result = {None: {expr}} if dicts: result[expr] = dicts return result elif expr.is_Add: # Note: we just collect all terms with identical summation indices, We # do nothing to identify equivalent terms here, as this would require # substitutions or pattern matching in expressions of unknown # complexity. result = {} for term in expr.args: # recurse on every term d = get_contraction_structure(term) for key in d: if key in result: result[key] |= d[key] else: result[key] = d[key] return result elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return {None: expr} elif isinstance(expr, Function): # Collect non-trivial contraction structures in each argument # We do not report repeated indices in separate arguments as a # contraction deeplist = [] for arg in expr.args: deep = get_contraction_structure(arg) if not (None in deep and len(deep) == 1): deeplist.append(deep) d = {None: {expr}} if deeplist: d[expr] = deeplist return d # this test is expensive, so it should be at the end elif not expr.has(Indexed): return {None: {expr}} raise NotImplementedError( "FIXME: No specialized handling of type %s" % type(expr))
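# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): one hedged way to walk
# the nested dictionary returned by get_contraction_structure(), printing
# each group of summation indices together with the terms it applies to.
# Assumes a standard SymPy installation; the helper name `walk` is ours.
from sympy import Expr
from sympy.tensor import IndexedBase, Idx, get_contraction_structure

x, y, A = map(IndexedBase, ['x', 'y', 'A'])
i, j = map(Idx, ['i', 'j'])

def walk(d, depth=0):
    for key, terms in d.items():
        if isinstance(key, Expr):
            # an Expr key signals nested contractions inside that factor;
            # its value is a list of dictionaries from recursive calls
            for sub in terms:
                walk(sub, depth + 1)
        else:
            # key is None or a tuple of dummy indices; value is a set of terms
            print("    "*depth, key, "->", terms)

walk(get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])))
# prints, roughly:
#  (i,) -> {(A[i, j]*x[j] + y[i])*x[i]}
#      None -> {y[i]}
#      (j,) -> {A[i, j]*x[j]}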
from collections.abc import Iterable from sympy import Expr, S, Mul, sympify from sympy.core.parameters import global_parameters class TensorProduct(Expr): """ Generic class for tensor products. """ is_number = False def __new__(cls, *args, **kwargs): from sympy.tensor.array import NDimArray, tensorproduct, Array from sympy import MatrixBase, MatrixExpr from sympy.strategies import flatten args = [sympify(arg) for arg in args] evaluate = kwargs.get("evaluate", global_parameters.evaluate) if not evaluate: obj = Expr.__new__(cls, *args) return obj arrays = [] other = [] scalar = S.One for arg in args: if isinstance(arg, (Iterable, MatrixBase, NDimArray)): arrays.append(Array(arg)) elif isinstance(arg, (MatrixExpr,)): other.append(arg) else: scalar *= arg coeff = scalar*tensorproduct(*arrays) if len(other) == 0: return coeff if coeff != 1: newargs = [coeff] + other else: newargs = other obj = Expr.__new__(cls, *newargs, **kwargs) return flatten(obj) def rank(self): return len(self.shape) def _get_args_shapes(self): from sympy import Array return [i.shape if hasattr(i, "shape") else Array(i).shape for i in self.args] @property def shape(self): shape_list = self._get_args_shapes() return sum(shape_list, ()) def __getitem__(self, index): index = iter(index) return Mul.fromiter( arg.__getitem__(tuple(next(index) for i in shp)) for arg, shp in zip(self.args, self._get_args_shapes()) )
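# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above; a hedged example
# assuming this file is sympy/tensor/functions.py in a standard SymPy
# installation).  Array-like arguments are multiplied out eagerly through
# tensorproduct(), while matrix expressions are kept symbolic, so shape and
# element access work on the unevaluated TensorProduct as shown below.
from sympy import Array, MatrixSymbol, symbols
from sympy.tensor.functions import TensorProduct

x, y = symbols('x y')

# Explicit arrays are evaluated immediately to an N-dimensional array:
print(TensorProduct(Array([x, y]), Array([[1, 2], [3, 4]])))

# Matrix expressions stay unevaluated, but expose the combined shape and
# component-wise access:
M = MatrixSymbol('M', 2, 2)
N = MatrixSymbol('N', 3, 3)
tp = TensorProduct(M, N)
print(tp.shape)          # (2, 2, 3, 3): factor shapes concatenated
print(tp[0, 1, 2, 0])    # M[0, 1]*N[2, 0]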
r"""Module that defines indexed objects The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a matrix element ``M[i, j]`` as in the following diagram:: 1) The Indexed class represents the entire indexed object. | ___|___ ' ' M[i, j] / \__\______ | | | | | 2) The Idx class represents indices; each Idx can | optionally contain information about its range. | 3) IndexedBase represents the 'stem' of an indexed object, here `M`. The stem used by itself is usually taken to represent the entire array. There can be any number of indices on an Indexed object. No transformation properties are implemented in these Base objects, but implicit contraction of repeated indices is supported. Note that the support for complicated (i.e. non-atomic) integer expressions as indices is limited. (This should be improved in future releases.) Examples ======== To express the above matrix element example you would write: >>> from sympy import symbols, IndexedBase, Idx >>> M = IndexedBase('M') >>> i, j = symbols('i j', cls=Idx) >>> M[i, j] M[i, j] Repeated indices in a product implies a summation, so to express a matrix-vector product in terms of Indexed objects: >>> x = IndexedBase('x') >>> M[i, j]*x[j] M[i, j]*x[j] If the indexed objects will be converted to component based arrays, e.g. with the code printers or the autowrap framework, you also need to provide (symbolic or numerical) dimensions. This can be done by passing an optional shape parameter to IndexedBase upon construction: >>> dim1, dim2 = symbols('dim1 dim2', integer=True) >>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2)) >>> A.shape (dim1, 2*dim1, dim2) >>> A[i, j, 3].shape (dim1, 2*dim1, dim2) If an IndexedBase object has no shape information, it is assumed that the array is as large as the ranges of its indices: >>> n, m = symbols('n m', integer=True) >>> i = Idx('i', m) >>> j = Idx('j', n) >>> M[i, j].shape (m, n) >>> M[i, j].ranges [(0, m - 1), (0, n - 1)] The above can be compared with the following: >>> A[i, 2, j].shape (dim1, 2*dim1, dim2) >>> A[i, 2, j].ranges [(0, m - 1), None, (0, n - 1)] To analyze the structure of indexed expressions, you can use the methods get_indices() and get_contraction_structure(): >>> from sympy.tensor import get_indices, get_contraction_structure >>> get_indices(A[i, j, j]) ({i}, {}) >>> get_contraction_structure(A[i, j, j]) {(j,): {A[i, j, j]}} See the appropriate docstrings for a detailed explanation of the output. 
""" # TODO: (some ideas for improvement) # # o test and guarantee numpy compatibility # - implement full support for broadcasting # - strided arrays # # o more functions to analyze indexed expressions # - identify standard constructs, e.g matrix-vector product in a subexpression # # o functions to generate component based arrays (numpy and sympy.Matrix) # - generate a single array directly from Indexed # - convert simple sub-expressions # # o sophisticated indexing (possibly in subclasses to preserve simplicity) # - Idx with range smaller than dimension of Indexed # - Idx with stepsize != 1 # - Idx with step determined by function call from collections.abc import Iterable from sympy import Number from sympy.core.assumptions import StdFactKB from sympy.core import Expr, Tuple, sympify, S from sympy.core.symbol import _filter_assumptions, Symbol from sympy.core.compatibility import (is_sequence, NotIterable) from sympy.core.logic import fuzzy_bool, fuzzy_not from sympy.core.sympify import _sympify from sympy.functions.special.tensor_functions import KroneckerDelta from sympy.multipledispatch import dispatch class IndexException(Exception): pass class Indexed(Expr): """Represents a mathematical object with indices. >>> from sympy import Indexed, IndexedBase, Idx, symbols >>> i, j = symbols('i j', cls=Idx) >>> Indexed('A', i, j) A[i, j] It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``: ``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``. >>> A = IndexedBase('A') >>> a_ij = A[i, j] # Prefer this, >>> b_ij = Indexed(A, i, j) # over this. >>> a_ij == b_ij True """ is_commutative = True is_Indexed = True is_symbol = True is_Atom = True def __new__(cls, base, *args, **kw_args): from sympy.utilities.misc import filldedent from sympy.tensor.array.ndim_array import NDimArray from sympy.matrices.matrices import MatrixBase if not args: raise IndexException("Indexed needs at least one index.") if isinstance(base, (str, Symbol)): base = IndexedBase(base) elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase): raise TypeError(filldedent(""" The base can only be replaced with a string, Symbol, IndexedBase or an object with a method for getting items (i.e. an object with a `__getitem__` method). 
""")) args = list(map(sympify, args)) if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all([i.is_number for i in args]): if len(args) == 1: return base[args[0]] else: return base[args] obj = Expr.__new__(cls, base, *args, **kw_args) try: IndexedBase._set_assumptions(obj, base.assumptions0) except AttributeError: IndexedBase._set_assumptions(obj, {}) return obj def _hashable_content(self): return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) @property def name(self): return str(self) @property def _diff_wrt(self): """Allow derivatives with respect to an ``Indexed`` object.""" return True def _eval_derivative(self, wrt): from sympy.tensor.array.ndim_array import NDimArray if isinstance(wrt, Indexed) and wrt.base == self.base: if len(self.indices) != len(wrt.indices): msg = "Different # of indices: d({!s})/d({!s})".format(self, wrt) raise IndexException(msg) result = S.One for index1, index2 in zip(self.indices, wrt.indices): result *= KroneckerDelta(index1, index2) return result elif isinstance(self.base, NDimArray): from sympy.tensor.array import derive_by_array return Indexed(derive_by_array(self.base, wrt), *self.args[1:]) else: if Tuple(self.indices).has(wrt): return S.NaN return S.Zero @property def assumptions0(self): return {k: v for k, v in self._assumptions.items() if v is not None} @property def base(self): """Returns the ``IndexedBase`` of the ``Indexed`` object. Examples ======== >>> from sympy import Indexed, IndexedBase, Idx, symbols >>> i, j = symbols('i j', cls=Idx) >>> Indexed('A', i, j).base A >>> B = IndexedBase('B') >>> B == B[i, j].base True """ return self.args[0] @property def indices(self): """ Returns the indices of the ``Indexed`` object. Examples ======== >>> from sympy import Indexed, Idx, symbols >>> i, j = symbols('i j', cls=Idx) >>> Indexed('A', i, j).indices (i, j) """ return self.args[1:] @property def rank(self): """ Returns the rank of the ``Indexed`` object. Examples ======== >>> from sympy import Indexed, Idx, symbols >>> i, j, k, l, m = symbols('i:m', cls=Idx) >>> Indexed('A', i, j).rank 2 >>> q = Indexed('A', i, j, k, l, m) >>> q.rank 5 >>> q.rank == len(q.indices) True """ return len(self.args) - 1 @property def shape(self): """Returns a list with dimensions of each index. Dimensions is a property of the array, not of the indices. Still, if the ``IndexedBase`` does not define a shape attribute, it is assumed that the ranges of the indices correspond to the shape of the array. >>> from sympy import IndexedBase, Idx, symbols >>> n, m = symbols('n m', integer=True) >>> i = Idx('i', m) >>> j = Idx('j', m) >>> A = IndexedBase('A', shape=(n, n)) >>> B = IndexedBase('B') >>> A[i, j].shape (n, n) >>> B[i, j].shape (m, m) """ from sympy.utilities.misc import filldedent if self.base.shape: return self.base.shape sizes = [] for i in self.indices: upper = getattr(i, 'upper', None) lower = getattr(i, 'lower', None) if None in (upper, lower): raise IndexException(filldedent(""" Range is not defined for all indices in: %s""" % self)) try: size = upper - lower + 1 except TypeError: raise IndexException(filldedent(""" Shape cannot be inferred from Idx with undefined range: %s""" % self)) sizes.append(size) return Tuple(*sizes) @property def ranges(self): """Returns a list of tuples with lower and upper range of each index. If an index does not define the data members upper and lower, the corresponding slot in the list contains ``None`` instead of a tuple. 
Examples ======== >>> from sympy import Indexed,Idx, symbols >>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges [(0, 1), (0, 3), (0, 7)] >>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges [(0, 2), (0, 2), (0, 2)] >>> x, y, z = symbols('x y z', integer=True) >>> Indexed('A', x, y, z).ranges [None, None, None] """ ranges = [] for i in self.indices: sentinel = object() upper = getattr(i, 'upper', sentinel) lower = getattr(i, 'lower', sentinel) if sentinel not in (upper, lower): ranges.append(Tuple(lower, upper)) else: ranges.append(None) return ranges def _sympystr(self, p): indices = list(map(p.doprint, self.indices)) return "%s[%s]" % (p.doprint(self.base), ", ".join(indices)) @property def free_symbols(self): base_free_symbols = self.base.free_symbols indices_free_symbols = { fs for i in self.indices for fs in i.free_symbols} if base_free_symbols: return {self} | base_free_symbols | indices_free_symbols else: return indices_free_symbols @property def expr_free_symbols(self): return {self} class IndexedBase(Expr, NotIterable): """Represent the base or stem of an indexed object The IndexedBase class represent an array that contains elements. The main purpose of this class is to allow the convenient creation of objects of the Indexed class. The __getitem__ method of IndexedBase returns an instance of Indexed. Alone, without indices, the IndexedBase class can be used as a notation for e.g. matrix equations, resembling what you could do with the Symbol class. But, the IndexedBase class adds functionality that is not available for Symbol instances: - An IndexedBase object can optionally store shape information. This can be used in to check array conformance and conditions for numpy broadcasting. (TODO) - An IndexedBase object implements syntactic sugar that allows easy symbolic representation of array operations, using implicit summation of repeated indices. - The IndexedBase object symbolizes a mathematical structure equivalent to arrays, and is recognized as such for code generation and automatic compilation and wrapping. >>> from sympy.tensor import IndexedBase, Idx >>> from sympy import symbols >>> A = IndexedBase('A'); A A >>> type(A) <class 'sympy.tensor.indexed.IndexedBase'> When an IndexedBase object receives indices, it returns an array with named axes, represented by an Indexed object: >>> i, j = symbols('i j', integer=True) >>> A[i, j, 2] A[i, j, 2] >>> type(A[i, j, 2]) <class 'sympy.tensor.indexed.Indexed'> The IndexedBase constructor takes an optional shape argument. If given, it overrides any shape information in the indices. (But not the index ranges!) 
>>> m, n, o, p = symbols('m n o p', integer=True) >>> i = Idx('i', m) >>> j = Idx('j', n) >>> A[i, j].shape (m, n) >>> B = IndexedBase('B', shape=(o, p)) >>> B[i, j].shape (o, p) Assumptions can be specified with keyword arguments the same way as for Symbol: >>> A_real = IndexedBase('A', real=True) >>> A_real.is_real True >>> A != A_real True Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase: >>> I = symbols('I', integer=True) >>> C_inherit = IndexedBase(I) >>> C_explicit = IndexedBase('I', integer=True) >>> C_inherit == C_explicit True """ is_commutative = True is_symbol = True is_Atom = True @staticmethod def _set_assumptions(obj, assumptions): """Set assumptions on obj, making sure to apply consistent values.""" tmp_asm_copy = assumptions.copy() is_commutative = fuzzy_bool(assumptions.get('commutative', True)) assumptions['commutative'] = is_commutative obj._assumptions = StdFactKB(assumptions) obj._assumptions._generator = tmp_asm_copy # Issue #8873 def __new__(cls, label, shape=None, *, offset=S.Zero, strides=None, **kw_args): from sympy import MatrixBase, NDimArray assumptions, kw_args = _filter_assumptions(kw_args) if isinstance(label, str): label = Symbol(label, **assumptions) elif isinstance(label, Symbol): assumptions = label._merge(assumptions) elif isinstance(label, (MatrixBase, NDimArray)): return label elif isinstance(label, Iterable): return _sympify(label) else: label = _sympify(label) if is_sequence(shape): shape = Tuple(*shape) elif shape is not None: shape = Tuple(shape) if shape is not None: obj = Expr.__new__(cls, label, shape) else: obj = Expr.__new__(cls, label) obj._shape = shape obj._offset = offset obj._strides = strides obj._name = str(label) IndexedBase._set_assumptions(obj, assumptions) return obj @property def name(self): return self._name def _hashable_content(self): return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) @property def assumptions0(self): return {k: v for k, v in self._assumptions.items() if v is not None} def __getitem__(self, indices, **kw_args): if is_sequence(indices): # Special case needed because M[*my_tuple] is a syntax error. if self.shape and len(self.shape) != len(indices): raise IndexException("Rank mismatch.") return Indexed(self, *indices, **kw_args) else: if self.shape and len(self.shape) != 1: raise IndexException("Rank mismatch.") return Indexed(self, indices, **kw_args) @property def shape(self): """Returns the shape of the ``IndexedBase`` object. Examples ======== >>> from sympy import IndexedBase, Idx >>> from sympy.abc import x, y >>> IndexedBase('A', shape=(x, y)).shape (x, y) Note: If the shape of the ``IndexedBase`` is specified, it will override any shape information given by the indices. >>> A = IndexedBase('A', shape=(x, y)) >>> B = IndexedBase('B') >>> i = Idx('i', 2) >>> j = Idx('j', 1) >>> A[i, j].shape (x, y) >>> B[i, j].shape (2, 1) """ return self._shape @property def strides(self): """Returns the strided scheme for the ``IndexedBase`` object. Normally this is a tuple denoting the number of steps to take in the respective dimension when traversing an array. For code generation purposes strides='C' and strides='F' can also be used. strides='C' would mean that code printer would unroll in row-major order and 'F' means unroll in column major order. """ return self._strides @property def offset(self): """Returns the offset for the ``IndexedBase`` object. This is the value added to the resulting index when the 2D Indexed object is unrolled to a 1D form. 
Used in code generation. Examples ========== >>> from sympy.printing import ccode >>> from sympy.tensor import IndexedBase, Idx >>> from sympy import symbols >>> l, m, n, o = symbols('l m n o', integer=True) >>> A = IndexedBase('A', strides=(l, m, n), offset=o) >>> i, j, k = map(Idx, 'ijk') >>> ccode(A[i, j, k]) 'A[l*i + m*j + n*k + o]' """ return self._offset @property def label(self): """Returns the label of the ``IndexedBase`` object. Examples ======== >>> from sympy import IndexedBase >>> from sympy.abc import x, y >>> IndexedBase('A', shape=(x, y)).label A """ return self.args[0] def _sympystr(self, p): return p.doprint(self.label) class Idx(Expr): """Represents an integer index as an ``Integer`` or integer expression. There are a number of ways to create an ``Idx`` object. The constructor takes two arguments: ``label`` An integer or a symbol that labels the index. ``range`` Optionally you can specify a range as either * ``Symbol`` or integer: This is interpreted as a dimension. Lower and upper bounds are set to ``0`` and ``range - 1``, respectively. * ``tuple``: The two elements are interpreted as the lower and upper bounds of the range, respectively. Note: bounds of the range are assumed to be either integer or infinite (oo and -oo are allowed to specify an unbounded range). If ``n`` is given as a bound, then ``n.is_integer`` must not return false. For convenience, if the label is given as a string it is automatically converted to an integer symbol. (Note: this conversion is not done for range or dimension arguments.) Examples ======== >>> from sympy import Idx, symbols, oo >>> n, i, L, U = symbols('n i L U', integer=True) If a string is given for the label an integer ``Symbol`` is created and the bounds are both ``None``: >>> idx = Idx('qwerty'); idx qwerty >>> idx.lower, idx.upper (None, None) Both upper and lower bounds can be specified: >>> idx = Idx(i, (L, U)); idx i >>> idx.lower, idx.upper (L, U) When only a single bound is given it is interpreted as the dimension and the lower bound defaults to 0: >>> idx = Idx(i, n); idx.lower, idx.upper (0, n - 1) >>> idx = Idx(i, 4); idx.lower, idx.upper (0, 3) >>> idx = Idx(i, oo); idx.lower, idx.upper (0, oo) """ is_integer = True is_finite = True is_real = True is_symbol = True is_Atom = True _diff_wrt = True def __new__(cls, label, range=None, **kw_args): from sympy.utilities.misc import filldedent if isinstance(label, str): label = Symbol(label, integer=True) label, range = list(map(sympify, (label, range))) if label.is_Number: if not label.is_integer: raise TypeError("Index is not an integer number.") return label if not label.is_integer: raise TypeError("Idx object requires an integer label.") elif is_sequence(range): if len(range) != 2: raise ValueError(filldedent(""" Idx range tuple must have length 2, but got %s""" % len(range))) for bound in range: if (bound.is_integer is False and bound is not S.Infinity and bound is not S.NegativeInfinity): raise TypeError("Idx object requires integer bounds.") args = label, Tuple(*range) elif isinstance(range, Expr): if range is not S.Infinity and fuzzy_not(range.is_integer): raise TypeError("Idx object requires an integer dimension.") args = label, Tuple(0, range - 1) elif range: raise TypeError(filldedent(""" The range must be an ordered iterable or integer SymPy expression.""")) else: args = label, obj = Expr.__new__(cls, *args, **kw_args) obj._assumptions["finite"] = True obj._assumptions["real"] = True return obj @property def label(self): """Returns the label (Integer or integer 
expression) of the Idx object. Examples ======== >>> from sympy import Idx, Symbol >>> x = Symbol('x', integer=True) >>> Idx(x).label x >>> j = Symbol('j', integer=True) >>> Idx(j).label j >>> Idx(j + 1).label j + 1 """ return self.args[0] @property def lower(self): """Returns the lower bound of the ``Idx``. Examples ======== >>> from sympy import Idx >>> Idx('j', 2).lower 0 >>> Idx('j', 5).lower 0 >>> Idx('j').lower is None True """ try: return self.args[1][0] except IndexError: return @property def upper(self): """Returns the upper bound of the ``Idx``. Examples ======== >>> from sympy import Idx >>> Idx('j', 2).upper 1 >>> Idx('j', 5).upper 4 >>> Idx('j').upper is None True """ try: return self.args[1][1] except IndexError: return def _sympystr(self, p): return p.doprint(self.label) @property def name(self): return self.label.name if self.label.is_Symbol else str(self.label) @property def free_symbols(self): return {self} @dispatch(Idx, Idx) def _eval_is_ge(lhs, rhs): # noqa:F811 other_upper = rhs if rhs.upper is None else rhs.upper other_lower = rhs if rhs.lower is None else rhs.lower if lhs.lower is not None and (lhs.lower >= other_upper) == True: return True if lhs.upper is not None and (lhs.upper < other_lower) == True: return False return None @dispatch(Idx, Number) # type:ignore def _eval_is_ge(lhs, rhs): # noqa:F811 other_upper = rhs other_lower = rhs if lhs.lower is not None and (lhs.lower >= other_upper) == True: return True if lhs.upper is not None and (lhs.upper < other_lower) == True: return False return None @dispatch(Number, Idx) # type:ignore def _eval_is_ge(lhs, rhs): # noqa:F811 other_upper = lhs other_lower = lhs if rhs.upper is not None and (rhs.upper <= other_lower) == True: return True if rhs.lower is not None and (rhs.lower > other_upper) == True: return False return None
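# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above; a hedged example
# assuming a standard SymPy installation).  Differentiating one Indexed
# element with respect to another element of the same base goes through
# Indexed._eval_derivative defined above and yields a product of
# KroneckerDelta factors, one per index position.
from sympy import IndexedBase, Idx, symbols

i, j, m, n = symbols('i j m n', cls=Idx)
A = IndexedBase('A')

print(A[i, j].diff(A[m, n]))   # KroneckerDelta(i, m)*KroneckerDelta(j, n)
print(A[i, j].diff(A[i, j]))   # 1, since KroneckerDelta(k, k) == 1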
import copy from sympy.core.function import expand_mul from sympy.functions.elementary.miscellaneous import Min, sqrt from .common import NonSquareMatrixError, NonPositiveDefiniteMatrixError from .utilities import _get_intermediate_simp, _iszero from .determinant import _find_reasonable_pivot_naive def _rank_decomposition(M, iszerofunc=_iszero, simplify=False): r"""Returns a pair of matrices (`C`, `F`) with matching rank such that `A = C F`. Parameters ========== iszerofunc : Function, optional A function used for detecting whether an element can act as a pivot. ``lambda x: x.is_zero`` is used by default. simplify : Bool or Function, optional A function used to simplify elements when looking for a pivot. By default SymPy's ``simplify`` is used. Returns ======= (C, F) : Matrices `C` and `F` are full-rank matrices with rank as same as `A`, whose product gives `A`. See Notes for additional mathematical details. Examples ======== >>> from sympy.matrices import Matrix >>> A = Matrix([ ... [1, 3, 1, 4], ... [2, 7, 3, 9], ... [1, 5, 3, 1], ... [1, 2, 0, 8] ... ]) >>> C, F = A.rank_decomposition() >>> C Matrix([ [1, 3, 4], [2, 7, 9], [1, 5, 1], [1, 2, 8]]) >>> F Matrix([ [1, 0, -2, 0], [0, 1, 1, 0], [0, 0, 0, 1]]) >>> C * F == A True Notes ===== Obtaining `F`, an RREF of `A`, is equivalent to creating a product .. math:: E_n E_{n-1} ... E_1 A = F where `E_n, E_{n-1}, ... , E_1` are the elimination matrices or permutation matrices equivalent to each row-reduction step. The inverse of the same product of elimination matrices gives `C`: .. math:: C = (E_n E_{n-1} ... E_1)^{-1} It is not necessary, however, to actually compute the inverse: the columns of `C` are those from the original matrix with the same column indices as the indices of the pivot columns of `F`. References ========== .. [1] https://en.wikipedia.org/wiki/Rank_factorization .. [2] Piziak, R.; Odell, P. L. (1 June 1999). "Full Rank Factorization of Matrices". Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882 See Also ======== rref """ F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc, pivots=True) rank = len(pivot_cols) C = M.extract(range(M.rows), pivot_cols) F = F[:rank, :] return C, F def _liupc(M): """Liu's algorithm, for pre-determination of the Elimination Tree of the given matrix, used in row-based symbolic Cholesky factorization. Examples ======== >>> from sympy.matrices import SparseMatrix >>> S = SparseMatrix([ ... [1, 0, 3, 2], ... [0, 0, 1, 0], ... [4, 0, 0, 5], ... [0, 6, 7, 0]]) >>> S.liupc() ([[0], [], [0], [1, 2]], [4, 3, 4, 4]) References ========== Symbolic Sparse Cholesky Factorization using Elimination Trees, Jeroen Van Grondelle (1999) http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582 """ # Algorithm 2.4, p 17 of reference # get the indices of the elements that are non-zero on or below diag R = [[] for r in range(M.rows)] for r, c, _ in M.row_list(): if c <= r: R[r].append(c) inf = len(R) # nothing will be this large parent = [inf]*M.rows virtual = [inf]*M.rows for r in range(M.rows): for c in R[r][:-1]: while virtual[c] < r: t = virtual[c] virtual[c] = r c = t if virtual[c] == inf: parent[c] = virtual[c] = r return R, parent def _row_structure_symbolic_cholesky(M): """Symbolic cholesky factorization, for pre-determination of the non-zero structure of the Cholesky factororization. Examples ======== >>> from sympy.matrices import SparseMatrix >>> S = SparseMatrix([ ... [1, 0, 3, 2], ... [0, 0, 1, 0], ... [4, 0, 0, 5], ... 
[0, 6, 7, 0]]) >>> S.row_structure_symbolic_cholesky() [[0], [], [0], [1, 2]] References ========== Symbolic Sparse Cholesky Factorization using Elimination Trees, Jeroen Van Grondelle (1999) http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582 """ R, parent = M.liupc() inf = len(R) # this acts as infinity Lrow = copy.deepcopy(R) for k in range(M.rows): for j in R[k]: while j != inf and j != k: Lrow[k].append(j) j = parent[j] Lrow[k] = list(sorted(set(Lrow[k]))) return Lrow def _cholesky(M, hermitian=True): """Returns the Cholesky-type decomposition L of a matrix A such that L * L.H == A if hermitian flag is True, or L * L.T == A if hermitian is False. A must be a Hermitian positive-definite matrix if hermitian is True, or a symmetric matrix if it is False. Examples ======== >>> from sympy.matrices import Matrix >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11))) >>> A.cholesky() Matrix([ [ 5, 0, 0], [ 3, 3, 0], [-1, 1, 3]]) >>> A.cholesky() * A.cholesky().T Matrix([ [25, 15, -5], [15, 18, 0], [-5, 0, 11]]) The matrix can have complex entries: >>> from sympy import I >>> A = Matrix(((9, 3*I), (-3*I, 5))) >>> A.cholesky() Matrix([ [ 3, 0], [-I, 2]]) >>> A.cholesky() * A.cholesky().H Matrix([ [ 9, 3*I], [-3*I, 5]]) Non-hermitian Cholesky-type decomposition may be useful when the matrix is not positive-definite. >>> A = Matrix([[1, 2], [2, 1]]) >>> L = A.cholesky(hermitian=False) >>> L Matrix([ [1, 0], [2, sqrt(3)*I]]) >>> L*L.T == A True See Also ======== sympy.matrices.dense.DenseMatrix.LDLdecomposition sympy.matrices.matrices.MatrixBase.LUdecomposition QRdecomposition """ from .dense import MutableDenseMatrix if not M.is_square: raise NonSquareMatrixError("Matrix must be square.") if hermitian and not M.is_hermitian: raise ValueError("Matrix must be Hermitian.") if not hermitian and not M.is_symmetric(): raise ValueError("Matrix must be symmetric.") L = MutableDenseMatrix.zeros(M.rows, M.rows) if hermitian: for i in range(M.rows): for j in range(i): L[i, j] = ((1 / L[j, j])*(M[i, j] - sum(L[i, k]*L[j, k].conjugate() for k in range(j)))) Lii2 = (M[i, i] - sum(L[i, k]*L[i, k].conjugate() for k in range(i))) if Lii2.is_positive is False: raise NonPositiveDefiniteMatrixError( "Matrix must be positive-definite") L[i, i] = sqrt(Lii2) else: for i in range(M.rows): for j in range(i): L[i, j] = ((1 / L[j, j])*(M[i, j] - sum(L[i, k]*L[j, k] for k in range(j)))) L[i, i] = sqrt(M[i, i] - sum(L[i, k]**2 for k in range(i))) return M._new(L) def _cholesky_sparse(M, hermitian=True): """ Returns the Cholesky decomposition L of a matrix A such that L * L.T = A A must be a square, symmetric, positive-definite and non-singular matrix Examples ======== >>> from sympy.matrices import SparseMatrix >>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11))) >>> A.cholesky() Matrix([ [ 5, 0, 0], [ 3, 3, 0], [-1, 1, 3]]) >>> A.cholesky() * A.cholesky().T == A True The matrix can have complex entries: >>> from sympy import I >>> A = SparseMatrix(((9, 3*I), (-3*I, 5))) >>> A.cholesky() Matrix([ [ 3, 0], [-I, 2]]) >>> A.cholesky() * A.cholesky().H Matrix([ [ 9, 3*I], [-3*I, 5]]) Non-hermitian Cholesky-type decomposition may be useful when the matrix is not positive-definite. 
>>> A = SparseMatrix([[1, 2], [2, 1]]) >>> L = A.cholesky(hermitian=False) >>> L Matrix([ [1, 0], [2, sqrt(3)*I]]) >>> L*L.T == A True See Also ======== sympy.matrices.sparse.SparseMatrix.LDLdecomposition sympy.matrices.matrices.MatrixBase.LUdecomposition QRdecomposition """ from .dense import MutableDenseMatrix if not M.is_square: raise NonSquareMatrixError("Matrix must be square.") if hermitian and not M.is_hermitian: raise ValueError("Matrix must be Hermitian.") if not hermitian and not M.is_symmetric(): raise ValueError("Matrix must be symmetric.") dps = _get_intermediate_simp(expand_mul, expand_mul) Crowstruc = M.row_structure_symbolic_cholesky() C = MutableDenseMatrix.zeros(M.rows) for i in range(len(Crowstruc)): for j in Crowstruc[i]: if i != j: C[i, j] = M[i, j] summ = 0 for p1 in Crowstruc[i]: if p1 < j: for p2 in Crowstruc[j]: if p2 < j: if p1 == p2: if hermitian: summ += C[i, p1]*C[j, p1].conjugate() else: summ += C[i, p1]*C[j, p1] else: break else: break C[i, j] = dps((C[i, j] - summ) / C[j, j]) else: # i == j C[j, j] = M[j, j] summ = 0 for k in Crowstruc[j]: if k < j: if hermitian: summ += C[j, k]*C[j, k].conjugate() else: summ += C[j, k]**2 else: break Cjj2 = dps(C[j, j] - summ) if hermitian and Cjj2.is_positive is False: raise NonPositiveDefiniteMatrixError( "Matrix must be positive-definite") C[j, j] = sqrt(Cjj2) return M._new(C) def _LDLdecomposition(M, hermitian=True): """Returns the LDL Decomposition (L, D) of matrix A, such that L * D * L.H == A if hermitian flag is True, or L * D * L.T == A if hermitian is False. This method eliminates the use of square root. Further this ensures that all the diagonal entries of L are 1. A must be a Hermitian positive-definite matrix if hermitian is True, or a symmetric matrix otherwise. Examples ======== >>> from sympy.matrices import Matrix, eye >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11))) >>> L, D = A.LDLdecomposition() >>> L Matrix([ [ 1, 0, 0], [ 3/5, 1, 0], [-1/5, 1/3, 1]]) >>> D Matrix([ [25, 0, 0], [ 0, 9, 0], [ 0, 0, 9]]) >>> L * D * L.T * A.inv() == eye(A.rows) True The matrix can have complex entries: >>> from sympy import I >>> A = Matrix(((9, 3*I), (-3*I, 5))) >>> L, D = A.LDLdecomposition() >>> L Matrix([ [ 1, 0], [-I/3, 1]]) >>> D Matrix([ [9, 0], [0, 4]]) >>> L*D*L.H == A True See Also ======== sympy.matrices.dense.DenseMatrix.cholesky sympy.matrices.matrices.MatrixBase.LUdecomposition QRdecomposition """ from .dense import MutableDenseMatrix if not M.is_square: raise NonSquareMatrixError("Matrix must be square.") if hermitian and not M.is_hermitian: raise ValueError("Matrix must be Hermitian.") if not hermitian and not M.is_symmetric(): raise ValueError("Matrix must be symmetric.") D = MutableDenseMatrix.zeros(M.rows, M.rows) L = MutableDenseMatrix.eye(M.rows) if hermitian: for i in range(M.rows): for j in range(i): L[i, j] = (1 / D[j, j])*(M[i, j] - sum( L[i, k]*L[j, k].conjugate()*D[k, k] for k in range(j))) D[i, i] = (M[i, i] - sum(L[i, k]*L[i, k].conjugate()*D[k, k] for k in range(i))) if D[i, i].is_positive is False: raise NonPositiveDefiniteMatrixError( "Matrix must be positive-definite") else: for i in range(M.rows): for j in range(i): L[i, j] = (1 / D[j, j])*(M[i, j] - sum( L[i, k]*L[j, k]*D[k, k] for k in range(j))) D[i, i] = M[i, i] - sum(L[i, k]**2*D[k, k] for k in range(i)) return M._new(L), M._new(D) def _LDLdecomposition_sparse(M, hermitian=True): """ Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix ``A``, such that ``L * D * L.T == A``. 
``A`` must be a square, symmetric, positive-definite and non-singular. This method eliminates the use of square root and ensures that all the diagonal entries of L are 1. Examples ======== >>> from sympy.matrices import SparseMatrix >>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11))) >>> L, D = A.LDLdecomposition() >>> L Matrix([ [ 1, 0, 0], [ 3/5, 1, 0], [-1/5, 1/3, 1]]) >>> D Matrix([ [25, 0, 0], [ 0, 9, 0], [ 0, 0, 9]]) >>> L * D * L.T == A True """ from .dense import MutableDenseMatrix if not M.is_square: raise NonSquareMatrixError("Matrix must be square.") if hermitian and not M.is_hermitian: raise ValueError("Matrix must be Hermitian.") if not hermitian and not M.is_symmetric(): raise ValueError("Matrix must be symmetric.") dps = _get_intermediate_simp(expand_mul, expand_mul) Lrowstruc = M.row_structure_symbolic_cholesky() L = MutableDenseMatrix.eye(M.rows) D = MutableDenseMatrix.zeros(M.rows, M.cols) for i in range(len(Lrowstruc)): for j in Lrowstruc[i]: if i != j: L[i, j] = M[i, j] summ = 0 for p1 in Lrowstruc[i]: if p1 < j: for p2 in Lrowstruc[j]: if p2 < j: if p1 == p2: if hermitian: summ += L[i, p1]*L[j, p1].conjugate()*D[p1, p1] else: summ += L[i, p1]*L[j, p1]*D[p1, p1] else: break else: break L[i, j] = dps((L[i, j] - summ) / D[j, j]) else: # i == j D[i, i] = M[i, i] summ = 0 for k in Lrowstruc[i]: if k < i: if hermitian: summ += L[i, k]*L[i, k].conjugate()*D[k, k] else: summ += L[i, k]**2*D[k, k] else: break D[i, i] = dps(D[i, i] - summ) if hermitian and D[i, i].is_positive is False: raise NonPositiveDefiniteMatrixError( "Matrix must be positive-definite") return M._new(L), M._new(D) def _LUdecomposition(M, iszerofunc=_iszero, simpfunc=None, rankcheck=False): """Returns (L, U, perm) where L is a lower triangular matrix with unit diagonal, U is an upper triangular matrix, and perm is a list of row swap index pairs. If A is the original matrix, then A = (L*U).permuteBkwd(perm), and the row permutation matrix P such that P*A = L*U can be computed by P=eye(A.row).permuteFwd(perm). See documentation for LUCombined for details about the keyword argument rankcheck, iszerofunc, and simpfunc. Parameters ========== rankcheck : bool, optional Determines if this function should detect the rank deficiency of the matrixis and should raise a ``ValueError``. iszerofunc : function, optional A function which determines if a given expression is zero. The function should be a callable that takes a single sympy expression and returns a 3-valued boolean value ``True``, ``False``, or ``None``. It is internally used by the pivot searching algorithm. See the notes section for a more information about the pivot searching algorithm. simpfunc : function or None, optional A function that simplifies the input. If this is specified as a function, this function should be a callable that takes a single sympy expression and returns an another sympy expression that is algebraically equivalent. If ``None``, it indicates that the pivot search algorithm should not attempt to simplify any candidate pivots. It is internally used by the pivot searching algorithm. See the notes section for a more information about the pivot searching algorithm. 
Examples ======== >>> from sympy import Matrix >>> a = Matrix([[4, 3], [6, 3]]) >>> L, U, _ = a.LUdecomposition() >>> L Matrix([ [ 1, 0], [3/2, 1]]) >>> U Matrix([ [4, 3], [0, -3/2]]) See Also ======== sympy.matrices.dense.DenseMatrix.cholesky sympy.matrices.dense.DenseMatrix.LDLdecomposition QRdecomposition LUdecomposition_Simple LUdecompositionFF LUsolve """ combined, p = M.LUdecomposition_Simple(iszerofunc=iszerofunc, simpfunc=simpfunc, rankcheck=rankcheck) # L is lower triangular ``M.rows x M.rows`` # U is upper triangular ``M.rows x M.cols`` # L has unit diagonal. For each column in combined, the subcolumn # below the diagonal of combined is shared by L. # If L has more columns than combined, then the remaining subcolumns # below the diagonal of L are zero. # The upper triangular portion of L and combined are equal. def entry_L(i, j): if i < j: # Super diagonal entry return M.zero elif i == j: return M.one elif j < combined.cols: return combined[i, j] # Subdiagonal entry of L with no corresponding # entry in combined return M.zero def entry_U(i, j): return M.zero if i > j else combined[i, j] L = M._new(combined.rows, combined.rows, entry_L) U = M._new(combined.rows, combined.cols, entry_U) return L, U, p def _LUdecomposition_Simple(M, iszerofunc=_iszero, simpfunc=None, rankcheck=False): r"""Compute the PLU decomposition of the matrix. Parameters ========== rankcheck : bool, optional Determines if this function should detect the rank deficiency of the matrixis and should raise a ``ValueError``. iszerofunc : function, optional A function which determines if a given expression is zero. The function should be a callable that takes a single sympy expression and returns a 3-valued boolean value ``True``, ``False``, or ``None``. It is internally used by the pivot searching algorithm. See the notes section for a more information about the pivot searching algorithm. simpfunc : function or None, optional A function that simplifies the input. If this is specified as a function, this function should be a callable that takes a single sympy expression and returns an another sympy expression that is algebraically equivalent. If ``None``, it indicates that the pivot search algorithm should not attempt to simplify any candidate pivots. It is internally used by the pivot searching algorithm. See the notes section for a more information about the pivot searching algorithm. Returns ======= (lu, row_swaps) : (Matrix, list) If the original matrix is a $m, n$ matrix: *lu* is a $m, n$ matrix, which contains result of the decomposition in a compresed form. See the notes section to see how the matrix is compressed. *row_swaps* is a $m$-element list where each element is a pair of row exchange indices. ``A = (L*U).permute_backward(perm)``, and the row permutation matrix $P$ from the formula $P A = L U$ can be computed by ``P=eye(A.row).permute_forward(perm)``. Raises ====== ValueError Raised if ``rankcheck=True`` and the matrix is found to be rank deficient during the computation. Notes ===== About the PLU decomposition: PLU decomposition is a generalization of a LU decomposition which can be extended for rank-deficient matrices. It can further be generalized for non-square matrices, and this is the notation that SymPy is using. PLU decomposition is a decomposition of a $m, n$ matrix $A$ in the form of $P A = L U$ where * $L$ is a $m, m$ lower triangular matrix with unit diagonal entries. * $U$ is a $m, n$ upper triangular matrix. * $P$ is a $m, m$ permutation matrix. 
So, for a square matrix, the decomposition would look like: .. math:: L = \begin{bmatrix} 1 & 0 & 0 & \cdots & 0 \\ L_{1, 0} & 1 & 0 & \cdots & 0 \\ L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1 \end{bmatrix} .. math:: U = \begin{bmatrix} U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\ 0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\ 0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & U_{n-1, n-1} \end{bmatrix} And for a matrix with more rows than the columns, the decomposition would look like: .. math:: L = \begin{bmatrix} 1 & 0 & 0 & \cdots & 0 & 0 & \cdots & 0 \\ L_{1, 0} & 1 & 0 & \cdots & 0 & 0 & \cdots & 0 \\ L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\ L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1 & 0 & \cdots & 0 \\ L_{n, 0} & L_{n, 1} & L_{n, 2} & \cdots & L_{n, n-1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\ L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & L_{m-1, n-1} & 0 & \cdots & 1 \\ \end{bmatrix} .. math:: U = \begin{bmatrix} U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\ 0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\ 0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & U_{n-1, n-1} \\ 0 & 0 & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \end{bmatrix} Finally, for a matrix with more columns than the rows, the decomposition would look like: .. math:: L = \begin{bmatrix} 1 & 0 & 0 & \cdots & 0 \\ L_{1, 0} & 1 & 0 & \cdots & 0 \\ L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & 1 \end{bmatrix} .. math:: U = \begin{bmatrix} U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1} & \cdots & U_{0, n-1} \\ 0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1} & \cdots & U_{1, n-1} \\ 0 & 0 & U_{2, 2} & \cdots & U_{2, m-1} & \cdots & U_{2, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots & \cdots & \vdots \\ 0 & 0 & 0 & \cdots & U_{m-1, m-1} & \cdots & U_{m-1, n-1} \\ \end{bmatrix} About the compressed LU storage: The results of the decomposition are often stored in compressed forms rather than returning $L$ and $U$ matrices individually. It may be less intiuitive, but it is commonly used for a lot of numeric libraries because of the efficiency. The storage matrix is defined as following for this specific method: * The subdiagonal elements of $L$ are stored in the subdiagonal portion of $LU$, that is $LU_{i, j} = L_{i, j}$ whenever $i > j$. * The elements on the diagonal of $L$ are all 1, and are not explicitly stored. * $U$ is stored in the upper triangular portion of $LU$, that is $LU_{i, j} = U_{i, j}$ whenever $i <= j$. * For a case of $m > n$, the right side of the $L$ matrix is trivial to store. * For a case of $m < n$, the below side of the $U$ matrix is trivial to store. So, for a square matrix, the compressed output matrix would be: .. 
math:: LU = \begin{bmatrix} U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\ L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\ L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & U_{n-1, n-1} \end{bmatrix} For a matrix with more rows than the columns, the compressed output matrix would be: .. math:: LU = \begin{bmatrix} U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\ L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\ L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & U_{n-1, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & L_{m-1, n-1} \\ \end{bmatrix} For a matrix with more columns than the rows, the compressed output matrix would be: .. math:: LU = \begin{bmatrix} U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1} & \cdots & U_{0, n-1} \\ L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1} & \cdots & U_{1, n-1} \\ L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, m-1} & \cdots & U_{2, n-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots & \cdots & \vdots \\ L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & U_{m-1, m-1} & \cdots & U_{m-1, n-1} \\ \end{bmatrix} About the pivot searching algorithm: When a matrix contains symbolic entries, the pivot search algorithm differs from the case where every entry can be categorized as zero or nonzero. The algorithm searches column by column through the submatrix whose top left entry coincides with the pivot position. If it exists, the pivot is the first entry in the current search column that iszerofunc guarantees is nonzero. If no such candidate exists, then each candidate pivot is simplified if simpfunc is not None. The search is repeated, with the difference that a candidate may be the pivot if ``iszerofunc()`` cannot guarantee that it is nonzero. In the second search the pivot is the first candidate that iszerofunc can guarantee is nonzero. If no such candidate exists, then the pivot is the first candidate for which iszerofunc returns None. If no such candidate exists, then the search is repeated in the next column to the right. The pivot search algorithm differs from the one in ``rref()``, which relies on ``_find_reasonable_pivot()``. Future versions of ``LUdecomposition_simple()`` may use ``_find_reasonable_pivot()``. See Also ======== sympy.matrices.matrices.MatrixBase.LUdecomposition LUdecompositionFF LUsolve """ if rankcheck: # https://github.com/sympy/sympy/issues/9796 pass if M.rows == 0 or M.cols == 0: # Define LU decomposition of a matrix with no entries as a matrix # of the same dimensions with all zero entries. return M.zeros(M.rows, M.cols), [] dps = _get_intermediate_simp() lu = M.as_mutable() row_swaps = [] pivot_col = 0 for pivot_row in range(0, lu.rows - 1): # Search for pivot. Prefer entry that iszeropivot determines # is nonzero, over entry that iszeropivot cannot guarantee # is zero. # XXX ``_find_reasonable_pivot`` uses slow zero testing. Blocked by bug #10279 # Future versions of LUdecomposition_simple can pass iszerofunc and simpfunc # to _find_reasonable_pivot(). # In pass 3 of _find_reasonable_pivot(), the predicate in ``if x.equals(S.Zero):`` # calls sympy.simplify(), and not the simplification function passed in via # the keyword argument simpfunc. 
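        # Outline of the elimination loop below (descriptive comments only,
        # no behavioral change):
        #   1. for the current pivot_row, scan columns to the right until
        #      _find_reasonable_pivot_naive() reports a usable pivot (or the
        #      columns are exhausted);
        #   2. if the chosen pivot sits below pivot_row, swap the two rows
        #      and record the swap in row_swaps;
        #   3. eliminate the entries below the pivot, storing the multipliers
        #      in the subdiagonal part of ``lu`` (the compressed L factor).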
iszeropivot = True while pivot_col != M.cols and iszeropivot: sub_col = (lu[r, pivot_col] for r in range(pivot_row, M.rows)) pivot_row_offset, pivot_value, is_assumed_non_zero, ind_simplified_pairs =\ _find_reasonable_pivot_naive(sub_col, iszerofunc, simpfunc) iszeropivot = pivot_value is None if iszeropivot: # All candidate pivots in this column are zero. # Proceed to next column. pivot_col += 1 if rankcheck and pivot_col != pivot_row: # All entries including and below the pivot position are # zero, which indicates that the rank of the matrix is # strictly less than min(num rows, num cols) # Mimic behavior of previous implementation, by throwing a # ValueError. raise ValueError("Rank of matrix is strictly less than" " number of rows or columns." " Pass keyword argument" " rankcheck=False to compute" " the LU decomposition of this matrix.") candidate_pivot_row = None if pivot_row_offset is None else pivot_row + pivot_row_offset if candidate_pivot_row is None and iszeropivot: # If candidate_pivot_row is None and iszeropivot is True # after pivot search has completed, then the submatrix # below and to the right of (pivot_row, pivot_col) is # all zeros, indicating that Gaussian elimination is # complete. return lu, row_swaps # Update entries simplified during pivot search. for offset, val in ind_simplified_pairs: lu[pivot_row + offset, pivot_col] = val if pivot_row != candidate_pivot_row: # Row swap book keeping: # Record which rows were swapped. # Update stored portion of L factor by multiplying L on the # left and right with the current permutation. # Swap rows of U. row_swaps.append([pivot_row, candidate_pivot_row]) # Update L. lu[pivot_row, 0:pivot_row], lu[candidate_pivot_row, 0:pivot_row] = \ lu[candidate_pivot_row, 0:pivot_row], lu[pivot_row, 0:pivot_row] # Swap pivot row of U with candidate pivot row. lu[pivot_row, pivot_col:lu.cols], lu[candidate_pivot_row, pivot_col:lu.cols] = \ lu[candidate_pivot_row, pivot_col:lu.cols], lu[pivot_row, pivot_col:lu.cols] # Introduce zeros below the pivot by adding a multiple of the # pivot row to a row under it, and store the result in the # row under it. # Only entries in the target row whose index is greater than # start_col may be nonzero. start_col = pivot_col + 1 for row in range(pivot_row + 1, lu.rows): # Store factors of L in the subcolumn below # (pivot_row, pivot_row). lu[row, pivot_row] = \ dps(lu[row, pivot_col]/lu[pivot_row, pivot_col]) # Form the linear combination of the pivot row and the current # row below the pivot row that zeros the entries below the pivot. # Employing slicing instead of a loop here raises # NotImplementedError: Cannot add Zero to MutableSparseMatrix # in sympy/matrices/tests/test_sparse.py. # c = pivot_row + 1 if pivot_row == pivot_col else pivot_col for c in range(start_col, lu.cols): lu[row, c] = dps(lu[row, c] - lu[row, pivot_row]*lu[pivot_row, c]) if pivot_row != pivot_col: # matrix rank < min(num rows, num cols), # so factors of L are not stored directly below the pivot. # These entries are zero by construction, so don't bother # computing them. for row in range(pivot_row + 1, lu.rows): lu[row, pivot_col] = M.zero pivot_col += 1 if pivot_col == lu.cols: # All candidate pivots are zero implies that Gaussian # elimination is complete. return lu, row_swaps if rankcheck: if iszerofunc( lu[Min(lu.rows, lu.cols) - 1, Min(lu.rows, lu.cols) - 1]): raise ValueError("Rank of matrix is strictly less than" " number of rows or columns." 
" Pass keyword argument" " rankcheck=False to compute" " the LU decomposition of this matrix.") return lu, row_swaps def _LUdecompositionFF(M): """Compute a fraction-free LU decomposition. Returns 4 matrices P, L, D, U such that PA = L D**-1 U. If the elements of the matrix belong to some integral domain I, then all elements of L, D and U are guaranteed to belong to I. See Also ======== sympy.matrices.matrices.MatrixBase.LUdecomposition LUdecomposition_Simple LUsolve References ========== .. [1] W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms for LU and QR factors". Frontiers in Computer Science in China, Vol 2, no. 1, pp. 67-80, 2008. """ from sympy.matrices import SparseMatrix zeros = SparseMatrix.zeros eye = SparseMatrix.eye n, m = M.rows, M.cols U, L, P = M.as_mutable(), eye(n), eye(n) DD = zeros(n, n) oldpivot = 1 for k in range(n - 1): if U[k, k] == 0: for kpivot in range(k + 1, n): if U[kpivot, k]: break else: raise ValueError("Matrix is not full rank") U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:] L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k] P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :] L [k, k] = Ukk = U[k, k] DD[k, k] = oldpivot * Ukk for i in range(k + 1, n): L[i, k] = Uik = U[i, k] for j in range(k + 1, m): U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot U[i, k] = 0 oldpivot = Ukk DD[n - 1, n - 1] = oldpivot return P, L, DD, U def _QRdecomposition_optional(M, normalize=True): def dot(u, v): return u.dot(v, hermitian=True) dps = _get_intermediate_simp(expand_mul, expand_mul) A = M.as_mutable() ranked = list() Q = A R = A.zeros(A.cols) for j in range(A.cols): for i in range(j): if Q[:, i].is_zero_matrix: continue R[i, j] = dot(Q[:, i], Q[:, j]) / dot(Q[:, i], Q[:, i]) R[i, j] = dps(R[i, j]) Q[:, j] -= Q[:, i] * R[i, j] Q[:, j] = dps(Q[:, j]) if Q[:, j].is_zero_matrix is False: ranked.append(j) R[j, j] = M.one Q = Q.extract(range(Q.rows), ranked) R = R.extract(ranked, range(R.cols)) if normalize: # Normalization for i in range(Q.cols): norm = Q[:, i].norm() Q[:, i] /= norm R[i, :] *= norm return M.__class__(Q), M.__class__(R) def _QRdecomposition(M): r"""Returns a QR decomposition. Explanation =========== A QR decomposition is a decomposition in the form $A = Q R$ where - $Q$ is a column orthogonal matrix. - $R$ is a upper triangular (trapezoidal) matrix. A column orthogonal matrix satisfies $\mathbb{I} = Q^H Q$ while a full orthogonal matrix satisfies relation $\mathbb{I} = Q Q^H = Q^H Q$ where $I$ is an identity matrix with matching dimensions. For matrices which are not square or are rank-deficient, it is sufficient to return a column orthogonal matrix because augmenting them may introduce redundant computations. And an another advantage of this is that you can easily inspect the matrix rank by counting the number of columns of $Q$. If you want to augment the results to return a full orthogonal decomposition, you should use the following procedures. - Augment the $Q$ matrix with columns that are orthogonal to every other columns and make it square. - Augument the $R$ matrix with zero rows to make it have the same shape as the original matrix. The procedure will be illustrated in the examples section. 
Examples ======== A full rank matrix example: >>> from sympy import Matrix >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]]) >>> Q, R = A.QRdecomposition() >>> Q Matrix([ [ 6/7, -69/175, -58/175], [ 3/7, 158/175, 6/175], [-2/7, 6/35, -33/35]]) >>> R Matrix([ [14, 21, -14], [ 0, 175, -70], [ 0, 0, 35]]) If the matrix is square and full rank, the $Q$ matrix becomes orthogonal in both directions, and needs no augmentation. >>> Q * Q.H Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> Q.H * Q Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> A == Q*R True A rank deficient matrix example: >>> A = Matrix([[12, -51, 0], [6, 167, 0], [-4, 24, 0]]) >>> Q, R = A.QRdecomposition() >>> Q Matrix([ [ 6/7, -69/175], [ 3/7, 158/175], [-2/7, 6/35]]) >>> R Matrix([ [14, 21, 0], [ 0, 175, 0]]) QRdecomposition might return a matrix Q that is rectangular. In this case the orthogonality condition might be satisfied as $\mathbb{I} = Q.H*Q$ but not in the reversed product $\mathbb{I} = Q * Q.H$. >>> Q.H * Q Matrix([ [1, 0], [0, 1]]) >>> Q * Q.H Matrix([ [27261/30625, 348/30625, -1914/6125], [ 348/30625, 30589/30625, 198/6125], [ -1914/6125, 198/6125, 136/1225]]) If you want to augment the results to be a full orthogonal decomposition, you should augment $Q$ with an another orthogonal column. You are able to append an arbitrary standard basis that are linearly independent to every other columns and you can run the Gram-Schmidt process to make them augmented as orthogonal basis. >>> Q_aug = Q.row_join(Matrix([0, 0, 1])) >>> Q_aug = Q_aug.QRdecomposition()[0] >>> Q_aug Matrix([ [ 6/7, -69/175, 58/175], [ 3/7, 158/175, -6/175], [-2/7, 6/35, 33/35]]) >>> Q_aug.H * Q_aug Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> Q_aug * Q_aug.H Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) Augmenting the $R$ matrix with zero row is straightforward. >>> R_aug = R.col_join(Matrix([[0, 0, 0]])) >>> R_aug Matrix([ [14, 21, 0], [ 0, 175, 0], [ 0, 0, 0]]) >>> Q_aug * R_aug == A True A zero matrix example: >>> from sympy import Matrix >>> A = Matrix.zeros(3, 4) >>> Q, R = A.QRdecomposition() They may return matrices with zero rows and columns. >>> Q Matrix(3, 0, []) >>> R Matrix(0, 4, []) >>> Q*R Matrix([ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) As the same augmentation rule described above, $Q$ can be augmented with columns of an identity matrix and $R$ can be augmented with rows of a zero matrix. >>> Q_aug = Q.row_join(Matrix.eye(3)) >>> R_aug = R.col_join(Matrix.zeros(3, 4)) >>> Q_aug * Q_aug.T Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> R_aug Matrix([ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) >>> Q_aug * R_aug == A True See Also ======== sympy.matrices.dense.DenseMatrix.cholesky sympy.matrices.dense.DenseMatrix.LDLdecomposition sympy.matrices.matrices.MatrixBase.LUdecomposition QRsolve """ return _QRdecomposition_optional(M, normalize=True)
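# A minimal usage sketch for the fraction-free decomposition implemented in
# _LUdecompositionFF above.  The matrix below is an arbitrary illustrative
# choice (it is not taken from the docstrings); the identity checked is the
# one stated there, P*A == L*D**-1*U.
if __name__ == '__main__':
    from sympy import Matrix

    A = Matrix([[1, 2, 3], [2, 3, 1], [3, 1, 2]])
    P, L, D, U = A.LUdecompositionFF()

    # L, D and U keep integer entries; D collects the (scaled) pivots.
    assert P*A == L*D.inv()*U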
""" Basic methods common to all matrices to be used when creating more advanced matrices (e.g., matrices over rings, etc.). """ from collections import defaultdict from collections.abc import Iterable from inspect import isfunction from functools import reduce from sympy.core.logic import FuzzyBool from sympy.assumptions.refine import refine from sympy.core import SympifyError, Add from sympy.core.basic import Atom from sympy.core.compatibility import as_int, is_sequence from sympy.core.decorators import call_highest_priority from sympy.core.logic import fuzzy_and from sympy.core.singleton import S from sympy.core.symbol import Symbol from sympy.core.sympify import sympify from sympy.functions import Abs from sympy.polys.polytools import Poly from sympy.simplify import simplify as _simplify from sympy.simplify.simplify import dotprodsimp as _dotprodsimp from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.utilities.iterables import flatten from sympy.utilities.misc import filldedent from sympy.tensor.array import NDimArray from .utilities import _get_intermediate_simp_bool class MatrixError(Exception): pass class ShapeError(ValueError, MatrixError): """Wrong matrix shape""" pass class NonSquareMatrixError(ShapeError): pass class NonInvertibleMatrixError(ValueError, MatrixError): """The matrix in not invertible (division by multidimensional zero error).""" pass class NonPositiveDefiniteMatrixError(ValueError, MatrixError): """The matrix is not a positive-definite matrix.""" pass class MatrixRequired: """All subclasses of matrix objects must implement the required matrix properties listed here.""" rows = None # type: int cols = None # type: int _simplify = None @classmethod def _new(cls, *args, **kwargs): """`_new` must, at minimum, be callable as `_new(rows, cols, mat) where mat is a flat list of the elements of the matrix.""" raise NotImplementedError("Subclasses must implement this.") def __eq__(self, other): raise NotImplementedError("Subclasses must implement this.") def __getitem__(self, key): """Implementations of __getitem__ should accept ints, in which case the matrix is indexed as a flat list, tuples (i,j) in which case the (i,j) entry is returned, slices, or mixed tuples (a,b) where a and b are any combintion of slices and integers.""" raise NotImplementedError("Subclasses must implement this.") def __len__(self): """The total number of entries in the matrix.""" raise NotImplementedError("Subclasses must implement this.") @property def shape(self): raise NotImplementedError("Subclasses must implement this.") class MatrixShaping(MatrixRequired): """Provides basic matrix shaping and extracting of submatrices""" def _eval_col_del(self, col): def entry(i, j): return self[i, j] if j < col else self[i, j + 1] return self._new(self.rows, self.cols - 1, entry) def _eval_col_insert(self, pos, other): def entry(i, j): if j < pos: return self[i, j] elif pos <= j < pos + other.cols: return other[i, j - pos] return self[i, j - other.cols] return self._new(self.rows, self.cols + other.cols, lambda i, j: entry(i, j)) def _eval_col_join(self, other): rows = self.rows def entry(i, j): if i < rows: return self[i, j] return other[i - rows, j] return classof(self, other)._new(self.rows + other.rows, self.cols, lambda i, j: entry(i, j)) def _eval_extract(self, rowsList, colsList): mat = list(self) cols = self.cols indices = (i * cols + j for i in rowsList for j in colsList) return self._new(len(rowsList), len(colsList), list(mat[i] for i in indices)) def 
_eval_get_diag_blocks(self): sub_blocks = [] def recurse_sub_blocks(M): i = 1 while i <= M.shape[0]: if i == 1: to_the_right = M[0, i:] to_the_bottom = M[i:, 0] else: to_the_right = M[:i, i:] to_the_bottom = M[i:, :i] if any(to_the_right) or any(to_the_bottom): i += 1 continue else: sub_blocks.append(M[:i, :i]) if M.shape == M[:i, :i].shape: return else: recurse_sub_blocks(M[i:, i:]) return recurse_sub_blocks(self) return sub_blocks def _eval_row_del(self, row): def entry(i, j): return self[i, j] if i < row else self[i + 1, j] return self._new(self.rows - 1, self.cols, entry) def _eval_row_insert(self, pos, other): entries = list(self) insert_pos = pos * self.cols entries[insert_pos:insert_pos] = list(other) return self._new(self.rows + other.rows, self.cols, entries) def _eval_row_join(self, other): cols = self.cols def entry(i, j): if j < cols: return self[i, j] return other[i, j - cols] return classof(self, other)._new(self.rows, self.cols + other.cols, lambda i, j: entry(i, j)) def _eval_tolist(self): return [list(self[i,:]) for i in range(self.rows)] def _eval_todok(self): dok = {} rows, cols = self.shape for i in range(rows): for j in range(cols): val = self[i, j] if val != self.zero: dok[i, j] = val return dok def _eval_vec(self): rows = self.rows def entry(n, _): # we want to read off the columns first j = n // rows i = n - j * rows return self[i, j] return self._new(len(self), 1, entry) def _eval_vech(self, diagonal): c = self.cols v = [] if diagonal: for j in range(c): for i in range(j, c): v.append(self[i, j]) else: for j in range(c): for i in range(j + 1, c): v.append(self[i, j]) return self._new(len(v), 1, v) def col_del(self, col): """Delete the specified column.""" if col < 0: col += self.cols if not 0 <= col < self.cols: raise IndexError("Column {} is out of range.".format(col)) return self._eval_col_del(col) def col_insert(self, pos, other): """Insert one or more columns at the given column position. Examples ======== >>> from sympy import zeros, ones >>> M = zeros(3) >>> V = ones(3, 1) >>> M.col_insert(1, V) Matrix([ [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]]) See Also ======== col row_insert """ # Allows you to build a matrix even if it is null matrix if not self: return type(self)(other) pos = as_int(pos) if pos < 0: pos = self.cols + pos if pos < 0: pos = 0 elif pos > self.cols: pos = self.cols if self.rows != other.rows: raise ShapeError( "`self` and `other` must have the same number of rows.") return self._eval_col_insert(pos, other) def col_join(self, other): """Concatenates two matrices along self's last and other's first row. Examples ======== >>> from sympy import zeros, ones >>> M = zeros(3) >>> V = ones(1, 3) >>> M.col_join(V) Matrix([ [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 1]]) See Also ======== col row_join """ # A null matrix can always be stacked (see #10770) if self.rows == 0 and self.cols != other.cols: return self._new(0, other.cols, []).col_join(other) if self.cols != other.cols: raise ShapeError( "`self` and `other` must have the same number of columns.") return self._eval_col_join(other) def col(self, j): """Elementary column selector. Examples ======== >>> from sympy import eye >>> eye(2).col(0) Matrix([ [1], [0]]) See Also ======== row sympy.matrices.dense.MutableDenseMatrix.col_op sympy.matrices.dense.MutableDenseMatrix.col_swap col_del col_join col_insert """ return self[:, j] def extract(self, rowsList, colsList): """Return a submatrix by specifying a list of rows and columns. Negative indices can be given. 
All indices must be in the range -n <= i < n where n is the number of rows or columns. Examples ======== >>> from sympy import Matrix >>> m = Matrix(4, 3, range(12)) >>> m Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]) >>> m.extract([0, 1, 3], [0, 1]) Matrix([ [0, 1], [3, 4], [9, 10]]) Rows or columns can be repeated: >>> m.extract([0, 0, 1], [-1]) Matrix([ [2], [2], [5]]) Every other row can be taken by using range to provide the indices: >>> m.extract(range(0, m.rows, 2), [-1]) Matrix([ [2], [8]]) RowsList or colsList can also be a list of booleans, in which case the rows or columns corresponding to the True values will be selected: >>> m.extract([0, 1, 2, 3], [True, False, True]) Matrix([ [0, 2], [3, 5], [6, 8], [9, 11]]) """ if not is_sequence(rowsList) or not is_sequence(colsList): raise TypeError("rowsList and colsList must be iterable") # ensure rowsList and colsList are lists of integers if rowsList and all(isinstance(i, bool) for i in rowsList): rowsList = [index for index, item in enumerate(rowsList) if item] if colsList and all(isinstance(i, bool) for i in colsList): colsList = [index for index, item in enumerate(colsList) if item] # ensure everything is in range rowsList = [a2idx(k, self.rows) for k in rowsList] colsList = [a2idx(k, self.cols) for k in colsList] return self._eval_extract(rowsList, colsList) def get_diag_blocks(self): """Obtains the square sub-matrices on the main diagonal of a square matrix. Useful for inverting symbolic matrices or solving systems of linear equations which may be decoupled by having a block diagonal structure. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y, z >>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]]) >>> a1, a2, a3 = A.get_diag_blocks() >>> a1 Matrix([ [1, 3], [y, z**2]]) >>> a2 Matrix([[x]]) >>> a3 Matrix([[0]]) """ return self._eval_get_diag_blocks() @classmethod def hstack(cls, *args): """Return a matrix formed by joining args horizontally (i.e. by repeated application of row_join). Examples ======== >>> from sympy.matrices import Matrix, eye >>> Matrix.hstack(eye(2), 2*eye(2)) Matrix([ [1, 0, 2, 0], [0, 1, 0, 2]]) """ if len(args) == 0: return cls._new() kls = type(args[0]) return reduce(kls.row_join, args) def reshape(self, rows, cols): """Reshape the matrix. Total number of elements must remain the same. Examples ======== >>> from sympy import Matrix >>> m = Matrix(2, 3, lambda i, j: 1) >>> m Matrix([ [1, 1, 1], [1, 1, 1]]) >>> m.reshape(1, 6) Matrix([[1, 1, 1, 1, 1, 1]]) >>> m.reshape(3, 2) Matrix([ [1, 1], [1, 1], [1, 1]]) """ if self.rows * self.cols != rows * cols: raise ValueError("Invalid reshape parameters %d %d" % (rows, cols)) return self._new(rows, cols, lambda i, j: self[i * cols + j]) def row_del(self, row): """Delete the specified row.""" if row < 0: row += self.rows if not 0 <= row < self.rows: raise IndexError("Row {} is out of range.".format(row)) return self._eval_row_del(row) def row_insert(self, pos, other): """Insert one or more rows at the given row position. 
Examples ======== >>> from sympy import zeros, ones >>> M = zeros(3) >>> V = ones(1, 3) >>> M.row_insert(1, V) Matrix([ [0, 0, 0], [1, 1, 1], [0, 0, 0], [0, 0, 0]]) See Also ======== row col_insert """ # Allows you to build a matrix even if it is null matrix if not self: return self._new(other) pos = as_int(pos) if pos < 0: pos = self.rows + pos if pos < 0: pos = 0 elif pos > self.rows: pos = self.rows if self.cols != other.cols: raise ShapeError( "`self` and `other` must have the same number of columns.") return self._eval_row_insert(pos, other) def row_join(self, other): """Concatenates two matrices along self's last and rhs's first column Examples ======== >>> from sympy import zeros, ones >>> M = zeros(3) >>> V = ones(3, 1) >>> M.row_join(V) Matrix([ [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1]]) See Also ======== row col_join """ # A null matrix can always be stacked (see #10770) if self.cols == 0 and self.rows != other.rows: return self._new(other.rows, 0, []).row_join(other) if self.rows != other.rows: raise ShapeError( "`self` and `rhs` must have the same number of rows.") return self._eval_row_join(other) def diagonal(self, k=0): """Returns the kth diagonal of self. The main diagonal corresponds to `k=0`; diagonals above and below correspond to `k > 0` and `k < 0`, respectively. The values of `self[i, j]` for which `j - i = k`, are returned in order of increasing `i + j`, starting with `i + j = |k|`. Examples ======== >>> from sympy import Matrix >>> m = Matrix(3, 3, lambda i, j: j - i); m Matrix([ [ 0, 1, 2], [-1, 0, 1], [-2, -1, 0]]) >>> _.diagonal() Matrix([[0, 0, 0]]) >>> m.diagonal(1) Matrix([[1, 1]]) >>> m.diagonal(-2) Matrix([[-2]]) Even though the diagonal is returned as a Matrix, the element retrieval can be done with a single index: >>> Matrix.diag(1, 2, 3).diagonal()[1] # instead of [0, 1] 2 See Also ======== diag - to create a diagonal matrix """ rv = [] k = as_int(k) r = 0 if k > 0 else -k c = 0 if r else k while True: if r == self.rows or c == self.cols: break rv.append(self[r, c]) r += 1 c += 1 if not rv: raise ValueError(filldedent(''' The %s diagonal is out of range [%s, %s]''' % ( k, 1 - self.rows, self.cols - 1))) return self._new(1, len(rv), rv) def row(self, i): """Elementary row selector. Examples ======== >>> from sympy import eye >>> eye(2).row(0) Matrix([[1, 0]]) See Also ======== col sympy.matrices.dense.MutableDenseMatrix.row_op sympy.matrices.dense.MutableDenseMatrix.row_swap row_del row_join row_insert """ return self[i, :] @property def shape(self): """The shape (dimensions) of the matrix as the 2-tuple (rows, cols). Examples ======== >>> from sympy.matrices import zeros >>> M = zeros(2, 3) >>> M.shape (2, 3) >>> M.rows 2 >>> M.cols 3 """ return (self.rows, self.cols) def todok(self): """Return the matrix as dictionary of keys. Examples ======== >>> from sympy import Matrix >>> M = Matrix.eye(3) >>> M.todok() {(0, 0): 1, (1, 1): 1, (2, 2): 1} """ return self._eval_todok() def tolist(self): """Return the Matrix as a nested Python list. 
Examples ======== >>> from sympy import Matrix, ones >>> m = Matrix(3, 3, range(9)) >>> m Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> m.tolist() [[0, 1, 2], [3, 4, 5], [6, 7, 8]] >>> ones(3, 0).tolist() [[], [], []] When there are no rows then it will not be possible to tell how many columns were in the original matrix: >>> ones(0, 3).tolist() [] """ if not self.rows: return [] if not self.cols: return [[] for i in range(self.rows)] return self._eval_tolist() def vec(self): """Return the Matrix converted into a one column matrix by stacking columns Examples ======== >>> from sympy import Matrix >>> m=Matrix([[1, 3], [2, 4]]) >>> m Matrix([ [1, 3], [2, 4]]) >>> m.vec() Matrix([ [1], [2], [3], [4]]) See Also ======== vech """ return self._eval_vec() def vech(self, diagonal=True, check_symmetry=True): """Reshapes the matrix into a column vector by stacking the elements in the lower triangle. Parameters ========== diagonal : bool, optional If ``True``, it includes the diagonal elements. check_symmetry : bool, optional If ``True``, it checks whether the matrix is symmetric. Examples ======== >>> from sympy import Matrix >>> m=Matrix([[1, 2], [2, 3]]) >>> m Matrix([ [1, 2], [2, 3]]) >>> m.vech() Matrix([ [1], [2], [3]]) >>> m.vech(diagonal=False) Matrix([[2]]) Notes ===== This should work for symmetric matrices and ``vech`` can represent symmetric matrices in vector form with less size than ``vec``. See Also ======== vec """ if not self.is_square: raise NonSquareMatrixError if check_symmetry and not self.is_symmetric(): raise ValueError("The matrix is not symmetric.") return self._eval_vech(diagonal) @classmethod def vstack(cls, *args): """Return a matrix formed by joining args vertically (i.e. by repeated application of col_join). Examples ======== >>> from sympy.matrices import Matrix, eye >>> Matrix.vstack(eye(2), 2*eye(2)) Matrix([ [1, 0], [0, 1], [2, 0], [0, 2]]) """ if len(args) == 0: return cls._new() kls = type(args[0]) return reduce(kls.col_join, args) class MatrixSpecial(MatrixRequired): """Construction of special matrices""" @classmethod def _eval_diag(cls, rows, cols, diag_dict): """diag_dict is a defaultdict containing all the entries of the diagonal matrix.""" def entry(i, j): return diag_dict[(i, j)] return cls._new(rows, cols, entry) @classmethod def _eval_eye(cls, rows, cols): def entry(i, j): return cls.one if i == j else cls.zero return cls._new(rows, cols, entry) @classmethod def _eval_jordan_block(cls, rows, cols, eigenvalue, band='upper'): if band == 'lower': def entry(i, j): if i == j: return eigenvalue elif j + 1 == i: return cls.one return cls.zero else: def entry(i, j): if i == j: return eigenvalue elif i + 1 == j: return cls.one return cls.zero return cls._new(rows, cols, entry) @classmethod def _eval_ones(cls, rows, cols): def entry(i, j): return cls.one return cls._new(rows, cols, entry) @classmethod def _eval_zeros(cls, rows, cols): def entry(i, j): return cls.zero return cls._new(rows, cols, entry) @classmethod def diag(kls, *args, strict=False, unpack=True, rows=None, cols=None, **kwargs): """Returns a matrix with the specified diagonal. If matrices are passed, a block-diagonal matrix is created (i.e. the "direct sum" of the matrices). kwargs ====== rows : rows of the resulting matrix; computed if not given. cols : columns of the resulting matrix; computed if not given. cls : class for the resulting matrix unpack : bool which, when True (default), unpacks a single sequence rather than interpreting it as a Matrix. 
strict : bool which, when False (default), allows Matrices to have variable-length rows. Examples ======== >>> from sympy.matrices import Matrix >>> Matrix.diag(1, 2, 3) Matrix([ [1, 0, 0], [0, 2, 0], [0, 0, 3]]) The current default is to unpack a single sequence. If this is not desired, set `unpack=False` and it will be interpreted as a matrix. >>> Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3) True When more than one element is passed, each is interpreted as something to put on the diagonal. Lists are converted to matrices. Filling of the diagonal always continues from the bottom right hand corner of the previous item: this will create a block-diagonal matrix whether the matrices are square or not. >>> col = [1, 2, 3] >>> row = [[4, 5]] >>> Matrix.diag(col, row) Matrix([ [1, 0, 0], [2, 0, 0], [3, 0, 0], [0, 4, 5]]) When `unpack` is False, elements within a list need not all be of the same length. Setting `strict` to True would raise a ValueError for the following: >>> Matrix.diag([[1, 2, 3], [4, 5], [6]], unpack=False) Matrix([ [1, 2, 3], [4, 5, 0], [6, 0, 0]]) The type of the returned matrix can be set with the ``cls`` keyword. >>> from sympy.matrices import ImmutableMatrix >>> from sympy.utilities.misc import func_name >>> func_name(Matrix.diag(1, cls=ImmutableMatrix)) 'ImmutableDenseMatrix' A zero dimension matrix can be used to position the start of the filling at the start of an arbitrary row or column: >>> from sympy import ones >>> r2 = ones(0, 2) >>> Matrix.diag(r2, 1, 2) Matrix([ [0, 0, 1, 0], [0, 0, 0, 2]]) See Also ======== eye diagonal - to extract a diagonal .dense.diag .expressions.blockmatrix.BlockMatrix .sparsetools.banded - to create multi-diagonal matrices """ from sympy.matrices.matrices import MatrixBase from sympy.matrices.dense import Matrix from sympy.matrices.sparse import SparseMatrix klass = kwargs.get('cls', kls) if unpack and len(args) == 1 and is_sequence(args[0]) and \ not isinstance(args[0], MatrixBase): args = args[0] # fill a default dict with the diagonal entries diag_entries = defaultdict(int) rmax = cmax = 0 # keep track of the biggest index seen for m in args: if isinstance(m, list): if strict: # if malformed, Matrix will raise an error _ = Matrix(m) r, c = _.shape m = _.tolist() else: r, c, smat = SparseMatrix._handle_creation_inputs(m) for (i, j), _ in smat.items(): diag_entries[(i + rmax, j + cmax)] = _ m = [] # to skip process below elif hasattr(m, 'shape'): # a Matrix # convert to list of lists r, c = m.shape m = m.tolist() else: # in this case, we're a single value diag_entries[(rmax, cmax)] = m rmax += 1 cmax += 1 continue # process list of lists for i in range(len(m)): for j, _ in enumerate(m[i]): diag_entries[(i + rmax, j + cmax)] = _ rmax += r cmax += c if rows is None: rows, cols = cols, rows if rows is None: rows, cols = rmax, cmax else: cols = rows if cols is None else cols if rows < rmax or cols < cmax: raise ValueError(filldedent(''' The constructed matrix is {} x {} but a size of {} x {} was specified.'''.format(rmax, cmax, rows, cols))) return klass._eval_diag(rows, cols, diag_entries) @classmethod def eye(kls, rows, cols=None, **kwargs): """Returns an identity matrix. 
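For example (an arbitrary 3 x 2 shape, to show the ``rows`` and ``cols`` arguments described below):

>>> from sympy import Matrix
>>> Matrix.eye(3, 2)
Matrix([
[1, 0],
[0, 1],
[0, 0]])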
Args ==== rows : rows of the matrix cols : cols of the matrix (if None, cols=rows) kwargs ====== cls : class of the returned matrix """ if cols is None: cols = rows klass = kwargs.get('cls', kls) rows, cols = as_int(rows), as_int(cols) return klass._eval_eye(rows, cols) @classmethod def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs): """Returns a Jordan block Parameters ========== size : Integer, optional Specifies the shape of the Jordan block matrix. eigenvalue : Number or Symbol Specifies the value for the main diagonal of the matrix. .. note:: The keyword ``eigenval`` is also specified as an alias of this keyword, but it is not recommended to use. We may deprecate the alias in later release. band : 'upper' or 'lower', optional Specifies the position of the off-diagonal to put `1` s on. cls : Matrix, optional Specifies the matrix class of the output form. If it is not specified, the class type where the method is being executed on will be returned. rows, cols : Integer, optional Specifies the shape of the Jordan block matrix. See Notes section for the details of how these key works. .. note:: This feature will be deprecated in the future. Returns ======= Matrix A Jordan block matrix. Raises ====== ValueError If insufficient arguments are given for matrix size specification, or no eigenvalue is given. Examples ======== Creating a default Jordan block: >>> from sympy import Matrix >>> from sympy.abc import x >>> Matrix.jordan_block(4, x) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) Creating an alternative Jordan block matrix where `1` is on lower off-diagonal: >>> Matrix.jordan_block(4, x, band='lower') Matrix([ [x, 0, 0, 0], [1, x, 0, 0], [0, 1, x, 0], [0, 0, 1, x]]) Creating a Jordan block with keyword arguments >>> Matrix.jordan_block(size=4, eigenvalue=x) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) Notes ===== .. note:: This feature will be deprecated in the future. The keyword arguments ``size``, ``rows``, ``cols`` relates to the Jordan block size specifications. If you want to create a square Jordan block, specify either one of the three arguments. If you want to create a rectangular Jordan block, specify ``rows`` and ``cols`` individually. +--------------------------------+---------------------+ | Arguments Given | Matrix Shape | +----------+----------+----------+----------+----------+ | size | rows | cols | rows | cols | +==========+==========+==========+==========+==========+ | size | Any | size | size | +----------+----------+----------+----------+----------+ | | None | ValueError | | +----------+----------+----------+----------+ | None | rows | None | rows | rows | | +----------+----------+----------+----------+ | | None | cols | cols | cols | + +----------+----------+----------+----------+ | | rows | cols | rows | cols | +----------+----------+----------+----------+----------+ References ========== .. 
[1] https://en.wikipedia.org/wiki/Jordan_matrix """ if 'rows' in kwargs or 'cols' in kwargs: SymPyDeprecationWarning( feature="Keyword arguments 'rows' or 'cols'", issue=16102, useinstead="a more generic banded matrix constructor", deprecated_since_version="1.4" ).warn() klass = kwargs.pop('cls', kls) rows = kwargs.pop('rows', None) cols = kwargs.pop('cols', None) eigenval = kwargs.get('eigenval', None) if eigenvalue is None and eigenval is None: raise ValueError("Must supply an eigenvalue") elif eigenvalue != eigenval and None not in (eigenval, eigenvalue): raise ValueError( "Inconsistent values are given: 'eigenval'={}, " "'eigenvalue'={}".format(eigenval, eigenvalue)) else: if eigenval is not None: eigenvalue = eigenval if (size, rows, cols) == (None, None, None): raise ValueError("Must supply a matrix size") if size is not None: rows, cols = size, size elif rows is not None and cols is None: cols = rows elif cols is not None and rows is None: rows = cols rows, cols = as_int(rows), as_int(cols) return klass._eval_jordan_block(rows, cols, eigenvalue, band) @classmethod def ones(kls, rows, cols=None, **kwargs): """Returns a matrix of ones. Args ==== rows : rows of the matrix cols : cols of the matrix (if None, cols=rows) kwargs ====== cls : class of the returned matrix """ if cols is None: cols = rows klass = kwargs.get('cls', kls) rows, cols = as_int(rows), as_int(cols) return klass._eval_ones(rows, cols) @classmethod def zeros(kls, rows, cols=None, **kwargs): """Returns a matrix of zeros. Args ==== rows : rows of the matrix cols : cols of the matrix (if None, cols=rows) kwargs ====== cls : class of the returned matrix """ if cols is None: cols = rows klass = kwargs.get('cls', kls) rows, cols = as_int(rows), as_int(cols) return klass._eval_zeros(rows, cols) @classmethod def companion(kls, poly): """Returns a companion matrix of a polynomial. Examples ======== >>> from sympy import Matrix, Poly, Symbol, symbols >>> x = Symbol('x') >>> c0, c1, c2, c3, c4 = symbols('c0:5') >>> p = Poly(c0 + c1*x + c2*x**2 + c3*x**3 + c4*x**4 + x**5, x) >>> Matrix.companion(p) Matrix([ [0, 0, 0, 0, -c0], [1, 0, 0, 0, -c1], [0, 1, 0, 0, -c2], [0, 0, 1, 0, -c3], [0, 0, 0, 1, -c4]]) """ poly = kls._sympify(poly) if not isinstance(poly, Poly): raise ValueError("{} must be a Poly instance.".format(poly)) if not poly.is_monic: raise ValueError("{} must be a monic polynomial.".format(poly)) if not poly.is_univariate: raise ValueError( "{} must be a univariate polynomial.".format(poly)) size = poly.degree() if not size >= 1: raise ValueError( "{} must have degree not less than 1.".format(poly)) coeffs = poly.all_coeffs() def entry(i, j): if j == size - 1: return -coeffs[-1 - i] elif i == j + 1: return kls.one return kls.zero return kls._new(size, size, entry) class MatrixProperties(MatrixRequired): """Provides basic properties of a matrix.""" def _eval_atoms(self, *types): result = set() for i in self: result.update(i.atoms(*types)) return result def _eval_free_symbols(self): return set().union(*(i.free_symbols for i in self if i)) def _eval_has(self, *patterns): return any(a.has(*patterns) for a in self) def _eval_is_anti_symmetric(self, simpfunc): if not all(simpfunc(self[i, j] + self[j, i]).is_zero for i in range(self.rows) for j in range(self.cols)): return False return True def _eval_is_diagonal(self): for i in range(self.rows): for j in range(self.cols): if i != j and self[i, j]: return False return True # _eval_is_hermitian is called by some general sympy # routines and has a different *args signature. 
Make # sure the names don't clash by adding `_matrix_` in name. def _eval_is_matrix_hermitian(self, simpfunc): mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i].conjugate())) return mat.is_zero_matrix def _eval_is_Identity(self) -> FuzzyBool: def dirac(i, j): if i == j: return 1 return 0 return all(self[i, j] == dirac(i, j) for i in range(self.rows) for j in range(self.cols)) def _eval_is_lower_hessenberg(self): return all(self[i, j].is_zero for i in range(self.rows) for j in range(i + 2, self.cols)) def _eval_is_lower(self): return all(self[i, j].is_zero for i in range(self.rows) for j in range(i + 1, self.cols)) def _eval_is_symbolic(self): return self.has(Symbol) def _eval_is_symmetric(self, simpfunc): mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i])) return mat.is_zero_matrix def _eval_is_zero_matrix(self): if any(i.is_zero == False for i in self): return False if any(i.is_zero is None for i in self): return None return True def _eval_is_upper_hessenberg(self): return all(self[i, j].is_zero for i in range(2, self.rows) for j in range(min(self.cols, (i - 1)))) def _eval_values(self): return [i for i in self if not i.is_zero] def _has_positive_diagonals(self): diagonal_entries = (self[i, i] for i in range(self.rows)) return fuzzy_and(x.is_positive for x in diagonal_entries) def _has_nonnegative_diagonals(self): diagonal_entries = (self[i, i] for i in range(self.rows)) return fuzzy_and(x.is_nonnegative for x in diagonal_entries) def atoms(self, *types): """Returns the atoms that form the current object. Examples ======== >>> from sympy.abc import x, y >>> from sympy.matrices import Matrix >>> Matrix([[x]]) Matrix([[x]]) >>> _.atoms() {x} >>> Matrix([[x, y], [y, x]]) Matrix([ [x, y], [y, x]]) >>> _.atoms() {x, y} """ types = tuple(t if isinstance(t, type) else type(t) for t in types) if not types: types = (Atom,) return self._eval_atoms(*types) @property def free_symbols(self): """Returns the free symbols within the matrix. Examples ======== >>> from sympy.abc import x >>> from sympy.matrices import Matrix >>> Matrix([[x], [1]]).free_symbols {x} """ return self._eval_free_symbols() def has(self, *patterns): """Test whether any subexpression matches any of the patterns. Examples ======== >>> from sympy import Matrix, SparseMatrix, Float >>> from sympy.abc import x, y >>> A = Matrix(((1, x), (0.2, 3))) >>> B = SparseMatrix(((1, x), (0.2, 3))) >>> A.has(x) True >>> A.has(y) False >>> A.has(Float) True >>> B.has(x) True >>> B.has(y) False >>> B.has(Float) True """ return self._eval_has(*patterns) def is_anti_symmetric(self, simplify=True): """Check if matrix M is an antisymmetric matrix, that is, M is a square matrix with all M[i, j] == -M[j, i]. When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is simplified before testing to see if it is zero. By default, the SymPy simplify function is used. To use a custom function set simplify to a function that accepts a single argument which returns a simplified expression. To skip simplification, set simplify to False but note that although this will be faster, it may induce false negatives. Examples ======== >>> from sympy import Matrix, symbols >>> m = Matrix(2, 2, [0, 1, -1, 0]) >>> m Matrix([ [ 0, 1], [-1, 0]]) >>> m.is_anti_symmetric() True >>> x, y = symbols('x y') >>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0]) >>> m Matrix([ [ 0, 0, x], [-y, 0, 0]]) >>> m.is_anti_symmetric() False >>> from sympy.abc import x, y >>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y, ... 
-(x + 1)**2 , 0, x*y, ... -y, -x*y, 0]) Simplification of matrix elements is done by default so even though two elements which should be equal and opposite wouldn't pass an equality test, the matrix is still reported as anti-symmetric: >>> m[0, 1] == -m[1, 0] False >>> m.is_anti_symmetric() True If 'simplify=False' is used for the case when a Matrix is already simplified, this will speed things up. Here, we see that without simplification the matrix does not appear anti-symmetric: >>> m.is_anti_symmetric(simplify=False) False But if the matrix were already expanded, then it would appear anti-symmetric and simplification in the is_anti_symmetric routine is not needed: >>> m = m.expand() >>> m.is_anti_symmetric(simplify=False) True """ # accept custom simplification simpfunc = simplify if not isfunction(simplify): simpfunc = _simplify if simplify else lambda x: x if not self.is_square: return False return self._eval_is_anti_symmetric(simpfunc) def is_diagonal(self): """Check if matrix is diagonal, that is matrix in which the entries outside the main diagonal are all zero. Examples ======== >>> from sympy import Matrix, diag >>> m = Matrix(2, 2, [1, 0, 0, 2]) >>> m Matrix([ [1, 0], [0, 2]]) >>> m.is_diagonal() True >>> m = Matrix(2, 2, [1, 1, 0, 2]) >>> m Matrix([ [1, 1], [0, 2]]) >>> m.is_diagonal() False >>> m = diag(1, 2, 3) >>> m Matrix([ [1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> m.is_diagonal() True See Also ======== is_lower is_upper sympy.matrices.matrices.MatrixEigen.is_diagonalizable diagonalize """ return self._eval_is_diagonal() @property def is_weakly_diagonally_dominant(self): r"""Tests if the matrix is row weakly diagonally dominant. Explanation =========== A $n, n$ matrix $A$ is row weakly diagonally dominant if .. math:: \left|A_{i, i}\right| \ge \sum_{j = 0, j \neq i}^{n-1} \left|A_{i, j}\right| \quad {\text{for all }} i \in \{ 0, ..., n-1 \} Examples ======== >>> from sympy.matrices import Matrix >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]) >>> A.is_weakly_diagonally_dominant True >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]]) >>> A.is_weakly_diagonally_dominant False >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]) >>> A.is_weakly_diagonally_dominant True Notes ===== If you want to test whether a matrix is column diagonally dominant, you can apply the test after transposing the matrix. """ if not self.is_square: return False rows, cols = self.shape def test_row(i): summation = self.zero for j in range(cols): if i != j: summation += Abs(self[i, j]) return (Abs(self[i, i]) - summation).is_nonnegative return fuzzy_and(test_row(i) for i in range(rows)) @property def is_strongly_diagonally_dominant(self): r"""Tests if the matrix is row strongly diagonally dominant. Explanation =========== A $n, n$ matrix $A$ is row strongly diagonally dominant if .. math:: \left|A_{i, i}\right| > \sum_{j = 0, j \neq i}^{n-1} \left|A_{i, j}\right| \quad {\text{for all }} i \in \{ 0, ..., n-1 \} Examples ======== >>> from sympy.matrices import Matrix >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]) >>> A.is_strongly_diagonally_dominant True Notes ===== If you want to test whether a matrix is column diagonally dominant, you can apply the test after transposing the matrix. 
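For instance, with an arbitrary 2 x 2 matrix that is column (but not row) diagonally dominant:

>>> A = Matrix([[3, 4], [1, 5]])
>>> A.is_strongly_diagonally_dominant
False
>>> A.T.is_strongly_diagonally_dominant
True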
""" if not self.is_square: return False rows, cols = self.shape def test_row(i): summation = self.zero for j in range(cols): if i != j: summation += Abs(self[i, j]) return (Abs(self[i, i]) - summation).is_positive return fuzzy_and(test_row(i) for i in range(rows)) @property def is_hermitian(self): """Checks if the matrix is Hermitian. In a Hermitian matrix element i,j is the complex conjugate of element j,i. Examples ======== >>> from sympy.matrices import Matrix >>> from sympy import I >>> from sympy.abc import x >>> a = Matrix([[1, I], [-I, 1]]) >>> a Matrix([ [ 1, I], [-I, 1]]) >>> a.is_hermitian True >>> a[0, 0] = 2*I >>> a.is_hermitian False >>> a[0, 0] = x >>> a.is_hermitian >>> a[0, 1] = a[1, 0]*I >>> a.is_hermitian False """ if not self.is_square: return False return self._eval_is_matrix_hermitian(_simplify) @property def is_Identity(self) -> FuzzyBool: if not self.is_square: return False return self._eval_is_Identity() @property def is_lower_hessenberg(self): r"""Checks if the matrix is in the lower-Hessenberg form. The lower hessenberg matrix has zero entries above the first superdiagonal. Examples ======== >>> from sympy.matrices import Matrix >>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]]) >>> a Matrix([ [1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]]) >>> a.is_lower_hessenberg True See Also ======== is_upper_hessenberg is_lower """ return self._eval_is_lower_hessenberg() @property def is_lower(self): """Check if matrix is a lower triangular matrix. True can be returned even if the matrix is not square. Examples ======== >>> from sympy import Matrix >>> m = Matrix(2, 2, [1, 0, 0, 1]) >>> m Matrix([ [1, 0], [0, 1]]) >>> m.is_lower True >>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4 , 0, 6, 6, 5]) >>> m Matrix([ [0, 0, 0], [2, 0, 0], [1, 4, 0], [6, 6, 5]]) >>> m.is_lower True >>> from sympy.abc import x, y >>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y]) >>> m Matrix([ [x**2 + y, x + y**2], [ 0, x + y]]) >>> m.is_lower False See Also ======== is_upper is_diagonal is_lower_hessenberg """ return self._eval_is_lower() @property def is_square(self): """Checks if a matrix is square. A matrix is square if the number of rows equals the number of columns. The empty matrix is square by definition, since the number of rows and the number of columns are both zero. Examples ======== >>> from sympy import Matrix >>> a = Matrix([[1, 2, 3], [4, 5, 6]]) >>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> c = Matrix([]) >>> a.is_square False >>> b.is_square True >>> c.is_square True """ return self.rows == self.cols def is_symbolic(self): """Checks if any elements contain Symbols. Examples ======== >>> from sympy.matrices import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[x, y], [1, 0]]) >>> M.is_symbolic() True """ return self._eval_is_symbolic() def is_symmetric(self, simplify=True): """Check if matrix is symmetric matrix, that is square matrix and is equal to its transpose. By default, simplifications occur before testing symmetry. They can be skipped using 'simplify=False'; while speeding things a bit, this may however induce false negatives. 
Examples ======== >>> from sympy import Matrix >>> m = Matrix(2, 2, [0, 1, 1, 2]) >>> m Matrix([ [0, 1], [1, 2]]) >>> m.is_symmetric() True >>> m = Matrix(2, 2, [0, 1, 2, 0]) >>> m Matrix([ [0, 1], [2, 0]]) >>> m.is_symmetric() False >>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0]) >>> m Matrix([ [0, 0, 0], [0, 0, 0]]) >>> m.is_symmetric() False >>> from sympy.abc import x, y >>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3]) >>> m Matrix([ [ 1, x**2 + 2*x + 1, y], [(x + 1)**2, 2, 0], [ y, 0, 3]]) >>> m.is_symmetric() True If the matrix is already simplified, you may speed-up is_symmetric() test by using 'simplify=False'. >>> bool(m.is_symmetric(simplify=False)) False >>> m1 = m.expand() >>> m1.is_symmetric(simplify=False) True """ simpfunc = simplify if not isfunction(simplify): simpfunc = _simplify if simplify else lambda x: x if not self.is_square: return False return self._eval_is_symmetric(simpfunc) @property def is_upper_hessenberg(self): """Checks if the matrix is the upper-Hessenberg form. The upper hessenberg matrix has zero entries below the first subdiagonal. Examples ======== >>> from sympy.matrices import Matrix >>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]]) >>> a Matrix([ [1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]]) >>> a.is_upper_hessenberg True See Also ======== is_lower_hessenberg is_upper """ return self._eval_is_upper_hessenberg() @property def is_upper(self): """Check if matrix is an upper triangular matrix. True can be returned even if the matrix is not square. Examples ======== >>> from sympy import Matrix >>> m = Matrix(2, 2, [1, 0, 0, 1]) >>> m Matrix([ [1, 0], [0, 1]]) >>> m.is_upper True >>> m = Matrix(4, 3, [5, 1, 9, 0, 4 , 6, 0, 0, 5, 0, 0, 0]) >>> m Matrix([ [5, 1, 9], [0, 4, 6], [0, 0, 5], [0, 0, 0]]) >>> m.is_upper True >>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1]) >>> m Matrix([ [4, 2, 5], [6, 1, 1]]) >>> m.is_upper False See Also ======== is_lower is_diagonal is_upper_hessenberg """ return all(self[i, j].is_zero for i in range(1, self.rows) for j in range(min(i, self.cols))) @property def is_zero_matrix(self): """Checks if a matrix is a zero matrix. A matrix is zero if every element is zero. A matrix need not be square to be considered zero. The empty matrix is zero by the principle of vacuous truth. For a matrix that may or may not be zero (e.g. contains a symbol), this will be None Examples ======== >>> from sympy import Matrix, zeros >>> from sympy.abc import x >>> a = Matrix([[0, 0], [0, 0]]) >>> b = zeros(3, 4) >>> c = Matrix([[0, 1], [0, 0]]) >>> d = Matrix([]) >>> e = Matrix([[x, 0], [0, 0]]) >>> a.is_zero_matrix True >>> b.is_zero_matrix True >>> c.is_zero_matrix False >>> d.is_zero_matrix True >>> e.is_zero_matrix """ return self._eval_is_zero_matrix() def values(self): """Return non-zero values of self.""" return self._eval_values() class MatrixOperations(MatrixRequired): """Provides basic matrix shape and elementwise operations. 
Should not be instantiated directly.""" def _eval_adjoint(self): return self.transpose().conjugate() def _eval_applyfunc(self, f): out = self._new(self.rows, self.cols, [f(x) for x in self]) return out def _eval_as_real_imag(self): # type: ignore from sympy.functions.elementary.complexes import re, im return (self.applyfunc(re), self.applyfunc(im)) def _eval_conjugate(self): return self.applyfunc(lambda x: x.conjugate()) def _eval_permute_cols(self, perm): # apply the permutation to a list mapping = list(perm) def entry(i, j): return self[i, mapping[j]] return self._new(self.rows, self.cols, entry) def _eval_permute_rows(self, perm): # apply the permutation to a list mapping = list(perm) def entry(i, j): return self[mapping[i], j] return self._new(self.rows, self.cols, entry) def _eval_trace(self): return sum(self[i, i] for i in range(self.rows)) def _eval_transpose(self): return self._new(self.cols, self.rows, lambda i, j: self[j, i]) def adjoint(self): """Conjugate transpose or Hermitian conjugation.""" return self._eval_adjoint() def applyfunc(self, f): """Apply a function to each element of the matrix. Examples ======== >>> from sympy import Matrix >>> m = Matrix(2, 2, lambda i, j: i*2+j) >>> m Matrix([ [0, 1], [2, 3]]) >>> m.applyfunc(lambda i: 2*i) Matrix([ [0, 2], [4, 6]]) """ if not callable(f): raise TypeError("`f` must be callable.") return self._eval_applyfunc(f) def as_real_imag(self, deep=True, **hints): """Returns a tuple containing the (real, imaginary) part of matrix.""" # XXX: Ignoring deep and hints... return self._eval_as_real_imag() def conjugate(self): """Return the by-element conjugation. Examples ======== >>> from sympy.matrices import SparseMatrix >>> from sympy import I >>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I))) >>> a Matrix([ [1, 2 + I], [3, 4], [I, -I]]) >>> a.C Matrix([ [ 1, 2 - I], [ 3, 4], [-I, I]]) See Also ======== transpose: Matrix transposition H: Hermite conjugation sympy.matrices.matrices.MatrixBase.D: Dirac conjugation """ return self._eval_conjugate() def doit(self, **kwargs): return self.applyfunc(lambda x: x.doit()) def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False): """Apply evalf() to each element of self.""" options = {'subs':subs, 'maxn':maxn, 'chop':chop, 'strict':strict, 'quad':quad, 'verbose':verbose} return self.applyfunc(lambda i: i.evalf(n, **options)) def expand(self, deep=True, modulus=None, power_base=True, power_exp=True, mul=True, log=True, multinomial=True, basic=True, **hints): """Apply core.function.expand to each entry of the matrix. Examples ======== >>> from sympy.abc import x >>> from sympy.matrices import Matrix >>> Matrix(1, 1, [x*(x+1)]) Matrix([[x*(x + 1)]]) >>> _.expand() Matrix([[x**2 + x]]) """ return self.applyfunc(lambda x: x.expand( deep, modulus, power_base, power_exp, mul, log, multinomial, basic, **hints)) @property def H(self): """Return Hermite conjugate. Examples ======== >>> from sympy import Matrix, I >>> m = Matrix((0, 1 + I, 2, 3)) >>> m Matrix([ [ 0], [1 + I], [ 2], [ 3]]) >>> m.H Matrix([[0, 1 - I, 2, 3]]) See Also ======== conjugate: By-element conjugation sympy.matrices.matrices.MatrixBase.D: Dirac conjugation """ return self.T.C def permute(self, perm, orientation='rows', direction='forward'): r"""Permute the rows or columns of a matrix by the given list of swaps. Parameters ========== perm : Permutation, list, or list of lists A representation for the permutation. 
If it is ``Permutation``, it is used directly with some resizing with respect to the matrix size. If it is specified as list of lists, (e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed from applying the product of cycles. The direction how the cyclic product is applied is described in below. If it is specified as a list, the list should represent an array form of a permutation. (e.g., ``[1, 2, 0]``) which would would form the swapping function `0 \mapsto 1, 1 \mapsto 2, 2\mapsto 0`. orientation : 'rows', 'cols' A flag to control whether to permute the rows or the columns direction : 'forward', 'backward' A flag to control whether to apply the permutations from the start of the list first, or from the back of the list first. For example, if the permutation specification is ``[[0, 1], [0, 2]]``, If the flag is set to ``'forward'``, the cycle would be formed as `0 \mapsto 2, 2 \mapsto 1, 1 \mapsto 0`. If the flag is set to ``'backward'``, the cycle would be formed as `0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`. If the argument ``perm`` is not in a form of list of lists, this flag takes no effect. Examples ======== >>> from sympy.matrices import eye >>> M = eye(3) >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward') Matrix([ [0, 0, 1], [1, 0, 0], [0, 1, 0]]) >>> from sympy.matrices import eye >>> M = eye(3) >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward') Matrix([ [0, 1, 0], [0, 0, 1], [1, 0, 0]]) Notes ===== If a bijective function `\sigma : \mathbb{N}_0 \rightarrow \mathbb{N}_0` denotes the permutation. If the matrix `A` is the matrix to permute, represented as a horizontal or a vertical stack of vectors: .. math:: A = \begin{bmatrix} a_0 \\ a_1 \\ \vdots \\ a_{n-1} \end{bmatrix} = \begin{bmatrix} \alpha_0 & \alpha_1 & \cdots & \alpha_{n-1} \end{bmatrix} If the matrix `B` is the result, the permutation of matrix rows is defined as: .. math:: B := \begin{bmatrix} a_{\sigma(0)} \\ a_{\sigma(1)} \\ \vdots \\ a_{\sigma(n-1)} \end{bmatrix} And the permutation of matrix columns is defined as: .. math:: B := \begin{bmatrix} \alpha_{\sigma(0)} & \alpha_{\sigma(1)} & \cdots & \alpha_{\sigma(n-1)} \end{bmatrix} """ from sympy.combinatorics import Permutation # allow british variants and `columns` if direction == 'forwards': direction = 'forward' if direction == 'backwards': direction = 'backward' if orientation == 'columns': orientation = 'cols' if direction not in ('forward', 'backward'): raise TypeError("direction='{}' is an invalid kwarg. " "Try 'forward' or 'backward'".format(direction)) if orientation not in ('rows', 'cols'): raise TypeError("orientation='{}' is an invalid kwarg. 
" "Try 'rows' or 'cols'".format(orientation)) if not isinstance(perm, (Permutation, Iterable)): raise ValueError( "{} must be a list, a list of lists, " "or a SymPy permutation object.".format(perm)) # ensure all swaps are in range max_index = self.rows if orientation == 'rows' else self.cols if not all(0 <= t <= max_index for t in flatten(list(perm))): raise IndexError("`swap` indices out of range.") if perm and not isinstance(perm, Permutation) and \ isinstance(perm[0], Iterable): if direction == 'forward': perm = list(reversed(perm)) perm = Permutation(perm, size=max_index+1) else: perm = Permutation(perm, size=max_index+1) if orientation == 'rows': return self._eval_permute_rows(perm) if orientation == 'cols': return self._eval_permute_cols(perm) def permute_cols(self, swaps, direction='forward'): """Alias for ``self.permute(swaps, orientation='cols', direction=direction)`` See Also ======== permute """ return self.permute(swaps, orientation='cols', direction=direction) def permute_rows(self, swaps, direction='forward'): """Alias for ``self.permute(swaps, orientation='rows', direction=direction)`` See Also ======== permute """ return self.permute(swaps, orientation='rows', direction=direction) def refine(self, assumptions=True): """Apply refine to each element of the matrix. Examples ======== >>> from sympy import Symbol, Matrix, Abs, sqrt, Q >>> x = Symbol('x') >>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]]) Matrix([ [ Abs(x)**2, sqrt(x**2)], [sqrt(x**2), Abs(x)**2]]) >>> _.refine(Q.real(x)) Matrix([ [ x**2, Abs(x)], [Abs(x), x**2]]) """ return self.applyfunc(lambda x: refine(x, assumptions)) def replace(self, F, G, map=False, simultaneous=True, exact=None): """Replaces Function F in Matrix entries with Function G. Examples ======== >>> from sympy import symbols, Function, Matrix >>> F, G = symbols('F, G', cls=Function) >>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M Matrix([ [F(0), F(1)], [F(1), F(2)]]) >>> N = M.replace(F,G) >>> N Matrix([ [G(0), G(1)], [G(1), G(2)]]) """ return self.applyfunc( lambda x: x.replace(F, G, map=map, simultaneous=simultaneous, exact=exact)) def rot90(self, k=1): """Rotates Matrix by 90 degrees Parameters ========== k : int Specifies how many times the matrix is rotated by 90 degrees (clockwise when positive, counter-clockwise when negative). Examples ======== >>> from sympy import Matrix, symbols >>> A = Matrix(2, 2, symbols('a:d')) >>> A Matrix([ [a, b], [c, d]]) Rotating the matrix clockwise one time: >>> A.rot90(1) Matrix([ [c, a], [d, b]]) Rotating the matrix anticlockwise two times: >>> A.rot90(-2) Matrix([ [d, c], [b, a]]) """ mod = k%4 if mod == 0: return self if mod == 1: return self[::-1, ::].T if mod == 2: return self[::-1, ::-1] if mod == 3: return self[::, ::-1].T def simplify(self, **kwargs): """Apply simplify to each element of the matrix. Examples ======== >>> from sympy.abc import x, y >>> from sympy import sin, cos >>> from sympy.matrices import SparseMatrix >>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2]) Matrix([[x*sin(y)**2 + x*cos(y)**2]]) >>> _.simplify() Matrix([[x]]) """ return self.applyfunc(lambda x: x.simplify(**kwargs)) def subs(self, *args, **kwargs): # should mirror core.basic.subs """Return a new matrix with subs applied to each entry. 
Examples ======== >>> from sympy.abc import x, y >>> from sympy.matrices import SparseMatrix, Matrix >>> SparseMatrix(1, 1, [x]) Matrix([[x]]) >>> _.subs(x, y) Matrix([[y]]) >>> Matrix(_).subs(y, x) Matrix([[x]]) """ if len(args) == 1 and not isinstance(args[0], (dict, set)) and iter(args[0]) and not is_sequence(args[0]): args = (list(args[0]),) return self.applyfunc(lambda x: x.subs(*args, **kwargs)) def trace(self): """ Returns the trace of a square matrix i.e. the sum of the diagonal elements. Examples ======== >>> from sympy import Matrix >>> A = Matrix(2, 2, [1, 2, 3, 4]) >>> A.trace() 5 """ if self.rows != self.cols: raise NonSquareMatrixError() return self._eval_trace() def transpose(self): """ Returns the transpose of the matrix. Examples ======== >>> from sympy import Matrix >>> A = Matrix(2, 2, [1, 2, 3, 4]) >>> A.transpose() Matrix([ [1, 3], [2, 4]]) >>> from sympy import Matrix, I >>> m=Matrix(((1, 2+I), (3, 4))) >>> m Matrix([ [1, 2 + I], [3, 4]]) >>> m.transpose() Matrix([ [ 1, 3], [2 + I, 4]]) >>> m.T == m.transpose() True See Also ======== conjugate: By-element conjugation """ return self._eval_transpose() @property def T(self): '''Matrix transposition''' return self.transpose() @property def C(self): '''By-element conjugation''' return self.conjugate() def n(self, *args, **kwargs): """Apply evalf() to each element of self.""" return self.evalf(*args, **kwargs) def xreplace(self, rule): # should mirror core.basic.xreplace """Return a new matrix with xreplace applied to each entry. Examples ======== >>> from sympy.abc import x, y >>> from sympy.matrices import SparseMatrix, Matrix >>> SparseMatrix(1, 1, [x]) Matrix([[x]]) >>> _.xreplace({x: y}) Matrix([[y]]) >>> Matrix(_).xreplace({y: x}) Matrix([[x]]) """ return self.applyfunc(lambda x: x.xreplace(rule)) def _eval_simplify(self, **kwargs): # XXX: We can't use self.simplify here as mutable subclasses will # override simplify and have it return None return MatrixOperations.simplify(self, **kwargs) def _eval_trigsimp(self, **opts): from sympy.simplify import trigsimp return self.applyfunc(lambda x: trigsimp(x, **opts)) class MatrixArithmetic(MatrixRequired): """Provides basic matrix arithmetic operations. Should not be instantiated directly.""" _op_priority = 10.01 def _eval_Abs(self): return self._new(self.rows, self.cols, lambda i, j: Abs(self[i, j])) def _eval_add(self, other): return self._new(self.rows, self.cols, lambda i, j: self[i, j] + other[i, j]) def _eval_matrix_mul(self, other): def entry(i, j): vec = [self[i,k]*other[k,j] for k in range(self.cols)] try: return Add(*vec) except (TypeError, SympifyError): # Some matrices don't work with `sum` or `Add` # They don't work with `sum` because `sum` tries to add `0` # Fall back to a safe way to multiply if the `Add` fails. 
return reduce(lambda a, b: a + b, vec) return self._new(self.rows, other.cols, entry) def _eval_matrix_mul_elementwise(self, other): return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other[i,j]) def _eval_matrix_rmul(self, other): def entry(i, j): return sum(other[i,k]*self[k,j] for k in range(other.cols)) return self._new(other.rows, self.cols, entry) def _eval_pow_by_recursion(self, num): if num == 1: return self if num % 2 == 1: a, b = self, self._eval_pow_by_recursion(num - 1) else: a = b = self._eval_pow_by_recursion(num // 2) return a.multiply(b) def _eval_pow_by_cayley(self, exp): from sympy.discrete.recurrences import linrec_coeffs row = self.shape[0] p = self.charpoly() coeffs = (-p).all_coeffs()[1:] coeffs = linrec_coeffs(coeffs, exp) new_mat = self.eye(row) ans = self.zeros(row) for i in range(row): ans += coeffs[i]*new_mat new_mat *= self return ans def _eval_pow_by_recursion_dotprodsimp(self, num, prevsimp=None): if prevsimp is None: prevsimp = [True]*len(self) if num == 1: return self if num % 2 == 1: a, b = self, self._eval_pow_by_recursion_dotprodsimp(num - 1, prevsimp=prevsimp) else: a = b = self._eval_pow_by_recursion_dotprodsimp(num // 2, prevsimp=prevsimp) m = a.multiply(b, dotprodsimp=False) lenm = len(m) elems = [None]*lenm for i in range(lenm): if prevsimp[i]: elems[i], prevsimp[i] = _dotprodsimp(m[i], withsimp=True) else: elems[i] = m[i] return m._new(m.rows, m.cols, elems) def _eval_scalar_mul(self, other): return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other) def _eval_scalar_rmul(self, other): return self._new(self.rows, self.cols, lambda i, j: other*self[i,j]) def _eval_Mod(self, other): from sympy import Mod return self._new(self.rows, self.cols, lambda i, j: Mod(self[i, j], other)) # python arithmetic functions def __abs__(self): """Returns a new matrix with entry-wise absolute values.""" return self._eval_Abs() @call_highest_priority('__radd__') def __add__(self, other): """Return self + other, raising ShapeError if shapes don't match.""" if isinstance(other, NDimArray): # Matrix and array addition is currently not implemented return NotImplemented other = _matrixify(other) # matrix-like objects can have shapes. This is # our first sanity check. if hasattr(other, 'shape'): if self.shape != other.shape: raise ShapeError("Matrix size mismatch: %s + %s" % ( self.shape, other.shape)) # honest sympy matrices defer to their class's routine if getattr(other, 'is_Matrix', False): # call the highest-priority class's _eval_add a, b = self, other if a.__class__ != classof(a, b): b, a = a, b return a._eval_add(b) # Matrix-like objects can be passed to CommonMatrix routines directly. if getattr(other, 'is_MatrixLike', False): return MatrixArithmetic._eval_add(self, other) raise TypeError('cannot add %s and %s' % (type(self), type(other))) @call_highest_priority('__rtruediv__') def __truediv__(self, other): return self * (self.one / other) @call_highest_priority('__rmatmul__') def __matmul__(self, other): other = _matrixify(other) if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False): return NotImplemented return self.__mul__(other) def __mod__(self, other): return self.applyfunc(lambda x: x % other) @call_highest_priority('__rmul__') def __mul__(self, other): """Return self*other where other is either a scalar or a matrix of compatible dimensions. 
Examples ======== >>> from sympy.matrices import Matrix >>> A = Matrix([[1, 2, 3], [4, 5, 6]]) >>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]]) True >>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> A*B Matrix([ [30, 36, 42], [66, 81, 96]]) >>> B*A Traceback (most recent call last): ... ShapeError: Matrices size mismatch. >>> See Also ======== matrix_multiply_elementwise """ return self.multiply(other) def multiply(self, other, dotprodsimp=None): """Same as __mul__() but with optional simplification. Parameters ========== dotprodsimp : bool, optional Specifies whether intermediate term algebraic simplification is used during matrix multiplications to control expression blowup and thus speed up calculation. Default is off. """ isimpbool = _get_intermediate_simp_bool(False, dotprodsimp) other = _matrixify(other) # matrix-like objects can have shapes. This is # our first sanity check. Double check other is not explicitly not a Matrix. if (hasattr(other, 'shape') and len(other.shape) == 2 and (getattr(other, 'is_Matrix', True) or getattr(other, 'is_MatrixLike', True))): if self.shape[1] != other.shape[0]: raise ShapeError("Matrix size mismatch: %s * %s." % ( self.shape, other.shape)) # honest sympy matrices defer to their class's routine if getattr(other, 'is_Matrix', False): m = self._eval_matrix_mul(other) if isimpbool: return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m]) return m # Matrix-like objects can be passed to CommonMatrix routines directly. if getattr(other, 'is_MatrixLike', False): return MatrixArithmetic._eval_matrix_mul(self, other) # if 'other' is not iterable then scalar multiplication. if not isinstance(other, Iterable): try: return self._eval_scalar_mul(other) except TypeError: pass return NotImplemented def multiply_elementwise(self, other): """Return the Hadamard product (elementwise product) of A and B Examples ======== >>> from sympy.matrices import Matrix >>> A = Matrix([[0, 1, 2], [3, 4, 5]]) >>> B = Matrix([[1, 10, 100], [100, 10, 1]]) >>> A.multiply_elementwise(B) Matrix([ [ 0, 10, 200], [300, 40, 5]]) See Also ======== sympy.matrices.matrices.MatrixBase.cross sympy.matrices.matrices.MatrixBase.dot multiply """ if self.shape != other.shape: raise ShapeError("Matrix shapes must agree {} != {}".format(self.shape, other.shape)) return self._eval_matrix_mul_elementwise(other) def __neg__(self): return self._eval_scalar_mul(-1) @call_highest_priority('__rpow__') def __pow__(self, exp): """Return self**exp a scalar or symbol.""" return self.pow(exp) def pow(self, exp, method=None): r"""Return self**exp a scalar or symbol. Parameters ========== method : multiply, mulsimp, jordan, cayley If multiply then it returns exponentiation using recursion. If jordan then Jordan form exponentiation will be used. If cayley then the exponentiation is done using Cayley-Hamilton theorem. If mulsimp then the exponentiation is done using recursion with dotprodsimp. This specifies whether intermediate term algebraic simplification is used during naive matrix power to control expression blowup and thus speed up calculation. If None, then it heuristically decides which method to use. 
""" if method is not None and method not in ['multiply', 'mulsimp', 'jordan', 'cayley']: raise TypeError('No such method') if self.rows != self.cols: raise NonSquareMatrixError() a = self jordan_pow = getattr(a, '_matrix_pow_by_jordan_blocks', None) exp = sympify(exp) if exp.is_zero: return a._new(a.rows, a.cols, lambda i, j: int(i == j)) if exp == 1: return a diagonal = getattr(a, 'is_diagonal', None) if diagonal is not None and diagonal(): return a._new(a.rows, a.cols, lambda i, j: a[i,j]**exp if i == j else 0) if exp.is_Number and exp % 1 == 0: if a.rows == 1: return a._new([[a[0]**exp]]) if exp < 0: exp = -exp a = a.inv() # When certain conditions are met, # Jordan block algorithm is faster than # computation by recursion. if method == 'jordan': try: return jordan_pow(exp) except MatrixError: if method == 'jordan': raise elif method == 'cayley': if not exp.is_Number or exp % 1 != 0: raise ValueError("cayley method is only valid for integer powers") return a._eval_pow_by_cayley(exp) elif method == "mulsimp": if not exp.is_Number or exp % 1 != 0: raise ValueError("mulsimp method is only valid for integer powers") return a._eval_pow_by_recursion_dotprodsimp(exp) elif method == "multiply": if not exp.is_Number or exp % 1 != 0: raise ValueError("multiply method is only valid for integer powers") return a._eval_pow_by_recursion(exp) elif method is None and exp.is_Number and exp % 1 == 0: # Decide heuristically which method to apply if a.rows == 2 and exp > 100000: return jordan_pow(exp) elif _get_intermediate_simp_bool(True, None): return a._eval_pow_by_recursion_dotprodsimp(exp) elif exp > 10000: return a._eval_pow_by_cayley(exp) else: return a._eval_pow_by_recursion(exp) if jordan_pow: try: return jordan_pow(exp) except NonInvertibleMatrixError: # Raised by jordan_pow on zero determinant matrix unless exp is # definitely known to be a non-negative integer. # Here we raise if n is definitely not a non-negative integer # but otherwise we can leave this as an unevaluated MatPow. if exp.is_integer is False or exp.is_nonnegative is False: raise from sympy.matrices.expressions import MatPow return MatPow(a, exp) @call_highest_priority('__add__') def __radd__(self, other): return self + other @call_highest_priority('__matmul__') def __rmatmul__(self, other): other = _matrixify(other) if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False): return NotImplemented return self.__rmul__(other) @call_highest_priority('__mul__') def __rmul__(self, other): return self.rmultiply(other) def rmultiply(self, other, dotprodsimp=None): """Same as __rmul__() but with optional simplification. Parameters ========== dotprodsimp : bool, optional Specifies whether intermediate term algebraic simplification is used during matrix multiplications to control expression blowup and thus speed up calculation. Default is off. """ isimpbool = _get_intermediate_simp_bool(False, dotprodsimp) other = _matrixify(other) # matrix-like objects can have shapes. This is # our first sanity check. Double check other is not explicitly not a Matrix. 
if (hasattr(other, 'shape') and len(other.shape) == 2 and (getattr(other, 'is_Matrix', True) or getattr(other, 'is_MatrixLike', True))): if self.shape[0] != other.shape[1]: raise ShapeError("Matrix size mismatch.") # honest sympy matrices defer to their class's routine if getattr(other, 'is_Matrix', False): m = self._eval_matrix_rmul(other) if isimpbool: return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m]) return m # Matrix-like objects can be passed to CommonMatrix routines directly. if getattr(other, 'is_MatrixLike', False): return MatrixArithmetic._eval_matrix_rmul(self, other) # if 'other' is not iterable then scalar multiplication. if not isinstance(other, Iterable): try: return self._eval_scalar_rmul(other) except TypeError: pass return NotImplemented @call_highest_priority('__sub__') def __rsub__(self, a): return (-self) + a @call_highest_priority('__rsub__') def __sub__(self, a): return self + (-a) class MatrixCommon(MatrixArithmetic, MatrixOperations, MatrixProperties, MatrixSpecial, MatrixShaping): """All common matrix operations including basic arithmetic, shaping, and special matrices like `zeros`, and `eye`.""" _diff_wrt = True # type: bool class _MinimalMatrix: """Class providing the minimum functionality for a matrix-like object and implementing every method required for a `MatrixRequired`. This class does not have everything needed to become a full-fledged SymPy object, but it will satisfy the requirements of anything inheriting from `MatrixRequired`. If you wish to make a specialized matrix type, make sure to implement these methods and properties with the exception of `__init__` and `__repr__` which are included for convenience.""" is_MatrixLike = True _sympify = staticmethod(sympify) _class_priority = 3 zero = S.Zero one = S.One is_Matrix = True is_MatrixExpr = False @classmethod def _new(cls, *args, **kwargs): return cls(*args, **kwargs) def __init__(self, rows, cols=None, mat=None): if isfunction(mat): # if we passed in a function, use that to populate the indices mat = list(mat(i, j) for i in range(rows) for j in range(cols)) if cols is None and mat is None: mat = rows rows, cols = getattr(mat, 'shape', (rows, cols)) try: # if we passed in a list of lists, flatten it and set the size if cols is None and mat is None: mat = rows cols = len(mat[0]) rows = len(mat) mat = [x for l in mat for x in l] except (IndexError, TypeError): pass self.mat = tuple(self._sympify(x) for x in mat) self.rows, self.cols = rows, cols if self.rows is None or self.cols is None: raise NotImplementedError("Cannot initialize matrix with given parameters") def __getitem__(self, key): def _normalize_slices(row_slice, col_slice): """Ensure that row_slice and col_slice don't have `None` in their arguments. Any integers are converted to slices of length 1""" if not isinstance(row_slice, slice): row_slice = slice(row_slice, row_slice + 1, None) row_slice = slice(*row_slice.indices(self.rows)) if not isinstance(col_slice, slice): col_slice = slice(col_slice, col_slice + 1, None) col_slice = slice(*col_slice.indices(self.cols)) return (row_slice, col_slice) def _coord_to_index(i, j): """Return the index in _mat corresponding to the (i,j) position in the matrix. 
""" return i * self.cols + j if isinstance(key, tuple): i, j = key if isinstance(i, slice) or isinstance(j, slice): # if the coordinates are not slices, make them so # and expand the slices so they don't contain `None` i, j = _normalize_slices(i, j) rowsList, colsList = list(range(self.rows))[i], \ list(range(self.cols))[j] indices = (i * self.cols + j for i in rowsList for j in colsList) return self._new(len(rowsList), len(colsList), list(self.mat[i] for i in indices)) # if the key is a tuple of ints, change # it to an array index key = _coord_to_index(i, j) return self.mat[key] def __eq__(self, other): try: classof(self, other) except TypeError: return False return ( self.shape == other.shape and list(self) == list(other)) def __len__(self): return self.rows*self.cols def __repr__(self): return "_MinimalMatrix({}, {}, {})".format(self.rows, self.cols, self.mat) @property def shape(self): return (self.rows, self.cols) class _CastableMatrix: # this is needed here ONLY FOR TESTS. def as_mutable(self): return self def as_immutable(self): return self class _MatrixWrapper: """Wrapper class providing the minimum functionality for a matrix-like object: .rows, .cols, .shape, indexability, and iterability. CommonMatrix math operations should work on matrix-like objects. This one is intended for matrix-like objects which use the same indexing format as SymPy with respect to returning matrix elements instead of rows for non-tuple indexes. """ is_Matrix = False # needs to be here because of __getattr__ is_MatrixLike = True def __init__(self, mat, shape): self.mat = mat self.shape = shape self.rows, self.cols = shape def __getitem__(self, key): if isinstance(key, tuple): return sympify(self.mat.__getitem__(key)) return sympify(self.mat.__getitem__((key // self.rows, key % self.cols))) def __iter__(self): # supports numpy.matrix and numpy.array mat = self.mat cols = self.cols return iter(sympify(mat[r, c]) for r in range(self.rows) for c in range(cols)) def _matrixify(mat): """If `mat` is a Matrix or is matrix-like, return a Matrix or MatrixWrapper object. Otherwise `mat` is passed through without modification.""" if getattr(mat, 'is_Matrix', False) or getattr(mat, 'is_MatrixLike', False): return mat if not(getattr(mat, 'is_Matrix', True) or getattr(mat, 'is_MatrixLike', True)): return mat shape = None if hasattr(mat, 'shape'): # numpy, scipy.sparse if len(mat.shape) == 2: shape = mat.shape elif hasattr(mat, 'rows') and hasattr(mat, 'cols'): # mpmath shape = (mat.rows, mat.cols) if shape: return _MatrixWrapper(mat, shape) return mat def a2idx(j, n=None): """Return integer after making positive and validating against n.""" if type(j) is not int: jindex = getattr(j, '__index__', None) if jindex is not None: j = jindex() else: raise IndexError("Invalid index a[%r]" % (j,)) if n is not None: if j < 0: j += n if not (j >= 0 and j < n): raise IndexError("Index out of range: a[%s]" % (j,)) return int(j) def classof(A, B): """ Get the type of the result when combining matrices of different types. Currently the strategy is that immutability is contagious. 
Examples ======== >>> from sympy import Matrix, ImmutableMatrix >>> from sympy.matrices.common import classof >>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix >>> IM = ImmutableMatrix([[1, 2], [3, 4]]) >>> classof(M, IM) <class 'sympy.matrices.immutable.ImmutableDenseMatrix'> """ priority_A = getattr(A, '_class_priority', None) priority_B = getattr(B, '_class_priority', None) if None not in (priority_A, priority_B): if A._class_priority > B._class_priority: return A.__class__ else: return B.__class__ try: import numpy except ImportError: pass else: if isinstance(A, numpy.ndarray): return B.__class__ if isinstance(B, numpy.ndarray): return A.__class__ raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
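# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original sympy source above).
# A minimal, hedged example showing how a few of the behaviours documented in
# the docstrings above work in practice: the `direction` keyword of permute(),
# the `method` keyword of pow() (which has no doctest of its own), and the
# Hadamard product. It only assumes a standard SymPy installation.
# ---------------------------------------------------------------------------
from sympy import Matrix, eye

M = eye(3)
# 'forward' applies the cycles from the start of the list: 0 -> 2, 2 -> 1, 1 -> 0
assert M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward') == \
    Matrix([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
# 'backward' applies them from the end of the list: 0 -> 1, 1 -> 2, 2 -> 0
assert M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward') == \
    Matrix([[0, 1, 0], [0, 0, 1], [1, 0, 0]])

# The integer-power strategies differ only in the intermediate algorithm
# (plain recursion vs. Cayley-Hamilton); the result is the same.
A = Matrix([[1, 1], [0, 1]])
expected = Matrix([[1, 5], [0, 1]])
assert A.pow(5, method='multiply') == expected
assert A.pow(5, method='cayley') == expected

# Hadamard (elementwise) product versus ordinary matrix multiplication.
B = Matrix([[2, 0], [0, 2]])
assert A.multiply_elementwise(B) == Matrix([[2, 0], [0, 2]])
assert A * B == Matrix([[2, 2], [0, 2]])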
c7d3e329b630cdfb8399ab57add3e704089ed8323a047ef94ff97efa4f646674
import random from functools import reduce from sympy.core import SympifyError, Add from sympy.core.basic import Basic from sympy.core.compatibility import is_sequence from sympy.core.expr import Expr from sympy.core.singleton import S from sympy.core.symbol import Symbol from sympy.core.sympify import sympify, _sympify from sympy.functions.elementary.trigonometric import cos, sin from sympy.matrices.common import \ a2idx, classof, ShapeError from sympy.matrices.matrices import MatrixBase from sympy.simplify.simplify import simplify as _simplify from sympy.utilities.decorator import doctest_depends_on from sympy.utilities.misc import filldedent from .decompositions import _cholesky, _LDLdecomposition from .solvers import _lower_triangular_solve, _upper_triangular_solve def _iszero(x): """Returns True if x is zero.""" return x.is_zero def _compare_sequence(a, b): """Compares the elements of a list/tuple `a` and a list/tuple `b`. `_compare_sequence((1,2), [1, 2])` is True, whereas `(1,2) == [1, 2]` is False""" if type(a) is type(b): # if they are the same type, compare directly return a == b # there is no overhead for calling `tuple` on a # tuple return tuple(a) == tuple(b) class DenseMatrix(MatrixBase): is_MatrixExpr = False # type: bool _op_priority = 10.01 _class_priority = 4 def __eq__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented self_shape = getattr(self, 'shape', None) other_shape = getattr(other, 'shape', None) if None in (self_shape, other_shape): return False if self_shape != other_shape: return False if isinstance(other, Matrix): return _compare_sequence(self._mat, other._mat) elif isinstance(other, MatrixBase): return _compare_sequence(self._mat, Matrix(other)._mat) def __getitem__(self, key): """Return portion of self defined by key. If the key involves a slice then a list will be returned (if key is a single slice) or a matrix (if key was a tuple involving a slice). Examples ======== >>> from sympy import Matrix, I >>> m = Matrix([ ... [1, 2 + I], ... [3, 4 ]]) If the key is a tuple that doesn't involve a slice then that element is returned: >>> m[1, 0] 3 When a tuple key involves a slice, a matrix is returned. 
Here, the first column is selected (all rows, column 0): >>> m[:, 0] Matrix([ [1], [3]]) If the slice is not a tuple then it selects from the underlying list of elements that are arranged in row order and a list is returned if a slice is involved: >>> m[0] 1 >>> m[::2] [1, 3] """ if isinstance(key, tuple): i, j = key try: i, j = self.key2ij(key) return self._mat[i*self.cols + j] except (TypeError, IndexError): if (isinstance(i, Expr) and not i.is_number) or (isinstance(j, Expr) and not j.is_number): if ((j < 0) is True) or ((j >= self.shape[1]) is True) or\ ((i < 0) is True) or ((i >= self.shape[0]) is True): raise ValueError("index out of boundary") from sympy.matrices.expressions.matexpr import MatrixElement return MatrixElement(self, i, j) if isinstance(i, slice): i = range(self.rows)[i] elif is_sequence(i): pass else: i = [i] if isinstance(j, slice): j = range(self.cols)[j] elif is_sequence(j): pass else: j = [j] return self.extract(i, j) else: # row-wise decomposition of matrix if isinstance(key, slice): return self._mat[key] return self._mat[a2idx(key)] def __setitem__(self, key, value): raise NotImplementedError() def _eval_add(self, other): # we assume both arguments are dense matrices since # sparse matrices have a higher priority mat = [a + b for a,b in zip(self._mat, other._mat)] return classof(self, other)._new(self.rows, self.cols, mat, copy=False) def _eval_extract(self, rowsList, colsList): mat = self._mat cols = self.cols indices = (i * cols + j for i in rowsList for j in colsList) return self._new(len(rowsList), len(colsList), list(mat[i] for i in indices), copy=False) def _eval_matrix_mul(self, other): other_len = other.rows*other.cols new_len = self.rows*other.cols new_mat = [self.zero]*new_len # if we multiply an n x 0 with a 0 x m, the # expected behavior is to produce an n x m matrix of zeros if self.cols != 0 and other.rows != 0: self_cols = self.cols mat = self._mat other_mat = other._mat for i in range(new_len): row, col = i // other.cols, i % other.cols row_indices = range(self_cols*row, self_cols*(row+1)) col_indices = range(col, other_len, other.cols) vec = [mat[a]*other_mat[b] for a, b in zip(row_indices, col_indices)] try: new_mat[i] = Add(*vec) except (TypeError, SympifyError): # Some matrices don't work with `sum` or `Add` # They don't work with `sum` because `sum` tries to add `0` # Fall back to a safe way to multiply if the `Add` fails. 
new_mat[i] = reduce(lambda a, b: a + b, vec) return classof(self, other)._new(self.rows, other.cols, new_mat, copy=False) def _eval_matrix_mul_elementwise(self, other): mat = [a*b for a,b in zip(self._mat, other._mat)] return classof(self, other)._new(self.rows, self.cols, mat, copy=False) def _eval_inverse(self, **kwargs): return self.inv(method=kwargs.get('method', 'GE'), iszerofunc=kwargs.get('iszerofunc', _iszero), try_block_diag=kwargs.get('try_block_diag', False)) def _eval_scalar_mul(self, other): mat = [other*a for a in self._mat] return self._new(self.rows, self.cols, mat, copy=False) def _eval_scalar_rmul(self, other): mat = [a*other for a in self._mat] return self._new(self.rows, self.cols, mat, copy=False) def _eval_tolist(self): mat = list(self._mat) cols = self.cols return [mat[i*cols:(i + 1)*cols] for i in range(self.rows)] def as_immutable(self): """Returns an Immutable version of this Matrix """ from .immutable import ImmutableDenseMatrix as cls if self.rows and self.cols: return cls._new(self.tolist()) return cls._new(self.rows, self.cols, []) def as_mutable(self): """Returns a mutable version of this matrix Examples ======== >>> from sympy import ImmutableMatrix >>> X = ImmutableMatrix([[1, 2], [3, 4]]) >>> Y = X.as_mutable() >>> Y[1, 1] = 5 # Can set values in Y >>> Y Matrix([ [1, 2], [3, 5]]) """ return Matrix(self) def equals(self, other, failing_expression=False): """Applies ``equals`` to corresponding elements of the matrices, trying to prove that the elements are equivalent, returning True if they are, False if any pair is not, and None (or the first failing expression if failing_expression is True) if it cannot be decided if the expressions are equivalent or not. This is, in general, an expensive operation. Examples ======== >>> from sympy.matrices import Matrix >>> from sympy.abc import x >>> A = Matrix([x*(x - 1), 0]) >>> B = Matrix([x**2 - x, 0]) >>> A == B False >>> A.simplify() == B.simplify() True >>> A.equals(B) True >>> A.equals(2) False See Also ======== sympy.core.expr.Expr.equals """ self_shape = getattr(self, 'shape', None) other_shape = getattr(other, 'shape', None) if None in (self_shape, other_shape): return False if self_shape != other_shape: return False rv = True for i in range(self.rows): for j in range(self.cols): ans = self[i, j].equals(other[i, j], failing_expression) if ans is False: return False elif ans is not True and rv is True: rv = ans return rv def cholesky(self, hermitian=True): return _cholesky(self, hermitian=hermitian) def LDLdecomposition(self, hermitian=True): return _LDLdecomposition(self, hermitian=hermitian) def lower_triangular_solve(self, rhs): return _lower_triangular_solve(self, rhs) def upper_triangular_solve(self, rhs): return _upper_triangular_solve(self, rhs) cholesky.__doc__ = _cholesky.__doc__ LDLdecomposition.__doc__ = _LDLdecomposition.__doc__ lower_triangular_solve.__doc__ = _lower_triangular_solve.__doc__ upper_triangular_solve.__doc__ = _upper_triangular_solve.__doc__ def _force_mutable(x): """Return a matrix as a Matrix, otherwise return x.""" if getattr(x, 'is_Matrix', False): return x.as_mutable() elif isinstance(x, Basic): return x elif hasattr(x, '__array__'): a = x.__array__() if len(a.shape) == 0: return sympify(a) return Matrix(x) return x class MutableDenseMatrix(DenseMatrix, MatrixBase): __hash__ = None # type: ignore def __new__(cls, *args, **kwargs): return cls._new(*args, **kwargs) @classmethod def _new(cls, *args, copy=True, **kwargs): if copy is False: # The input was rows, cols, [list]. 
# It should be used directly without creating a copy. if len(args) != 3: raise TypeError("'copy=False' requires a matrix be initialized as rows,cols,[list]") rows, cols, flat_list = args else: rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs) flat_list = list(flat_list) # create a shallow copy self = object.__new__(cls) self.rows = rows self.cols = cols self._mat = flat_list return self def __setitem__(self, key, value): """ Examples ======== >>> from sympy import Matrix, I, zeros, ones >>> m = Matrix(((1, 2+I), (3, 4))) >>> m Matrix([ [1, 2 + I], [3, 4]]) >>> m[1, 0] = 9 >>> m Matrix([ [1, 2 + I], [9, 4]]) >>> m[1, 0] = [[0, 1]] To replace row r you assign to position r*m where m is the number of columns: >>> M = zeros(4) >>> m = M.cols >>> M[3*m] = ones(1, m)*2; M Matrix([ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [2, 2, 2, 2]]) And to replace column c you can assign to position c: >>> M[2] = ones(m, 1)*4; M Matrix([ [0, 0, 4, 0], [0, 0, 4, 0], [0, 0, 4, 0], [2, 2, 4, 2]]) """ rv = self._setitem(key, value) if rv is not None: i, j, value = rv self._mat[i*self.cols + j] = value def as_mutable(self): return self.copy() def _eval_col_del(self, col): for j in range(self.rows-1, -1, -1): del self._mat[col + j*self.cols] self.cols -= 1 def _eval_row_del(self, row): del self._mat[row*self.cols: (row+1)*self.cols] self.rows -= 1 def col_op(self, j, f): """In-place operation on col j using two-arg functor whose args are interpreted as (self[i, j], i). Examples ======== >>> from sympy.matrices import eye >>> M = eye(3) >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M Matrix([ [1, 2, 0], [0, 1, 0], [0, 0, 1]]) See Also ======== col row_op """ self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], list(range(self.rows))))] def col_swap(self, i, j): """Swap the two given columns of the matrix in-place. Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix([[1, 0], [1, 0]]) >>> M Matrix([ [1, 0], [1, 0]]) >>> M.col_swap(0, 1) >>> M Matrix([ [0, 1], [0, 1]]) See Also ======== col row_swap """ for k in range(0, self.rows): self[k, i], self[k, j] = self[k, j], self[k, i] def copyin_list(self, key, value): """Copy in elements from a list. Parameters ========== key : slice The section of this matrix to replace. value : iterable The iterable to copy values from. Examples ======== >>> from sympy.matrices import eye >>> I = eye(3) >>> I[:2, 0] = [1, 2] # col >>> I Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) >>> I[1, :2] = [[3, 4]] >>> I Matrix([ [1, 0, 0], [3, 4, 0], [0, 0, 1]]) See Also ======== copyin_matrix """ if not is_sequence(value): raise TypeError("`value` must be an ordered iterable, not %s." % type(value)) return self.copyin_matrix(key, Matrix(value)) def copyin_matrix(self, key, value): """Copy in values from a matrix into the given bounds. Parameters ========== key : slice The section of this matrix to replace. value : Matrix The matrix to copy values from. 
Examples ======== >>> from sympy.matrices import Matrix, eye >>> M = Matrix([[0, 1], [2, 3], [4, 5]]) >>> I = eye(3) >>> I[:3, :2] = M >>> I Matrix([ [0, 1, 0], [2, 3, 0], [4, 5, 1]]) >>> I[0, 1] = M >>> I Matrix([ [0, 0, 1], [2, 2, 3], [4, 4, 5]]) See Also ======== copyin_list """ rlo, rhi, clo, chi = self.key2bounds(key) shape = value.shape dr, dc = rhi - rlo, chi - clo if shape != (dr, dc): raise ShapeError(filldedent("The Matrix `value` doesn't have the " "same dimensions " "as the in sub-Matrix given by `key`.")) for i in range(value.rows): for j in range(value.cols): self[i + rlo, j + clo] = value[i, j] def fill(self, value): """Fill the matrix with the scalar value. See Also ======== zeros ones """ self._mat = [value]*len(self) def row_op(self, i, f): """In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], j)``. Examples ======== >>> from sympy.matrices import eye >>> M = eye(3) >>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) See Also ======== row zip_row_op col_op """ i0 = i*self.cols ri = self._mat[i0: i0 + self.cols] self._mat[i0: i0 + self.cols] = [f(x, j) for x, j in zip(ri, list(range(self.cols)))] def row_swap(self, i, j): """Swap the two given rows of the matrix in-place. Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix([[0, 1], [1, 0]]) >>> M Matrix([ [0, 1], [1, 0]]) >>> M.row_swap(0, 1) >>> M Matrix([ [1, 0], [0, 1]]) See Also ======== row col_swap """ for k in range(0, self.cols): self[i, k], self[j, k] = self[j, k], self[i, k] def simplify(self, **kwargs): """Applies simplify to the elements of a matrix in place. This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure)) See Also ======== sympy.simplify.simplify.simplify """ for i in range(len(self._mat)): self._mat[i] = _simplify(self._mat[i], **kwargs) def zip_row_op(self, i, k, f): """In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], self[k, j])``. Examples ======== >>> from sympy.matrices import eye >>> M = eye(3) >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) See Also ======== row row_op col_op """ i0 = i*self.cols k0 = k*self.cols ri = self._mat[i0: i0 + self.cols] rk = self._mat[k0: k0 + self.cols] self._mat[i0: i0 + self.cols] = [f(x, y) for x, y in zip(ri, rk)] is_zero = False MutableMatrix = Matrix = MutableDenseMatrix ########### # Numpy Utility Functions: # list2numpy, matrix2numpy, symmarray, rot_axis[123] ########### def list2numpy(l, dtype=object): # pragma: no cover """Converts python list of SymPy expressions to a NumPy array. See Also ======== matrix2numpy """ from numpy import empty a = empty(len(l), dtype) for i, s in enumerate(l): a[i] = s return a def matrix2numpy(m, dtype=object): # pragma: no cover """Converts SymPy's matrix to a NumPy array. See Also ======== list2numpy """ from numpy import empty a = empty(m.shape, dtype) for i in range(m.rows): for j in range(m.cols): a[i, j] = m[i, j] return a def rot_axis3(theta): """Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis. 
Examples ======== >>> from sympy import pi >>> from sympy.matrices import rot_axis3 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis3(theta) Matrix([ [ 1/2, sqrt(3)/2, 0], [-sqrt(3)/2, 1/2, 0], [ 0, 0, 1]]) If we rotate by pi/2 (90 degrees): >>> rot_axis3(pi/2) Matrix([ [ 0, 1, 0], [-1, 0, 0], [ 0, 0, 1]]) See Also ======== rot_axis1: Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis rot_axis2: Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis """ ct = cos(theta) st = sin(theta) lil = ((ct, st, 0), (-st, ct, 0), (0, 0, 1)) return Matrix(lil) def rot_axis2(theta): """Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis. Examples ======== >>> from sympy import pi >>> from sympy.matrices import rot_axis2 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis2(theta) Matrix([ [ 1/2, 0, -sqrt(3)/2], [ 0, 1, 0], [sqrt(3)/2, 0, 1/2]]) If we rotate by pi/2 (90 degrees): >>> rot_axis2(pi/2) Matrix([ [0, 0, -1], [0, 1, 0], [1, 0, 0]]) See Also ======== rot_axis1: Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis rot_axis3: Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis """ ct = cos(theta) st = sin(theta) lil = ((ct, 0, -st), (0, 1, 0), (st, 0, ct)) return Matrix(lil) def rot_axis1(theta): """Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis. Examples ======== >>> from sympy import pi >>> from sympy.matrices import rot_axis1 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis1(theta) Matrix([ [1, 0, 0], [0, 1/2, sqrt(3)/2], [0, -sqrt(3)/2, 1/2]]) If we rotate by pi/2 (90 degrees): >>> rot_axis1(pi/2) Matrix([ [1, 0, 0], [0, 0, 1], [0, -1, 0]]) See Also ======== rot_axis2: Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis rot_axis3: Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis """ ct = cos(theta) st = sin(theta) lil = ((1, 0, 0), (0, ct, st), (0, -st, ct)) return Matrix(lil) @doctest_depends_on(modules=('numpy',)) def symarray(prefix, shape, **kwargs): # pragma: no cover r"""Create a numpy ndarray of symbols (as an object array). The created symbols are named ``prefix_i1_i2_``... You should thus provide a non-empty prefix if you want your symbols to be unique for different output arrays, as SymPy symbols with identical names are the same object. Parameters ---------- prefix : string A prefix prepended to the name of every symbol. shape : int or tuple Shape of the created array. If an int, the array is one-dimensional; for more than one dimension the shape must be a tuple. \*\*kwargs : dict keyword arguments passed on to Symbol Examples ======== These doctests require numpy. 
>>> from sympy import symarray >>> symarray('', 3) [_0 _1 _2] If you want multiple symarrays to contain distinct symbols, you *must* provide unique prefixes: >>> a = symarray('', 3) >>> b = symarray('', 3) >>> a[0] == b[0] True >>> a = symarray('a', 3) >>> b = symarray('b', 3) >>> a[0] == b[0] False Creating symarrays with a prefix: >>> symarray('a', 3) [a_0 a_1 a_2] For more than one dimension, the shape must be given as a tuple: >>> symarray('a', (2, 3)) [[a_0_0 a_0_1 a_0_2] [a_1_0 a_1_1 a_1_2]] >>> symarray('a', (2, 3, 2)) [[[a_0_0_0 a_0_0_1] [a_0_1_0 a_0_1_1] [a_0_2_0 a_0_2_1]] <BLANKLINE> [[a_1_0_0 a_1_0_1] [a_1_1_0 a_1_1_1] [a_1_2_0 a_1_2_1]]] For setting assumptions of the underlying Symbols: >>> [s.is_real for s in symarray('a', 2, real=True)] [True, True] """ from numpy import empty, ndindex arr = empty(shape, dtype=object) for index in ndindex(shape): arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))), **kwargs) return arr ############### # Functions ############### def casoratian(seqs, n, zero=True): """Given linear difference operator L of order 'k' and homogeneous equation Ly = 0 we want to compute kernel of L, which is a set of 'k' sequences: a(n), b(n), ... z(n). Solutions of L are linearly independent iff their Casoratian, denoted as C(a, b, ..., z), do not vanish for n = 0. Casoratian is defined by k x k determinant:: + a(n) b(n) . . . z(n) + | a(n+1) b(n+1) . . . z(n+1) | | . . . . | | . . . . | | . . . . | + a(n+k-1) b(n+k-1) . . . z(n+k-1) + It proves very useful in rsolve_hyper() where it is applied to a generating set of a recurrence to factor out linearly dependent solutions and return a basis: >>> from sympy import Symbol, casoratian, factorial >>> n = Symbol('n', integer=True) Exponential and factorial are linearly independent: >>> casoratian([2**n, factorial(n)], n) != 0 True """ seqs = list(map(sympify, seqs)) if not zero: f = lambda i, j: seqs[j].subs(n, n + i) else: f = lambda i, j: seqs[j].subs(n, i) k = len(seqs) return Matrix(k, k, f).det() def eye(*args, **kwargs): """Create square identity matrix n x n See Also ======== diag zeros ones """ return Matrix.eye(*args, **kwargs) def diag(*values, strict=True, unpack=False, **kwargs): """Returns a matrix with the provided values placed on the diagonal. If non-square matrices are included, they will produce a block-diagonal matrix. Examples ======== This version of diag is a thin wrapper to Matrix.diag that differs in that it treats all lists like matrices -- even when a single list is given. If this is not desired, either put a `*` before the list or set `unpack=True`. >>> from sympy import diag >>> diag([1, 2, 3], unpack=True) # = diag(1,2,3) or diag(*[1,2,3]) Matrix([ [1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> diag([1, 2, 3]) # a column vector Matrix([ [1], [2], [3]]) See Also ======== .common.MatrixCommon.eye .common.MatrixCommon.diagonal - to extract a diagonal .common.MatrixCommon.diag .expressions.blockmatrix.BlockMatrix """ return Matrix.diag(*values, strict=strict, unpack=unpack, **kwargs) def GramSchmidt(vlist, orthonormal=False): """Apply the Gram-Schmidt process to a set of vectors. Parameters ========== vlist : List of Matrix Vectors to be orthogonalized for. orthonormal : Bool, optional If true, return an orthonormal basis. 
Returns ======= vlist : List of Matrix Orthogonalized vectors Notes ===== This routine is mostly duplicate from ``Matrix.orthogonalize``, except for some difference that this always raises error when linearly dependent vectors are found, and the keyword ``normalize`` has been named as ``orthonormal`` in this function. See Also ======== .matrices.MatrixSubspaces.orthogonalize References ========== .. [1] https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process """ return MutableDenseMatrix.orthogonalize( *vlist, normalize=orthonormal, rankcheck=True ) def hessian(f, varlist, constraints=[]): """Compute Hessian matrix for a function f wrt parameters in varlist which may be given as a sequence or a row/column vector. A list of constraints may optionally be given. Examples ======== >>> from sympy import Function, hessian, pprint >>> from sympy.abc import x, y >>> f = Function('f')(x, y) >>> g1 = Function('g')(x, y) >>> g2 = x**2 + 3*y >>> pprint(hessian(f, (x, y), [g1, g2])) [ d d ] [ 0 0 --(g(x, y)) --(g(x, y)) ] [ dx dy ] [ ] [ 0 0 2*x 3 ] [ ] [ 2 2 ] [d d d ] [--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))] [dx 2 dy dx ] [ dx ] [ ] [ 2 2 ] [d d d ] [--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ] [dy dy dx 2 ] [ dy ] References ========== https://en.wikipedia.org/wiki/Hessian_matrix See Also ======== sympy.matrices.matrices.MatrixCalculus.jacobian wronskian """ # f is the expression representing a function f, return regular matrix if isinstance(varlist, MatrixBase): if 1 not in varlist.shape: raise ShapeError("`varlist` must be a column or row vector.") if varlist.cols == 1: varlist = varlist.T varlist = varlist.tolist()[0] if is_sequence(varlist): n = len(varlist) if not n: raise ShapeError("`len(varlist)` must not be zero.") else: raise ValueError("Improper variable list in hessian function") if not getattr(f, 'diff'): # check differentiability raise ValueError("Function `f` (%s) is not differentiable" % f) m = len(constraints) N = m + n out = zeros(N) for k, g in enumerate(constraints): if not getattr(g, 'diff'): # check differentiability raise ValueError("Function `f` (%s) is not differentiable" % f) for i in range(n): out[k, i + m] = g.diff(varlist[i]) for i in range(n): for j in range(i, n): out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j]) for i in range(N): for j in range(i + 1, N): out[j, i] = out[i, j] return out def jordan_cell(eigenval, n): """ Create a Jordan block: Examples ======== >>> from sympy.matrices import jordan_cell >>> from sympy.abc import x >>> jordan_cell(x, 4) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) """ return Matrix.jordan_block(size=n, eigenvalue=eigenval) def matrix_multiply_elementwise(A, B): """Return the Hadamard product (elementwise product) of A and B >>> from sympy.matrices import matrix_multiply_elementwise >>> from sympy.matrices import Matrix >>> A = Matrix([[0, 1, 2], [3, 4, 5]]) >>> B = Matrix([[1, 10, 100], [100, 10, 1]]) >>> matrix_multiply_elementwise(A, B) Matrix([ [ 0, 10, 200], [300, 40, 5]]) See Also ======== sympy.matrices.common.MatrixCommon.__mul__ """ return A.multiply_elementwise(B) def ones(*args, **kwargs): """Returns a matrix of ones with ``rows`` rows and ``cols`` columns; if ``cols`` is omitted a square matrix will be returned. See Also ======== zeros eye diag """ if 'c' in kwargs: kwargs['cols'] = kwargs.pop('c') return Matrix.ones(*args, **kwargs) def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False, percent=100, prng=None): """Create random matrix with dimensions ``r`` x ``c``. 
If ``c`` is omitted the matrix will be square. If ``symmetric`` is True the matrix must be square. If ``percent`` is less than 100 then only approximately the given percentage of elements will be non-zero. The pseudo-random number generator used to generate matrix is chosen in the following way. * If ``prng`` is supplied, it will be used as random number generator. It should be an instance of ``random.Random``, or at least have ``randint`` and ``shuffle`` methods with same signatures. * if ``prng`` is not supplied but ``seed`` is supplied, then new ``random.Random`` with given ``seed`` will be created; * otherwise, a new ``random.Random`` with default seed will be used. Examples ======== >>> from sympy.matrices import randMatrix >>> randMatrix(3) # doctest:+SKIP [25, 45, 27] [44, 54, 9] [23, 96, 46] >>> randMatrix(3, 2) # doctest:+SKIP [87, 29] [23, 37] [90, 26] >>> randMatrix(3, 3, 0, 2) # doctest:+SKIP [0, 2, 0] [2, 0, 1] [0, 0, 1] >>> randMatrix(3, symmetric=True) # doctest:+SKIP [85, 26, 29] [26, 71, 43] [29, 43, 57] >>> A = randMatrix(3, seed=1) >>> B = randMatrix(3, seed=2) >>> A == B False >>> A == randMatrix(3, seed=1) True >>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP [77, 70, 0], [70, 0, 0], [ 0, 0, 88] """ if c is None: c = r # Note that ``Random()`` is equivalent to ``Random(None)`` prng = prng or random.Random(seed) if not symmetric: m = Matrix._new(r, c, lambda i, j: prng.randint(min, max)) if percent == 100: return m z = int(r*c*(100 - percent) // 100) m._mat[:z] = [S.Zero]*z prng.shuffle(m._mat) return m # Symmetric case if r != c: raise ValueError('For symmetric matrices, r must equal c, but %i != %i' % (r, c)) m = zeros(r) ij = [(i, j) for i in range(r) for j in range(i, r)] if percent != 100: ij = prng.sample(ij, int(len(ij)*percent // 100)) for i, j in ij: value = prng.randint(min, max) m[i, j] = m[j, i] = value return m def wronskian(functions, var, method='bareiss'): """ Compute Wronskian for [] of functions :: | f1 f2 ... fn | | f1' f2' ... fn' | | . . . . | W(f1, ..., fn) = | . . . . | | . . . . | | (n) (n) (n) | | D (f1) D (f2) ... D (fn) | see: https://en.wikipedia.org/wiki/Wronskian See Also ======== sympy.matrices.matrices.MatrixCalculus.jacobian hessian """ for index in range(0, len(functions)): functions[index] = sympify(functions[index]) n = len(functions) if n == 0: return 1 W = Matrix(n, n, lambda i, j: functions[i].diff(var, j)) return W.det(method) def zeros(*args, **kwargs): """Returns a matrix of zeros with ``rows`` rows and ``cols`` columns; if ``cols`` is omitted a square matrix will be returned. See Also ======== ones eye diag """ if 'c' in kwargs: kwargs['cols'] = kwargs.pop('c') return Matrix.zeros(*args, **kwargs)
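# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original sympy source above).
# A small, hedged example exercising a few of the dense-matrix helpers defined
# above: rot_axis3 composition, the seed reproducibility of randMatrix, and
# wronskian. It only assumes a standard SymPy installation.
# ---------------------------------------------------------------------------
from sympy import symbols, simplify, sin, cos, eye
from sympy.matrices import rot_axis3, randMatrix, wronskian

theta, x = symbols('theta x')

# Rotating by theta and then by -theta about the same axis gives the identity.
R = rot_axis3(theta) * rot_axis3(-theta)
assert R.applyfunc(simplify) == eye(3)

# randMatrix is reproducible when the same seed is supplied.
assert randMatrix(3, seed=42) == randMatrix(3, seed=42)

# The Wronskian of sin and cos is the constant -1 (after simplification).
assert simplify(wronskian([sin(x), cos(x)], x)) == -1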
23233607f1ae1c0a57ac8e177832c3262413fc7e32414e46e6202d72aed1048b
from collections import defaultdict from collections.abc import Callable from functools import reduce from sympy.core import SympifyError, Add from sympy.core.compatibility import as_int, is_sequence from sympy.core.containers import Dict from sympy.core.expr import Expr from sympy.core.singleton import S from sympy.core.sympify import _sympify from sympy.functions import Abs from sympy.utilities.iterables import uniq from .common import a2idx from .dense import Matrix from .matrices import MatrixBase, ShapeError from .utilities import _iszero from .decompositions import ( _liupc, _row_structure_symbolic_cholesky, _cholesky_sparse, _LDLdecomposition_sparse) from .solvers import ( _lower_triangular_solve_sparse, _upper_triangular_solve_sparse) class SparseMatrix(MatrixBase): """ A sparse matrix (a matrix with a large number of zero elements). Examples ======== >>> from sympy.matrices import SparseMatrix, ones >>> SparseMatrix(2, 2, range(4)) Matrix([ [0, 1], [2, 3]]) >>> SparseMatrix(2, 2, {(1, 1): 2}) Matrix([ [0, 0], [0, 2]]) A SparseMatrix can be instantiated from a ragged list of lists: >>> SparseMatrix([[1, 2, 3], [1, 2], [1]]) Matrix([ [1, 2, 3], [1, 2, 0], [1, 0, 0]]) For safety, one may include the expected size and then an error will be raised if the indices of any element are out of range or (for a flat list) if the total number of elements does not match the expected shape: >>> SparseMatrix(2, 2, [1, 2]) Traceback (most recent call last): ... ValueError: List length (2) != rows*columns (4) Here, an error is not raised because the list is not flat and no element is out of range: >>> SparseMatrix(2, 2, [[1, 2]]) Matrix([ [1, 2], [0, 0]]) But adding another element to the first (and only) row will cause an error to be raised: >>> SparseMatrix(2, 2, [[1, 2, 3]]) Traceback (most recent call last): ... ValueError: The location (0, 2) is out of designated range: (1, 1) To autosize the matrix, pass None for rows: >>> SparseMatrix(None, [[1, 2, 3]]) Matrix([[1, 2, 3]]) >>> SparseMatrix(None, {(1, 1): 1, (3, 3): 3}) Matrix([ [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 3]]) Values that are themselves a Matrix are automatically expanded: >>> SparseMatrix(4, 4, {(1, 1): ones(2)}) Matrix([ [0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) A ValueError is raised if the expanding matrix tries to overwrite a different element already present: >>> SparseMatrix(3, 3, {(0, 0): ones(2), (1, 1): 2}) Traceback (most recent call last): ... 
ValueError: collision at (1, 1) See Also ======== DenseMatrix MutableSparseMatrix ImmutableSparseMatrix """ @classmethod def _handle_creation_inputs(cls, *args, **kwargs): if len(args) == 1 and isinstance(args[0], MatrixBase): rows = args[0].rows cols = args[0].cols smat = args[0].todok() return rows, cols, smat smat = {} # autosizing if len(args) == 2 and args[0] is None: args = [None, None, args[1]] if len(args) == 3: r, c = args[:2] if r is c is None: rows = cols = None elif None in (r, c): raise ValueError( 'Pass rows=None and no cols for autosizing.') else: rows, cols = as_int(args[0]), as_int(args[1]) if isinstance(args[2], Callable): op = args[2] if None in (rows, cols): raise ValueError( "{} and {} must be integers for this " "specification.".format(rows, cols)) row_indices = [cls._sympify(i) for i in range(rows)] col_indices = [cls._sympify(j) for j in range(cols)] for i in row_indices: for j in col_indices: value = cls._sympify(op(i, j)) if value != cls.zero: smat[i, j] = value return rows, cols, smat elif isinstance(args[2], (dict, Dict)): def update(i, j, v): # update self._smat and make sure there are # no collisions if v: if (i, j) in smat and v != smat[i, j]: raise ValueError( "There is a collision at {} for {} and {}." .format((i, j), v, smat[i, j]) ) smat[i, j] = v # manual copy, copy.deepcopy() doesn't work for (r, c), v in args[2].items(): if isinstance(v, MatrixBase): for (i, j), vv in v.todok().items(): update(r + i, c + j, vv) elif isinstance(v, (list, tuple)): _, _, smat = cls._handle_creation_inputs(v, **kwargs) for i, j in smat: update(r + i, c + j, smat[i, j]) else: v = cls._sympify(v) update(r, c, cls._sympify(v)) elif is_sequence(args[2]): flat = not any(is_sequence(i) for i in args[2]) if not flat: _, _, smat = \ cls._handle_creation_inputs(args[2], **kwargs) else: flat_list = args[2] if len(flat_list) != rows * cols: raise ValueError( "The length of the flat list ({}) does not " "match the specified size ({} * {})." 
.format(len(flat_list), rows, cols) ) for i in range(rows): for j in range(cols): value = flat_list[i*cols + j] value = cls._sympify(value) if value != cls.zero: smat[i, j] = value if rows is None: # autosizing keys = smat.keys() rows = max([r for r, _ in keys]) + 1 if keys else 0 cols = max([c for _, c in keys]) + 1 if keys else 0 else: for i, j in smat.keys(): if i and i >= rows or j and j >= cols: raise ValueError( "The location {} is out of the designated range" "[{}, {}]x[{}, {}]" .format((i, j), 0, rows - 1, 0, cols - 1) ) return rows, cols, smat elif len(args) == 1 and isinstance(args[0], (list, tuple)): # list of values or lists v = args[0] c = 0 for i, row in enumerate(v): if not isinstance(row, (list, tuple)): row = [row] for j, vv in enumerate(row): if vv != cls.zero: smat[i, j] = cls._sympify(vv) c = max(c, len(row)) rows = len(v) if c else 0 cols = c return rows, cols, smat else: # handle full matrix forms with _handle_creation_inputs rows, cols, mat = super()._handle_creation_inputs(*args) for i in range(rows): for j in range(cols): value = mat[cols*i + j] if value != cls.zero: smat[i, j] = value return rows, cols, smat def __eq__(self, other): try: other = _sympify(other) except SympifyError: return NotImplemented self_shape = getattr(self, 'shape', None) other_shape = getattr(other, 'shape', None) if None in (self_shape, other_shape): return False if self_shape != other_shape: return False if isinstance(other, SparseMatrix): return self._smat == other._smat elif isinstance(other, MatrixBase): return self._smat == MutableSparseMatrix(other)._smat def __getitem__(self, key): if isinstance(key, tuple): i, j = key try: i, j = self.key2ij(key) return self._smat.get((i, j), S.Zero) except (TypeError, IndexError): if isinstance(i, slice): i = range(self.rows)[i] elif is_sequence(i): pass elif isinstance(i, Expr) and not i.is_number: from sympy.matrices.expressions.matexpr import MatrixElement return MatrixElement(self, i, j) else: if i >= self.rows: raise IndexError('Row index out of bounds') i = [i] if isinstance(j, slice): j = range(self.cols)[j] elif is_sequence(j): pass elif isinstance(j, Expr) and not j.is_number: from sympy.matrices.expressions.matexpr import MatrixElement return MatrixElement(self, i, j) else: if j >= self.cols: raise IndexError('Col index out of bounds') j = [j] return self.extract(i, j) # check for single arg, like M[:] or M[3] if isinstance(key, slice): lo, hi = key.indices(len(self))[:2] L = [] for i in range(lo, hi): m, n = divmod(i, self.cols) L.append(self._smat.get((m, n), S.Zero)) return L i, j = divmod(a2idx(key, len(self)), self.cols) return self._smat.get((i, j), S.Zero) def __setitem__(self, key, value): raise NotImplementedError() def _eval_inverse(self, **kwargs): return self.inv(method=kwargs.get('method', 'LDL'), iszerofunc=kwargs.get('iszerofunc', _iszero), try_block_diag=kwargs.get('try_block_diag', False)) def _eval_Abs(self): return self.applyfunc(lambda x: Abs(x)) def _eval_add(self, other): """If `other` is a SparseMatrix, add efficiently. 
Otherwise, do standard addition.""" if not isinstance(other, SparseMatrix): return self + self._new(other) smat = {} zero = self._sympify(0) for key in set().union(self._smat.keys(), other._smat.keys()): sum = self._smat.get(key, zero) + other._smat.get(key, zero) if sum != 0: smat[key] = sum return self._new(self.rows, self.cols, smat) def _eval_col_insert(self, icol, other): if not isinstance(other, SparseMatrix): other = MutableSparseMatrix(other) new_smat = {} # make room for the new rows for key, val in self._smat.items(): row, col = key if col >= icol: col += other.cols new_smat[row, col] = val # add other's keys for key, val in other._smat.items(): row, col = key new_smat[row, col + icol] = val return self._new(self.rows, self.cols + other.cols, new_smat) def _eval_conjugate(self): smat = {key: val.conjugate() for key,val in self._smat.items()} return self._new(self.rows, self.cols, smat) def _eval_extract(self, rowsList, colsList): urow = list(uniq(rowsList)) ucol = list(uniq(colsList)) smat = {} if len(urow)*len(ucol) < len(self._smat): # there are fewer elements requested than there are elements in the matrix for i, r in enumerate(urow): for j, c in enumerate(ucol): smat[i, j] = self._smat.get((r, c), 0) else: # most of the request will be zeros so check all of self's entries, # keeping only the ones that are desired for rk, ck in self._smat: if rk in urow and ck in ucol: smat[urow.index(rk), ucol.index(ck)] = self._smat[rk, ck] rv = self._new(len(urow), len(ucol), smat) # rv is nominally correct but there might be rows/cols # which require duplication if len(rowsList) != len(urow): for i, r in enumerate(rowsList): i_previous = rowsList.index(r) if i_previous != i: rv = rv.row_insert(i, rv.row(i_previous)) if len(colsList) != len(ucol): for i, c in enumerate(colsList): i_previous = colsList.index(c) if i_previous != i: rv = rv.col_insert(i, rv.col(i_previous)) return rv @classmethod def _eval_eye(cls, rows, cols): entries = {(i,i): S.One for i in range(min(rows, cols))} return cls._new(rows, cols, entries) def _eval_has(self, *patterns): # if the matrix has any zeros, see if S.Zero # has the pattern. If _smat is full length, # the matrix has no zeros. zhas = S.Zero.has(*patterns) if len(self._smat) == self.rows*self.cols: zhas = False return any(self[key].has(*patterns) for key in self._smat) or zhas def _eval_is_Identity(self): if not all(self[i, i] == 1 for i in range(self.rows)): return False return len(self._smat) == self.rows def _eval_is_symmetric(self, simpfunc): diff = (self - self.T).applyfunc(simpfunc) return len(diff.values()) == 0 def _eval_matrix_mul(self, other): """Fast multiplication exploiting the sparsity of the matrix.""" if not isinstance(other, SparseMatrix): other = self._new(other) # if we made it here, we're both sparse matrices # create quick lookups for rows and cols row_lookup = defaultdict(dict) for (i,j), val in self._smat.items(): row_lookup[i][j] = val col_lookup = defaultdict(dict) for (i,j), val in other._smat.items(): col_lookup[j][i] = val smat = {} for row in row_lookup.keys(): for col in col_lookup.keys(): # find the common indices of non-zero entries. # these are the only things that need to be multiplied. 
indices = set(col_lookup[col].keys()) & set(row_lookup[row].keys()) if indices: vec = [row_lookup[row][k]*col_lookup[col][k] for k in indices] try: smat[row, col] = Add(*vec) except (TypeError, SympifyError): # Some matrices don't work with `sum` or `Add` # They don't work with `sum` because `sum` tries to add `0` # Fall back to a safe way to multiply if the `Add` fails. smat[row, col] = reduce(lambda a, b: a + b, vec) return self._new(self.rows, other.cols, smat) def _eval_row_insert(self, irow, other): if not isinstance(other, SparseMatrix): other = MutableSparseMatrix(other) new_smat = {} # make room for the new rows for key, val in self._smat.items(): row, col = key if row >= irow: row += other.rows new_smat[row, col] = val # add other's keys for key, val in other._smat.items(): row, col = key new_smat[row + irow, col] = val return self._new(self.rows + other.rows, self.cols, new_smat) def _eval_scalar_mul(self, other): return self.applyfunc(lambda x: x*other) def _eval_scalar_rmul(self, other): return self.applyfunc(lambda x: other*x) def _eval_todok(self): return self._smat.copy() def _eval_transpose(self): """Returns the transposed SparseMatrix of this SparseMatrix. Examples ======== >>> from sympy.matrices import SparseMatrix >>> a = SparseMatrix(((1, 2), (3, 4))) >>> a Matrix([ [1, 2], [3, 4]]) >>> a.T Matrix([ [1, 3], [2, 4]]) """ smat = {(j,i): val for (i,j),val in self._smat.items()} return self._new(self.cols, self.rows, smat) def _eval_values(self): return [v for k,v in self._smat.items() if not v.is_zero] @classmethod def _eval_zeros(cls, rows, cols): return cls._new(rows, cols, {}) @property def _mat(self): """Return a list of matrix elements. Some routines in DenseMatrix use `_mat` directly to speed up operations.""" return list(self) def applyfunc(self, f): """Apply a function to each element of the matrix. Examples ======== >>> from sympy.matrices import SparseMatrix >>> m = SparseMatrix(2, 2, lambda i, j: i*2+j) >>> m Matrix([ [0, 1], [2, 3]]) >>> m.applyfunc(lambda i: 2*i) Matrix([ [0, 2], [4, 6]]) """ if not callable(f): raise TypeError("`f` must be callable.") out = self.copy() for k, v in self._smat.items(): fv = f(v) if fv: out._smat[k] = fv else: out._smat.pop(k, None) return out def as_immutable(self): """Returns an Immutable version of this Matrix.""" from .immutable import ImmutableSparseMatrix return ImmutableSparseMatrix(self) def as_mutable(self): """Returns a mutable version of this matrix. Examples ======== >>> from sympy import ImmutableMatrix >>> X = ImmutableMatrix([[1, 2], [3, 4]]) >>> Y = X.as_mutable() >>> Y[1, 1] = 5 # Can set values in Y >>> Y Matrix([ [1, 2], [3, 5]]) """ return MutableSparseMatrix(self) def col_list(self): """Returns a column-sorted list of non-zero elements of the matrix. Examples ======== >>> from sympy.matrices import SparseMatrix >>> a=SparseMatrix(((1, 2), (3, 4))) >>> a Matrix([ [1, 2], [3, 4]]) >>> a.CL [(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)] See Also ======== sympy.matrices.sparse.MutableSparseMatrix.col_op sympy.matrices.sparse.SparseMatrix.row_list """ return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))] def copy(self): return self._new(self.rows, self.cols, self._smat) def nnz(self): """Returns the number of non-zero elements in Matrix.""" return len(self._smat) def row_list(self): """Returns a row-sorted list of non-zero elements of the matrix. 
Examples ======== >>> from sympy.matrices import SparseMatrix >>> a = SparseMatrix(((1, 2), (3, 4))) >>> a Matrix([ [1, 2], [3, 4]]) >>> a.RL [(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)] See Also ======== sympy.matrices.sparse.MutableSparseMatrix.row_op sympy.matrices.sparse.SparseMatrix.col_list """ return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(k))] def scalar_multiply(self, scalar): "Scalar element-wise multiplication" M = self.zeros(*self.shape) if scalar: for i in self._smat: v = scalar*self._smat[i] if v: M._smat[i] = v else: M._smat.pop(i, None) return M def solve_least_squares(self, rhs, method='LDL'): """Return the least-square fit to the data. By default the cholesky_solve routine is used (method='CH'); other methods of matrix inversion can be used. To find out which are available, see the docstring of the .inv() method. Examples ======== >>> from sympy.matrices import SparseMatrix, Matrix, ones >>> A = Matrix([1, 2, 3]) >>> B = Matrix([2, 3, 4]) >>> S = SparseMatrix(A.row_join(B)) >>> S Matrix([ [1, 2], [2, 3], [3, 4]]) If each line of S represent coefficients of Ax + By and x and y are [2, 3] then S*xy is: >>> r = S*Matrix([2, 3]); r Matrix([ [ 8], [13], [18]]) But let's add 1 to the middle value and then solve for the least-squares value of xy: >>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy Matrix([ [ 5/3], [10/3]]) The error is given by S*xy - r: >>> S*xy - r Matrix([ [1/3], [1/3], [1/3]]) >>> _.norm().n(2) 0.58 If a different xy is used, the norm will be higher: >>> xy += ones(2, 1)/10 >>> (S*xy - r).norm().n(2) 1.5 """ t = self.T return (t*self).inv(method=method)*t*rhs def solve(self, rhs, method='LDL'): """Return solution to self*soln = rhs using given inversion method. For a list of possible inversion methods, see the .inv() docstring. """ if not self.is_square: if self.rows < self.cols: raise ValueError('Under-determined system.') elif self.rows > self.cols: raise ValueError('For over-determined system, M, having ' 'more rows than columns, try M.solve_least_squares(rhs).') else: return self.inv(method=method).multiply(rhs) RL = property(row_list, None, None, "Alternate faster representation") CL = property(col_list, None, None, "Alternate faster representation") def liupc(self): return _liupc(self) def row_structure_symbolic_cholesky(self): return _row_structure_symbolic_cholesky(self) def cholesky(self, hermitian=True): return _cholesky_sparse(self, hermitian=hermitian) def LDLdecomposition(self, hermitian=True): return _LDLdecomposition_sparse(self, hermitian=hermitian) def lower_triangular_solve(self, rhs): return _lower_triangular_solve_sparse(self, rhs) def upper_triangular_solve(self, rhs): return _upper_triangular_solve_sparse(self, rhs) liupc.__doc__ = _liupc.__doc__ row_structure_symbolic_cholesky.__doc__ = _row_structure_symbolic_cholesky.__doc__ cholesky.__doc__ = _cholesky_sparse.__doc__ LDLdecomposition.__doc__ = _LDLdecomposition_sparse.__doc__ lower_triangular_solve.__doc__ = lower_triangular_solve.__doc__ upper_triangular_solve.__doc__ = upper_triangular_solve.__doc__ class MutableSparseMatrix(SparseMatrix, MatrixBase): def __new__(cls, *args, **kwargs): return cls._new(*args, **kwargs) @classmethod def _new(cls, *args, **kwargs): obj = super().__new__(cls) rows, cols, smat = cls._handle_creation_inputs(*args, **kwargs) obj.rows = rows obj.cols = cols obj._smat = smat return obj def __setitem__(self, key, value): """Assign value to position designated by key. 
Examples ======== >>> from sympy.matrices import SparseMatrix, ones >>> M = SparseMatrix(2, 2, {}) >>> M[1] = 1; M Matrix([ [0, 1], [0, 0]]) >>> M[1, 1] = 2; M Matrix([ [0, 1], [0, 2]]) >>> M = SparseMatrix(2, 2, {}) >>> M[:, 1] = [1, 1]; M Matrix([ [0, 1], [0, 1]]) >>> M = SparseMatrix(2, 2, {}) >>> M[1, :] = [[1, 1]]; M Matrix([ [0, 0], [1, 1]]) To replace row r you assign to position r*m where m is the number of columns: >>> M = SparseMatrix(4, 4, {}) >>> m = M.cols >>> M[3*m] = ones(1, m)*2; M Matrix([ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [2, 2, 2, 2]]) And to replace column c you can assign to position c: >>> M[2] = ones(m, 1)*4; M Matrix([ [0, 0, 4, 0], [0, 0, 4, 0], [0, 0, 4, 0], [2, 2, 4, 2]]) """ rv = self._setitem(key, value) if rv is not None: i, j, value = rv if value: self._smat[i, j] = value elif (i, j) in self._smat: del self._smat[i, j] def as_mutable(self): return self.copy() __hash__ = None # type: ignore def _eval_col_del(self, k): newD = {} for i, j in self._smat: if j == k: pass elif j > k: newD[i, j - 1] = self._smat[i, j] else: newD[i, j] = self._smat[i, j] self._smat = newD self.cols -= 1 def _eval_row_del(self, k): newD = {} for i, j in self._smat: if i == k: pass elif i > k: newD[i - 1, j] = self._smat[i, j] else: newD[i, j] = self._smat[i, j] self._smat = newD self.rows -= 1 def col_join(self, other): """Returns B augmented beneath A (row-wise joining):: [A] [B] Examples ======== >>> from sympy import SparseMatrix, Matrix, ones >>> A = SparseMatrix(ones(3)) >>> A Matrix([ [1, 1, 1], [1, 1, 1], [1, 1, 1]]) >>> B = SparseMatrix.eye(3) >>> B Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> C = A.col_join(B); C Matrix([ [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> C == A.col_join(Matrix(B)) True Joining along columns is the same as appending rows at the end of the matrix: >>> C == A.row_insert(A.rows, Matrix(B)) True """ # A null matrix can always be stacked (see #10770) if self.rows == 0 and self.cols != other.cols: return self._new(0, other.cols, []).col_join(other) A, B = self, other if not A.cols == B.cols: raise ShapeError() A = A.copy() if not isinstance(B, SparseMatrix): k = 0 b = B._mat for i in range(B.rows): for j in range(B.cols): v = b[k] if v: A._smat[i + A.rows, j] = v k += 1 else: for (i, j), v in B._smat.items(): A._smat[i + A.rows, j] = v A.rows += B.rows return A def col_op(self, j, f): """In-place operation on col j using two-arg functor whose args are interpreted as (self[i, j], i) for i in range(self.rows). Examples ======== >>> from sympy.matrices import SparseMatrix >>> M = SparseMatrix.eye(3)*2 >>> M[1, 0] = -1 >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M Matrix([ [ 2, 4, 0], [-1, 0, 0], [ 0, 0, 2]]) """ for i in range(self.rows): v = self._smat.get((i, j), S.Zero) fv = f(v, i) if fv: self._smat[i, j] = fv elif v: self._smat.pop((i, j)) def col_swap(self, i, j): """Swap, in place, columns i and j. 
Examples ======== >>> from sympy.matrices import SparseMatrix >>> S = SparseMatrix.eye(3); S[2, 1] = 2 >>> S.col_swap(1, 0); S Matrix([ [0, 1, 0], [1, 0, 0], [2, 0, 1]]) """ if i > j: i, j = j, i rows = self.col_list() temp = [] for ii, jj, v in rows: if jj == i: self._smat.pop((ii, jj)) temp.append((ii, v)) elif jj == j: self._smat.pop((ii, jj)) self._smat[ii, i] = v elif jj > j: break for k, v in temp: self._smat[k, j] = v def copyin_list(self, key, value): if not is_sequence(value): raise TypeError("`value` must be of type list or tuple.") self.copyin_matrix(key, Matrix(value)) def copyin_matrix(self, key, value): # include this here because it's not part of BaseMatrix rlo, rhi, clo, chi = self.key2bounds(key) shape = value.shape dr, dc = rhi - rlo, chi - clo if shape != (dr, dc): raise ShapeError( "The Matrix `value` doesn't have the same dimensions " "as the in sub-Matrix given by `key`.") if not isinstance(value, SparseMatrix): for i in range(value.rows): for j in range(value.cols): self[i + rlo, j + clo] = value[i, j] else: if (rhi - rlo)*(chi - clo) < len(self): for i in range(rlo, rhi): for j in range(clo, chi): self._smat.pop((i, j), None) else: for i, j, v in self.row_list(): if rlo <= i < rhi and clo <= j < chi: self._smat.pop((i, j), None) for k, v in value._smat.items(): i, j = k self[i + rlo, j + clo] = value[i, j] def fill(self, value): """Fill self with the given value. Notes ===== Unless many values are going to be deleted (i.e. set to zero) this will create a matrix that is slower than a dense matrix in operations. Examples ======== >>> from sympy.matrices import SparseMatrix >>> M = SparseMatrix.zeros(3); M Matrix([ [0, 0, 0], [0, 0, 0], [0, 0, 0]]) >>> M.fill(1); M Matrix([ [1, 1, 1], [1, 1, 1], [1, 1, 1]]) """ if not value: self._smat = {} else: v = self._sympify(value) self._smat = {(i, j): v for i in range(self.rows) for j in range(self.cols)} def row_join(self, other): """Returns B appended after A (column-wise augmenting):: [A B] Examples ======== >>> from sympy import SparseMatrix, Matrix >>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0))) >>> A Matrix([ [1, 0, 1], [0, 1, 0], [1, 1, 0]]) >>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1))) >>> B Matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> C = A.row_join(B); C Matrix([ [1, 0, 1, 1, 0, 0], [0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 1]]) >>> C == A.row_join(Matrix(B)) True Joining at row ends is the same as appending columns at the end of the matrix: >>> C == A.col_insert(A.cols, B) True """ # A null matrix can always be stacked (see #10770) if self.cols == 0 and self.rows != other.rows: return self._new(other.rows, 0, []).row_join(other) A, B = self, other if not A.rows == B.rows: raise ShapeError() A = A.copy() if not isinstance(B, SparseMatrix): k = 0 b = B._mat for i in range(B.rows): for j in range(B.cols): v = b[k] if v: A._smat[i, j + A.cols] = v k += 1 else: for (i, j), v in B._smat.items(): A._smat[i, j + A.cols] = v A.cols += B.cols return A def row_op(self, i, f): """In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], j)``. Examples ======== >>> from sympy.matrices import SparseMatrix >>> M = SparseMatrix.eye(3)*2 >>> M[0, 1] = -1 >>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M Matrix([ [2, -1, 0], [4, 0, 0], [0, 0, 2]]) See Also ======== row zip_row_op col_op """ for j in range(self.cols): v = self._smat.get((i, j), S.Zero) fv = f(v, j) if fv: self._smat[i, j] = fv elif v: self._smat.pop((i, j)) def row_swap(self, i, j): """Swap, in place, columns i and j. 
Examples ======== >>> from sympy.matrices import SparseMatrix >>> S = SparseMatrix.eye(3); S[2, 1] = 2 >>> S.row_swap(1, 0); S Matrix([ [0, 1, 0], [1, 0, 0], [0, 2, 1]]) """ if i > j: i, j = j, i rows = self.row_list() temp = [] for ii, jj, v in rows: if ii == i: self._smat.pop((ii, jj)) temp.append((jj, v)) elif ii == j: self._smat.pop((ii, jj)) self._smat[i, jj] = v elif ii > j: break for k, v in temp: self._smat[j, k] = v def zip_row_op(self, i, k, f): """In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], self[k, j])``. Examples ======== >>> from sympy.matrices import SparseMatrix >>> M = SparseMatrix.eye(3)*2 >>> M[0, 1] = -1 >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M Matrix([ [2, -1, 0], [4, 0, 0], [0, 0, 2]]) See Also ======== row row_op col_op """ self.row_op(i, lambda v, j: f(v, self[k, j])) is_zero = False
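# Illustrative sketch, not part of the library source: a short interactive
# session exercising the sparse classes defined above.  The matrices and
# expected outputs are hand-worked examples, assuming a standard SymPy
# installation.
#
#   >>> from sympy.matrices import SparseMatrix, MutableSparseMatrix
#   >>> A = SparseMatrix(3, 3, {(0, 0): 1, (1, 2): 5})
#   >>> A.nnz()            # only the two assigned entries are stored
#   2
#   >>> A.RL               # row-sorted (row, col, value) triples
#   [(0, 0, 1), (1, 2, 5)]
#   >>> (A*A.T).nnz()      # multiplication only touches non-zero entries
#   2
#   >>> M = MutableSparseMatrix.eye(3)
#   >>> M[2, 0] = 7        # item assignment requires the mutable class
#   >>> M.col_swap(0, 2); M.nnz()
#   4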
from types import FunctionType from collections import Counter from mpmath import mp, workprec from mpmath.libmp.libmpf import prec_to_dps from sympy.core.compatibility import default_sort_key from sympy.core.evalf import DEFAULT_MAXPREC, PrecisionExhausted from sympy.core.logic import fuzzy_and, fuzzy_or from sympy.core.numbers import Float from sympy.core.sympify import _sympify from sympy.functions.elementary.miscellaneous import sqrt from sympy.polys import roots, CRootOf, EX from sympy.polys.domainmatrix import ( DomainMatrix, dom_eigenvects, dom_eigenvects_to_sympy) from sympy.simplify import nsimplify, simplify as _simplify from sympy.utilities.exceptions import SymPyDeprecationWarning from .common import MatrixError, NonSquareMatrixError from .determinant import _find_reasonable_pivot from .utilities import _iszero def _eigenvals_triangular(M, multiple=False): """A fast decision for eigenvalues of an upper or a lower triangular matrix. """ diagonal_entries = [M[i, i] for i in range(M.rows)] if multiple: return diagonal_entries return dict(Counter(diagonal_entries)) def _eigenvals_eigenvects_mpmath(M): norm2 = lambda v: mp.sqrt(sum(i**2 for i in v)) v1 = None prec = max([x._prec for x in M.atoms(Float)]) eps = 2**-prec while prec < DEFAULT_MAXPREC: with workprec(prec): A = mp.matrix(M.evalf(n=prec_to_dps(prec))) E, ER = mp.eig(A) v2 = norm2([i for e in E for i in (mp.re(e), mp.im(e))]) if v1 is not None and mp.fabs(v1 - v2) < eps: return E, ER v1 = v2 prec *= 2 # we get here because the next step would have taken us # past MAXPREC or because we never took a step; in case # of the latter, we refuse to send back a solution since # it would not have been verified; we also resist taking # a small step to arrive exactly at MAXPREC since then # the two calculations might be artificially close. raise PrecisionExhausted def _eigenvals_mpmath(M, multiple=False): """Compute eigenvalues using mpmath""" E, _ = _eigenvals_eigenvects_mpmath(M) result = [_sympify(x) for x in E] if multiple: return result return dict(Counter(result)) def _eigenvects_mpmath(M): E, ER = _eigenvals_eigenvects_mpmath(M) result = [] for i in range(M.rows): eigenval = _sympify(E[i]) eigenvect = _sympify(ER[:, i]) result.append((eigenval, 1, [eigenvect])) return result # This functions is a candidate for caching if it gets implemented for matrices. def _eigenvals( M, error_when_incomplete=True, *, simplify=False, multiple=False, rational=False, **flags): r"""Compute eigenvalues of the matrix. Parameters ========== error_when_incomplete : bool, optional If it is set to ``True``, it will raise an error if not all eigenvalues are computed. This is caused by ``roots`` not returning a full list of eigenvalues. simplify : bool or function, optional If it is set to ``True``, it attempts to return the most simplified form of expressions returned by applying default simplification method in every routine. If it is set to ``False``, it will skip simplification in this particular routine to save computation resources. If a function is passed to, it will attempt to apply the particular function as simplification method. rational : bool, optional If it is set to ``True``, every floating point numbers would be replaced with rationals before computation. It can solve some issues of ``roots`` routine not working well with floats. multiple : bool, optional If it is set to ``True``, the result will be in the form of a list. If it is set to ``False``, the result will be in the form of a dictionary. 
Returns ======= eigs : list or dict Eigenvalues of a matrix. The return format would be specified by the key ``multiple``. Raises ====== MatrixError If not enough roots had got computed. NonSquareMatrixError If attempted to compute eigenvalues from a non-square matrix. Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1]) >>> M.eigenvals() {-1: 1, 0: 1, 2: 1} See Also ======== MatrixDeterminant.charpoly eigenvects Notes ===== Eigenvalues of a matrix $A$ can be computed by solving a matrix equation $\det(A - \lambda I) = 0$ It's not always possible to return radical solutions for eigenvalues for matrices larger than $4, 4$ shape due to Abel-Ruffini theorem. If there is no radical solution is found for the eigenvalue, it may return eigenvalues in the form of :class:`sympy.polys.rootoftools.ComplexRootOf`. """ if not M: if multiple: return [] return {} if not M.is_square: raise NonSquareMatrixError("{} must be a square matrix.".format(M)) if M.is_upper or M.is_lower: return _eigenvals_triangular(M, multiple=multiple) if all(x.is_number for x in M) and M.has(Float): return _eigenvals_mpmath(M, multiple=multiple) if rational: M = M.applyfunc( lambda x: nsimplify(x, rational=True) if x.has(Float) else x) if multiple: return _eigenvals_list( M, error_when_incomplete=error_when_incomplete, simplify=simplify, **flags) return _eigenvals_dict( M, error_when_incomplete=error_when_incomplete, simplify=simplify, **flags) def _eigenvals_list( M, error_when_incomplete=True, simplify=False, **flags): iblocks = M.connected_components() all_eigs = [] for b in iblocks: block = M[b, b] if isinstance(simplify, FunctionType): charpoly = block.charpoly(simplify=simplify) else: charpoly = block.charpoly() eigs = roots(charpoly, multiple=True, **flags) if len(eigs) != block.rows: degree = int(charpoly.degree()) f = charpoly.as_expr() x = charpoly.gen try: eigs = [CRootOf(f, x, idx) for idx in range(degree)] except NotImplementedError: if error_when_incomplete: raise MatrixError else: eigs = [] all_eigs += eigs if not simplify: return all_eigs if not isinstance(simplify, FunctionType): simplify = _simplify return [simplify(value) for value in all_eigs] def _eigenvals_dict( M, error_when_incomplete=True, simplify=False, **flags): iblocks = M.connected_components() all_eigs = {} for b in iblocks: block = M[b, b] if isinstance(simplify, FunctionType): charpoly = block.charpoly(simplify=simplify) else: charpoly = block.charpoly() eigs = roots(charpoly, multiple=False, **flags) if sum(eigs.values()) != block.rows: degree = int(charpoly.degree()) f = charpoly.as_expr() x = charpoly.gen try: eigs = {CRootOf(f, x, idx): 1 for idx in range(degree)} except NotImplementedError: if error_when_incomplete: raise MatrixError else: eigs = {} for k, v in eigs.items(): if k in all_eigs: all_eigs[k] += v else: all_eigs[k] = v if not simplify: return all_eigs if not isinstance(simplify, FunctionType): simplify = _simplify return {simplify(key): value for key, value in all_eigs.items()} def _eigenspace(M, eigenval, iszerofunc=_iszero, simplify=False): """Get a basis for the eigenspace for a particular eigenvalue""" m = M - M.eye(M.rows) * eigenval ret = m.nullspace(iszerofunc=iszerofunc) # The nullspace for a real eigenvalue should be non-trivial. 
# If we didn't find an eigenvector, try once more a little harder if len(ret) == 0 and simplify: ret = m.nullspace(iszerofunc=iszerofunc, simplify=True) if len(ret) == 0: raise NotImplementedError( "Can't evaluate eigenvector for eigenvalue {}".format(eigenval)) return ret def _eigenvects_DOM(M, **kwargs): DOM = DomainMatrix.from_Matrix(M, field=True, extension=True) if DOM.domain != EX: rational, algebraic = dom_eigenvects(DOM) eigenvects = dom_eigenvects_to_sympy( rational, algebraic, M.__class__, **kwargs) eigenvects = sorted(eigenvects, key=lambda x: default_sort_key(x[0])) return eigenvects return None def _eigenvects_sympy(M, iszerofunc, simplify=True, **flags): eigenvals = M.eigenvals(rational=False, **flags) # Make sure that we have all roots in radical form for x in eigenvals: if x.has(CRootOf): raise MatrixError( "Eigenvector computation is not implemented if the matrix have " "eigenvalues in CRootOf form") eigenvals = sorted(eigenvals.items(), key=default_sort_key) ret = [] for val, mult in eigenvals: vects = _eigenspace(M, val, iszerofunc=iszerofunc, simplify=simplify) ret.append((val, mult, vects)) return ret # This functions is a candidate for caching if it gets implemented for matrices. def _eigenvects(M, error_when_incomplete=True, iszerofunc=_iszero, *, chop=False, **flags): """Compute eigenvectors of the matrix. Parameters ========== error_when_incomplete : bool, optional Raise an error when not all eigenvalues are computed. This is caused by ``roots`` not returning a full list of eigenvalues. iszerofunc : function, optional Specifies a zero testing function to be used in ``rref``. Default value is ``_iszero``, which uses SymPy's naive and fast default assumption handler. It can also accept any user-specified zero testing function, if it is formatted as a function which accepts a single symbolic argument and returns ``True`` if it is tested as zero and ``False`` if it is tested as non-zero, and ``None`` if it is undecidable. simplify : bool or function, optional If ``True``, ``as_content_primitive()`` will be used to tidy up normalization artifacts. It will also be used by the ``nullspace`` routine. chop : bool or positive number, optional If the matrix contains any Floats, they will be changed to Rationals for computation purposes, but the answers will be returned after being evaluated with evalf. The ``chop`` flag is passed to ``evalf``. When ``chop=True`` a default precision will be used; a number will be interpreted as the desired level of precision. Returns ======= ret : [(eigenval, multiplicity, eigenspace), ...] A ragged list containing tuples of data obtained by ``eigenvals`` and ``nullspace``. ``eigenspace`` is a list containing the ``eigenvector`` for each eigenvalue. ``eigenvector`` is a vector in the form of a ``Matrix``. e.g. a vector of length 3 is returned as ``Matrix([a_1, a_2, a_3])``. Raises ====== NotImplementedError If failed to compute nullspace. 
Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1]) >>> M.eigenvects() [(-1, 1, [Matrix([ [-1], [ 1], [ 0]])]), (0, 1, [Matrix([ [ 0], [-1], [ 1]])]), (2, 1, [Matrix([ [2/3], [1/3], [ 1]])])] See Also ======== eigenvals MatrixSubspaces.nullspace """ simplify = flags.get('simplify', True) primitive = flags.get('simplify', False) flags.pop('simplify', None) # remove this if it's there flags.pop('multiple', None) # remove this if it's there if not isinstance(simplify, FunctionType): simpfunc = _simplify if simplify else lambda x: x has_floats = M.has(Float) if has_floats: if all(x.is_number for x in M): return _eigenvects_mpmath(M) M = M.applyfunc(lambda x: nsimplify(x, rational=True)) ret = _eigenvects_DOM(M) if ret is None: ret = _eigenvects_sympy(M, iszerofunc, simplify=simplify, **flags) if primitive: # if the primitive flag is set, get rid of any common # integer denominators def denom_clean(l): from sympy import gcd return [(v / gcd(list(v))).applyfunc(simpfunc) for v in l] ret = [(val, mult, denom_clean(es)) for val, mult, es in ret] if has_floats: # if we had floats to start with, turn the eigenvectors to floats ret = [(val.evalf(chop=chop), mult, [v.evalf(chop=chop) for v in es]) for val, mult, es in ret] return ret def _is_diagonalizable_with_eigen(M, reals_only=False): """See _is_diagonalizable. This function returns the bool along with the eigenvectors to avoid calculating them again in functions like ``diagonalize``.""" if not M.is_square: return False, [] eigenvecs = M.eigenvects(simplify=True) for val, mult, basis in eigenvecs: if reals_only and not val.is_real: # if we have a complex eigenvalue return False, eigenvecs if mult != len(basis): # if the geometric multiplicity doesn't equal the algebraic return False, eigenvecs return True, eigenvecs def _is_diagonalizable(M, reals_only=False, **kwargs): """Returns ``True`` if a matrix is diagonalizable. Parameters ========== reals_only : bool, optional If ``True``, it tests whether the matrix can be diagonalized to contain only real numbers on the diagonal. If ``False``, it tests whether the matrix can be diagonalized at all, even with numbers that may not be real. 
Examples ======== Example of a diagonalizable matrix: >>> from sympy import Matrix >>> M = Matrix([[1, 2, 0], [0, 3, 0], [2, -4, 2]]) >>> M.is_diagonalizable() True Example of a non-diagonalizable matrix: >>> M = Matrix([[0, 1], [0, 0]]) >>> M.is_diagonalizable() False Example of a matrix that is diagonalized in terms of non-real entries: >>> M = Matrix([[0, 1], [-1, 0]]) >>> M.is_diagonalizable(reals_only=False) True >>> M.is_diagonalizable(reals_only=True) False See Also ======== is_diagonal diagonalize """ if 'clear_cache' in kwargs: SymPyDeprecationWarning( feature='clear_cache', deprecated_since_version=1.4, issue=15887 ).warn() if 'clear_subproducts' in kwargs: SymPyDeprecationWarning( feature='clear_subproducts', deprecated_since_version=1.4, issue=15887 ).warn() if not M.is_square: return False if all(e.is_real for e in M) and M.is_symmetric(): return True if all(e.is_complex for e in M) and M.is_hermitian: return True return _is_diagonalizable_with_eigen(M, reals_only=reals_only)[0] #G&VL, Matrix Computations, Algo 5.4.2 def _householder_vector(x): if not x.cols == 1: raise ValueError("Input must be a column matrix") v = x.copy() v_plus = x.copy() v_minus = x.copy() q = x[0, 0] / abs(x[0, 0]) norm_x = x.norm() v_plus[0, 0] = x[0, 0] + q * norm_x v_minus[0, 0] = x[0, 0] - q * norm_x if x[1:, 0].norm() == 0: bet = 0 v[0, 0] = 1 else: if v_plus.norm() <= v_minus.norm(): v = v_plus else: v = v_minus v = v / v[0] bet = 2 / (v.norm() ** 2) return v, bet def _bidiagonal_decmp_hholder(M): m = M.rows n = M.cols A = M.as_mutable() U, V = A.eye(m), A.eye(n) for i in range(min(m, n)): v, bet = _householder_vector(A[i:, i]) hh_mat = A.eye(m - i) - bet * v * v.H A[i:, i:] = hh_mat * A[i:, i:] temp = A.eye(m) temp[i:, i:] = hh_mat U = U * temp if i + 1 <= n - 2: v, bet = _householder_vector(A[i, i+1:].T) hh_mat = A.eye(n - i - 1) - bet * v * v.H A[i:, i+1:] = A[i:, i+1:] * hh_mat temp = A.eye(n) temp[i+1:, i+1:] = hh_mat V = temp * V return U, A, V def _eval_bidiag_hholder(M): m = M.rows n = M.cols A = M.as_mutable() for i in range(min(m, n)): v, bet = _householder_vector(A[i:, i]) hh_mat = A.eye(m-i) - bet * v * v.H A[i:, i:] = hh_mat * A[i:, i:] if i + 1 <= n - 2: v, bet = _householder_vector(A[i, i+1:].T) hh_mat = A.eye(n - i - 1) - bet * v * v.H A[i:, i+1:] = A[i:, i+1:] * hh_mat return A def _bidiagonal_decomposition(M, upper=True): """ Returns (U,B,V.H) $A = UBV^{H}$ where A is the input matrix, and B is its Bidiagonalized form Note: Bidiagonal Computation can hang for symbolic matrices. Parameters ========== upper : bool. Whether to do upper bidiagnalization or lower. True for upper and False for lower. References ========== 1. Algorith 5.4.2, Matrix computations by Golub and Van Loan, 4th edition 2. Complex Matrix Bidiagonalization : https://github.com/vslobody/Householder-Bidiagonalization """ if type(upper) is not bool: raise ValueError("upper must be a boolean") if not upper: X = _bidiagonal_decmp_hholder(M.H) return X[2].H, X[1].H, X[0].H return _bidiagonal_decmp_hholder(M) def _bidiagonalize(M, upper=True): """ Returns $B$, the Bidiagonalized form of the input matrix. Note: Bidiagonal Computation can hang for symbolic matrices. Parameters ========== upper : bool. Whether to do upper bidiagnalization or lower. True for upper and False for lower. References ========== 1. Algorith 5.4.2, Matrix computations by Golub and Van Loan, 4th edition 2. 
Complex Matrix Bidiagonalization : https://github.com/vslobody/Householder-Bidiagonalization """ if type(upper) is not bool: raise ValueError("upper must be a boolean") if not upper: return _eval_bidiag_hholder(M.H).H return _eval_bidiag_hholder(M) def _diagonalize(M, reals_only=False, sort=False, normalize=False): """ Return (P, D), where D is diagonal and D = P^-1 * M * P where M is current matrix. Parameters ========== reals_only : bool. Whether to throw an error if complex numbers are need to diagonalize. (Default: False) sort : bool. Sort the eigenvalues along the diagonal. (Default: False) normalize : bool. If True, normalize the columns of P. (Default: False) Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2]) >>> M Matrix([ [1, 2, 0], [0, 3, 0], [2, -4, 2]]) >>> (P, D) = M.diagonalize() >>> D Matrix([ [1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> P Matrix([ [-1, 0, -1], [ 0, 0, -1], [ 2, 1, 2]]) >>> P.inv() * M * P Matrix([ [1, 0, 0], [0, 2, 0], [0, 0, 3]]) See Also ======== is_diagonal is_diagonalizable """ if not M.is_square: raise NonSquareMatrixError() is_diagonalizable, eigenvecs = _is_diagonalizable_with_eigen(M, reals_only=reals_only) if not is_diagonalizable: raise MatrixError("Matrix is not diagonalizable") if sort: eigenvecs = sorted(eigenvecs, key=default_sort_key) p_cols, diag = [], [] for val, mult, basis in eigenvecs: diag += [val] * mult p_cols += basis if normalize: p_cols = [v / v.norm() for v in p_cols] return M.hstack(*p_cols), M.diag(*diag) def _fuzzy_positive_definite(M): positive_diagonals = M._has_positive_diagonals() if positive_diagonals is False: return False if positive_diagonals and M.is_strongly_diagonally_dominant: return True return None def _fuzzy_positive_semidefinite(M): nonnegative_diagonals = M._has_nonnegative_diagonals() if nonnegative_diagonals is False: return False if nonnegative_diagonals and M.is_weakly_diagonally_dominant: return True return None def _is_positive_definite(M): if not M.is_hermitian: if not M.is_square: return False M = M + M.H fuzzy = _fuzzy_positive_definite(M) if fuzzy is not None: return fuzzy return _is_positive_definite_GE(M) def _is_positive_semidefinite(M): if not M.is_hermitian: if not M.is_square: return False M = M + M.H fuzzy = _fuzzy_positive_semidefinite(M) if fuzzy is not None: return fuzzy return _is_positive_semidefinite_cholesky(M) def _is_negative_definite(M): return _is_positive_definite(-M) def _is_negative_semidefinite(M): return _is_positive_semidefinite(-M) def _is_indefinite(M): if M.is_hermitian: eigen = M.eigenvals() args1 = [x.is_positive for x in eigen.keys()] any_positive = fuzzy_or(args1) args2 = [x.is_negative for x in eigen.keys()] any_negative = fuzzy_or(args2) return fuzzy_and([any_positive, any_negative]) elif M.is_square: return (M + M.H).is_indefinite return False def _is_positive_definite_GE(M): """A division-free gaussian elimination method for testing positive-definiteness.""" M = M.as_mutable() size = M.rows for i in range(size): is_positive = M[i, i].is_positive if is_positive is not True: return is_positive for j in range(i+1, size): M[j, i+1:] = M[i, i] * M[j, i+1:] - M[j, i] * M[i, i+1:] return True def _is_positive_semidefinite_cholesky(M): """Uses Cholesky factorization with complete pivoting References ========== .. [1] http://eprints.ma.man.ac.uk/1199/1/covered/MIMS_ep2008_116.pdf .. 
[2] https://www.value-at-risk.net/cholesky-factorization/ """ M = M.as_mutable() for k in range(M.rows): diags = [M[i, i] for i in range(k, M.rows)] pivot, pivot_val, nonzero, _ = _find_reasonable_pivot(diags) if nonzero: return None if pivot is None: for i in range(k+1, M.rows): for j in range(k, M.cols): iszero = M[i, j].is_zero if iszero is None: return None elif iszero is False: return False return True if M[k, k].is_negative or pivot_val.is_negative: return False if pivot > 0: M.col_swap(k, k+pivot) M.row_swap(k, k+pivot) M[k, k] = sqrt(M[k, k]) M[k, k+1:] /= M[k, k] M[k+1:, k+1:] -= M[k, k+1:].H * M[k, k+1:] return M[-1, -1].is_nonnegative _doc_positive_definite = \ r"""Finds out the definiteness of a matrix. Explanation =========== A square real matrix $A$ is: - A positive definite matrix if $x^T A x > 0$ for all non-zero real vectors $x$. - A positive semidefinite matrix if $x^T A x \geq 0$ for all non-zero real vectors $x$. - A negative definite matrix if $x^T A x < 0$ for all non-zero real vectors $x$. - A negative semidefinite matrix if $x^T A x \leq 0$ for all non-zero real vectors $x$. - An indefinite matrix if there exists non-zero real vectors $x, y$ with $x^T A x > 0 > y^T A y$. A square complex matrix $A$ is: - A positive definite matrix if $\text{re}(x^H A x) > 0$ for all non-zero complex vectors $x$. - A positive semidefinite matrix if $\text{re}(x^H A x) \geq 0$ for all non-zero complex vectors $x$. - A negative definite matrix if $\text{re}(x^H A x) < 0$ for all non-zero complex vectors $x$. - A negative semidefinite matrix if $\text{re}(x^H A x) \leq 0$ for all non-zero complex vectors $x$. - An indefinite matrix if there exists non-zero complex vectors $x, y$ with $\text{re}(x^H A x) > 0 > \text{re}(y^H A y)$. A matrix need not be symmetric or hermitian to be positive definite. - A real non-symmetric matrix is positive definite if and only if $\frac{A + A^T}{2}$ is positive definite. - A complex non-hermitian matrix is positive definite if and only if $\frac{A + A^H}{2}$ is positive definite. And this extension can apply for all the definitions above. However, for complex cases, you can restrict the definition of $\text{re}(x^H A x) > 0$ to $x^H A x > 0$ and require the matrix to be hermitian. But we do not present this restriction for computation because you can check ``M.is_hermitian`` independently with this and use the same procedure. Examples ======== An example of symmetric positive definite matrix: .. plot:: :context: reset :format: doctest :include-source: True >>> from sympy import Matrix, symbols >>> from sympy.plotting import plot3d >>> a, b = symbols('a b') >>> x = Matrix([a, b]) >>> A = Matrix([[1, 0], [0, 1]]) >>> A.is_positive_definite True >>> A.is_positive_semidefinite True >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1)) An example of symmetric positive semidefinite matrix: .. plot:: :context: close-figs :format: doctest :include-source: True >>> A = Matrix([[1, -1], [-1, 1]]) >>> A.is_positive_definite False >>> A.is_positive_semidefinite True >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1)) An example of symmetric negative definite matrix: .. plot:: :context: close-figs :format: doctest :include-source: True >>> A = Matrix([[-1, 0], [0, -1]]) >>> A.is_negative_definite True >>> A.is_negative_semidefinite True >>> A.is_indefinite False >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1)) An example of symmetric indefinite matrix: .. 
plot:: :context: close-figs :format: doctest :include-source: True >>> A = Matrix([[1, 2], [2, -1]]) >>> A.is_indefinite True >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1)) An example of non-symmetric positive definite matrix. .. plot:: :context: close-figs :format: doctest :include-source: True >>> A = Matrix([[1, 2], [-2, 1]]) >>> A.is_positive_definite True >>> A.is_positive_semidefinite True >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1)) Notes ===== Although some people trivialize the definition of positive definite matrices only for symmetric or hermitian matrices, this restriction is not correct because it does not classify all instances of positive definite matrices from the definition $x^T A x > 0$ or $\text{re}(x^H A x) > 0$. For instance, ``Matrix([[1, 2], [-2, 1]])`` presented in the example above is an example of real positive definite matrix that is not symmetric. However, since the following formula holds true; .. math:: \text{re}(x^H A x) > 0 \iff \text{re}(x^H \frac{A + A^H}{2} x) > 0 We can classify all positive definite matrices that may or may not be symmetric or hermitian by transforming the matrix to $\frac{A + A^T}{2}$ or $\frac{A + A^H}{2}$ (which is guaranteed to be always real symmetric or complex hermitian) and we can defer most of the studies to symmetric or hermitian positive definite matrices. But it is a different problem for the existance of Cholesky decomposition. Because even though a non symmetric or a non hermitian matrix can be positive definite, Cholesky or LDL decomposition does not exist because the decompositions require the matrix to be symmetric or hermitian. References ========== .. [1] https://en.wikipedia.org/wiki/Definiteness_of_a_matrix#Eigenvalues .. [2] http://mathworld.wolfram.com/PositiveDefiniteMatrix.html .. [3] Johnson, C. R. "Positive Definite Matrices." Amer. Math. Monthly 77, 259-264 1970. """ _is_positive_definite.__doc__ = _doc_positive_definite _is_positive_semidefinite.__doc__ = _doc_positive_definite _is_negative_definite.__doc__ = _doc_positive_definite _is_negative_semidefinite.__doc__ = _doc_positive_definite _is_indefinite.__doc__ = _doc_positive_definite def _jordan_form(M, calc_transform=True, *, chop=False): """Return $(P, J)$ where $J$ is a Jordan block matrix and $P$ is a matrix such that $M = P J P^{-1}$ Parameters ========== calc_transform : bool If ``False``, then only $J$ is returned. chop : bool All matrices are converted to exact types when computing eigenvalues and eigenvectors. As a result, there may be approximation errors. If ``chop==True``, these errors will be truncated. 
Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix([[ 6, 5, -2, -3], [-3, -1, 3, 3], [ 2, 1, -2, -3], [-1, 1, 5, 5]]) >>> P, J = M.jordan_form() >>> J Matrix([ [2, 1, 0, 0], [0, 2, 0, 0], [0, 0, 2, 1], [0, 0, 0, 2]]) See Also ======== jordan_block """ if not M.is_square: raise NonSquareMatrixError("Only square matrices have Jordan forms") mat = M has_floats = M.has(Float) if has_floats: try: max_prec = max(term._prec for term in M._mat if isinstance(term, Float)) except ValueError: # if no term in the matrix is explicitly a Float calling max() # will throw a error so setting max_prec to default value of 53 max_prec = 53 # setting minimum max_dps to 15 to prevent loss of precision in # matrix containing non evaluated expressions max_dps = max(prec_to_dps(max_prec), 15) def restore_floats(*args): """If ``has_floats`` is `True`, cast all ``args`` as matrices of floats.""" if has_floats: args = [m.evalf(n=max_dps, chop=chop) for m in args] if len(args) == 1: return args[0] return args # cache calculations for some speedup mat_cache = {} def eig_mat(val, pow): """Cache computations of ``(M - val*I)**pow`` for quick retrieval""" if (val, pow) in mat_cache: return mat_cache[(val, pow)] if (val, pow - 1) in mat_cache: mat_cache[(val, pow)] = mat_cache[(val, pow - 1)].multiply( mat_cache[(val, 1)], dotprodsimp=None) else: mat_cache[(val, pow)] = (mat - val*M.eye(M.rows)).pow(pow) return mat_cache[(val, pow)] # helper functions def nullity_chain(val, algebraic_multiplicity): """Calculate the sequence [0, nullity(E), nullity(E**2), ...] until it is constant where ``E = M - val*I``""" # mat.rank() is faster than computing the null space, # so use the rank-nullity theorem cols = M.cols ret = [0] nullity = cols - eig_mat(val, 1).rank() i = 2 while nullity != ret[-1]: ret.append(nullity) if nullity == algebraic_multiplicity: break nullity = cols - eig_mat(val, i).rank() i += 1 # Due to issues like #7146 and #15872, SymPy sometimes # gives the wrong rank. In this case, raise an error # instead of returning an incorrect matrix if nullity < ret[-1] or nullity > algebraic_multiplicity: raise MatrixError( "SymPy had encountered an inconsistent " "result while computing Jordan block: " "{}".format(M)) return ret def blocks_from_nullity_chain(d): """Return a list of the size of each Jordan block. If d_n is the nullity of E**n, then the number of Jordan blocks of size n is 2*d_n - d_(n-1) - d_(n+1)""" # d[0] is always the number of columns, so skip past it mid = [2*d[n] - d[n - 1] - d[n + 1] for n in range(1, len(d) - 1)] # d is assumed to plateau with "d[ len(d) ] == d[-1]", so # 2*d_n - d_(n-1) - d_(n+1) == d_n - d_(n-1) end = [d[-1] - d[-2]] if len(d) > 1 else [d[0]] return mid + end def pick_vec(small_basis, big_basis): """Picks a vector from big_basis that isn't in the subspace spanned by small_basis""" if len(small_basis) == 0: return big_basis[0] for v in big_basis: _, pivots = M.hstack(*(small_basis + [v])).echelon_form( with_pivots=True) if pivots[-1] == len(small_basis): return v # roots doesn't like Floats, so replace them with Rationals if has_floats: mat = mat.applyfunc(lambda x: nsimplify(x, rational=True)) # first calculate the jordan block structure eigs = mat.eigenvals() # Make sure that we have all roots in radical form for x in eigs: if x.has(CRootOf): raise MatrixError( "Jordan normal form is not implemented if the matrix have " "eigenvalues in CRootOf form") # most matrices have distinct eigenvalues # and so are diagonalizable. In this case, don't # do extra work! 
if len(eigs.keys()) == mat.cols: blocks = list(sorted(eigs.keys(), key=default_sort_key)) jordan_mat = mat.diag(*blocks) if not calc_transform: return restore_floats(jordan_mat) jordan_basis = [eig_mat(eig, 1).nullspace()[0] for eig in blocks] basis_mat = mat.hstack(*jordan_basis) return restore_floats(basis_mat, jordan_mat) block_structure = [] for eig in sorted(eigs.keys(), key=default_sort_key): algebraic_multiplicity = eigs[eig] chain = nullity_chain(eig, algebraic_multiplicity) block_sizes = blocks_from_nullity_chain(chain) # if block_sizes = = [a, b, c, ...], then the number of # Jordan blocks of size 1 is a, of size 2 is b, etc. # create an array that has (eig, block_size) with one # entry for each block size_nums = [(i+1, num) for i, num in enumerate(block_sizes)] # we expect larger Jordan blocks to come earlier size_nums.reverse() block_structure.extend( (eig, size) for size, num in size_nums for _ in range(num)) jordan_form_size = sum(size for eig, size in block_structure) if jordan_form_size != M.rows: raise MatrixError( "SymPy had encountered an inconsistent result while " "computing Jordan block. : {}".format(M)) blocks = (mat.jordan_block(size=size, eigenvalue=eig) for eig, size in block_structure) jordan_mat = mat.diag(*blocks) if not calc_transform: return restore_floats(jordan_mat) # For each generalized eigenspace, calculate a basis. # We start by looking for a vector in null( (A - eig*I)**n ) # which isn't in null( (A - eig*I)**(n-1) ) where n is # the size of the Jordan block # # Ideally we'd just loop through block_structure and # compute each generalized eigenspace. However, this # causes a lot of unneeded computation. Instead, we # go through the eigenvalues separately, since we know # their generalized eigenspaces must have bases that # are linearly independent. jordan_basis = [] for eig in sorted(eigs.keys(), key=default_sort_key): eig_basis = [] for block_eig, size in block_structure: if block_eig != eig: continue null_big = (eig_mat(eig, size)).nullspace() null_small = (eig_mat(eig, size - 1)).nullspace() # we want to pick something that is in the big basis # and not the small, but also something that is independent # of any other generalized eigenvectors from a different # generalized eigenspace sharing the same eigenvalue. vec = pick_vec(null_small + eig_basis, null_big) new_vecs = [eig_mat(eig, i).multiply(vec, dotprodsimp=None) for i in range(size)] eig_basis.extend(new_vecs) jordan_basis.extend(reversed(new_vecs)) basis_mat = mat.hstack(*jordan_basis) return restore_floats(basis_mat, jordan_mat) def _left_eigenvects(M, **flags): """Returns left eigenvectors and eigenvalues. This function returns the list of triples (eigenval, multiplicity, basis) for the left eigenvectors. Options are the same as for eigenvects(), i.e. the ``**flags`` arguments gets passed directly to eigenvects(). 
Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]]) >>> M.eigenvects() [(-1, 1, [Matrix([ [-1], [ 1], [ 0]])]), (0, 1, [Matrix([ [ 0], [-1], [ 1]])]), (2, 1, [Matrix([ [2/3], [1/3], [ 1]])])] >>> M.left_eigenvects() [(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2, 1, [Matrix([[1, 1, 1]])])] """ eigs = M.transpose().eigenvects(**flags) return [(val, mult, [l.transpose() for l in basis]) for val, mult, basis in eigs] def _singular_values(M): """Compute the singular values of a Matrix Examples ======== >>> from sympy import Matrix, Symbol >>> x = Symbol('x', real=True) >>> M = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]]) >>> M.singular_values() [sqrt(x**2 + 1), 1, 0] See Also ======== condition_number """ if M.rows >= M.cols: valmultpairs = M.H.multiply(M).eigenvals() else: valmultpairs = M.multiply(M.H).eigenvals() # Expands result from eigenvals into a simple list vals = [] for k, v in valmultpairs.items(): vals += [sqrt(k)] * v # dangerous! same k in several spots! # Pad with zeros if singular values are computed in reverse way, # to give consistent format. if len(vals) < M.cols: vals += [M.zero] * (M.cols - len(vals)) # sort them in descending order vals.sort(reverse=True, key=default_sort_key) return vals
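# Illustrative sketch, not part of the library source: a short interactive
# session tying together the eigenvalue routines defined above.  The matrices
# and expected outputs are hand-worked examples, assuming a standard SymPy
# installation.
#
#   >>> from sympy import Matrix
#   >>> M = Matrix([[2, 1], [1, 2]])
#   >>> M.eigenvals() == {3: 1, 1: 1}
#   True
#   >>> P, D = M.diagonalize()
#   >>> P * D * P.inv() == M
#   True
#   >>> Matrix([[1, 2], [-2, 1]]).is_positive_definite   # non-symmetric but definite
#   True
#   >>> Matrix([[1, 2], [2, 1]]).singular_values()
#   [3, 1]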
from .utilities import _iszero def _columnspace(M, simplify=False): """Returns a list of vectors (Matrix objects) that span columnspace of ``M`` Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6]) >>> M Matrix([ [ 1, 3, 0], [-2, -6, 0], [ 3, 9, 6]]) >>> M.columnspace() [Matrix([ [ 1], [-2], [ 3]]), Matrix([ [0], [0], [6]])] See Also ======== nullspace rowspace """ reduced, pivots = M.echelon_form(simplify=simplify, with_pivots=True) return [M.col(i) for i in pivots] def _nullspace(M, simplify=False, iszerofunc=_iszero): """Returns list of vectors (Matrix objects) that span nullspace of ``M`` Examples ======== >>> from sympy.matrices import Matrix >>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6]) >>> M Matrix([ [ 1, 3, 0], [-2, -6, 0], [ 3, 9, 6]]) >>> M.nullspace() [Matrix([ [-3], [ 1], [ 0]])] See Also ======== columnspace rowspace """ reduced, pivots = M.rref(iszerofunc=iszerofunc, simplify=simplify) free_vars = [i for i in range(M.cols) if i not in pivots] basis = [] for free_var in free_vars: # for each free variable, we will set it to 1 and all others # to 0. Then, we will use back substitution to solve the system vec = [M.zero] * M.cols vec[free_var] = M.one for piv_row, piv_col in enumerate(pivots): vec[piv_col] -= reduced[piv_row, free_var] basis.append(vec) return [M._new(M.cols, 1, b) for b in basis] def _rowspace(M, simplify=False): """Returns a list of vectors that span the row space of ``M``. Examples ======== >>> from sympy import Matrix >>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6]) >>> M Matrix([ [ 1, 3, 0], [-2, -6, 0], [ 3, 9, 6]]) >>> M.rowspace() [Matrix([[1, 3, 0]]), Matrix([[0, 0, 6]])] """ reduced, pivots = M.echelon_form(simplify=simplify, with_pivots=True) return [reduced.row(i) for i in range(len(pivots))] def _orthogonalize(cls, *vecs, normalize=False, rankcheck=False): """Apply the Gram-Schmidt orthogonalization procedure to vectors supplied in ``vecs``. Parameters ========== vecs vectors to be made orthogonal normalize : bool If ``True``, return an orthonormal basis. rankcheck : bool If ``True``, the computation does not stop when encountering linearly dependent vectors. If ``False``, it will raise ``ValueError`` when any zero or linearly dependent vectors are found. Returns ======= list List of orthogonal (or orthonormal) basis vectors. Examples ======== >>> from sympy import I, Matrix >>> v = [Matrix([1, I]), Matrix([1, -I])] >>> Matrix.orthogonalize(*v) [Matrix([ [1], [I]]), Matrix([ [ 1], [-I]])] See Also ======== MatrixBase.QRdecomposition References ========== .. [1] https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process """ from .decompositions import _QRdecomposition_optional if not vecs: return [] all_row_vecs = (vecs[0].rows == 1) vecs = [x.vec() for x in vecs] M = cls.hstack(*vecs) Q, R = _QRdecomposition_optional(M, normalize=normalize) if rankcheck and Q.cols < len(vecs): raise ValueError("GramSchmidt: vector set not linearly independent") ret = [] for i in range(Q.cols): if all_row_vecs: col = cls(Q[:, i].T) else: col = cls(Q[:, i]) ret.append(col) return ret
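# Illustrative sketch, not part of the library source: a short interactive
# session exercising the subspace routines defined above.  The matrix and
# expected outputs are hand-worked examples, assuming a standard SymPy
# installation.
#
#   >>> from sympy import Matrix
#   >>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
#   >>> len(M.columnspace()), len(M.nullspace()), M.rank()   # rank-nullity: 2 + 1 = 3
#   (2, 1, 2)
#   >>> M * M.nullspace()[0] == Matrix([0, 0, 0])
#   True
#   >>> Q = Matrix.orthogonalize(Matrix([1, 1, 0]), Matrix([1, 0, 1]), normalize=True)
#   >>> (Q[0].T * Q[1])[0, 0]                                # orthonormal basis
#   0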